from .cache import Cache
from .compat import functools, urllib  # isort: split
from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
from .cookies import LenientSimpleCookie, load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .networking import HEADRequest, Request, RequestDirector
from .networking.common import _REQUEST_HANDLERS
from .networking.exceptions import (
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
    UnavailableVideoError,
    format_decimal_suffix,
    orderedSet_from_options,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
from .utils._utils import _YDLLogger
from .utils.networking import (
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader does not know how to
    extract all the needed information (that is the task of the
    InfoExtractors), so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
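
    For example, a minimal embedding sketch (the URL below is only a
    placeholder) could look like:

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}) as ydl:
            ydl.download(['https://example.com/watch?v=placeholder'])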
    username: Username for authentication purposes.
    password: Password for authentication purposes.
    videopassword: Password for accessing a video.
    ap_mso: Adobe Pass multiple-system operator identifier.
    ap_username: Multiple-system operator account username.
    ap_password: Multiple-system operator account password.
    usenetrc: Use netrc for authentication instead.
    netrc_location: Location of the netrc file. Defaults to ~/.netrc.
    netrc_cmd: Use a shell command to get credentials
    verbose: Print additional info to stdout.
    quiet: Do not print messages to stdout.
    no_warnings: Do not print out anything for warnings.
    forceprint: A dict with keys WHEN mapped to a list of templates to
        print to stdout. The allowed keys are video or any of the
        items in utils.POSTPROCESS_WHEN.
        For compatibility, a single list is also accepted
    print_to_file: A dict with keys WHEN (same as forceprint) mapped to
        a list of tuples with (template, filename)
    forcejson: Force printing info_dict as JSON.
    dump_single_json: Force printing the info_dict of the whole playlist
        (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
        of 'skip_download' or 'simulate'.
    simulate: Do not download the video files. If unset (or None),
        simulate only if listsubtitles, listformats or list_thumbnails is used
    format: Video format code. see "FORMAT SELECTION" for more details.
        You can also pass a function. The function takes 'ctx' as
        argument and returns the formats to download.
        See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
        extracting metadata even if the video is not actually
        available for download (experimental)
    format_sort: A list of fields by which to sort the video formats.
        See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. see "Sorting Formats"
    prefer_free_formats: Whether to prefer video formats with free containers
        over non-free ones of same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
    check_formats: Whether to test if the formats are downloadable.
        Can be True (check all), False (check none),
        'selected' (check selected formats),
        or None (check only if requested by extractor)
    paths: Dictionary of output paths. The allowed keys are 'home'
        'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl: Dictionary of templates for output names. Allowed keys
        are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
        For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name: Limit length of filename (extension excluded)
    windowsfilenames: Force the filenames to be windows compatible
    ignoreerrors: Do not stop on download/postprocessing errors.
        Can be 'only_download' to ignore only download errors.
        Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
        the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites: Overwrite all video and metadata files if True,
        overwrite only non-video files if None
        and don't overwrite any file if False
        For compatibility with youtube-dl,
        "nooverwrites" may also be used instead
    playlist_items: Specific indices of playlist to download.
    playlistrandom: Download playlist items in random order.
    lazy_playlist: Process playlist entries as they are received.
    matchtitle: Download only matching titles.
    rejecttitle: Reject downloads for matching titles.
    logger: Log messages to a logging.Logger instance.
    logtostderr: Print everything to stderr instead of stdout.
    consoletitle: Display progress in console window's titlebar.
    writedescription: Write the video description to a .description file
    writeinfojson: Write the video metadata to a .info.json file
    clean_infojson: Remove internal metadata from the infojson
    getcomments: Extract video comments. This will not be written to disk
        unless writeinfojson is also given
    writeannotations: Write the video annotations to a .annotations.xml file
    writethumbnail: Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
        also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink: Write an internet shortcut file, depending on the
        current platform (.url/.webloc/.desktop)
    writeurllink: Write a Windows internet shortcut file (.url)
    writewebloclink: Write a macOS internet shortcut file (.webloc)
    writedesktoplink: Write a Linux internet shortcut file (.desktop)
    writesubtitles: Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles: Lists all available subtitles for the video
    subtitlesformat: The format code for subtitles
    subtitleslangs: List of languages of the subtitles to download (can be regex).
        The list may contain "all" to refer to all the available
        subtitles. The language can be prefixed with a "-" to
        exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo: Keep the video file after post-processing
    daterange: A utils.DateRange object, download only if the upload_date is in the range.
    skip_download: Skip the actual download of the video file
    cachedir: Location of the cache files in the filesystem.
        False to disable filesystem cache.
    noplaylist: Download single video instead of a playlist if in doubt.
    age_limit: An integer representing the user's age in years.
        Videos unsuitable for the given age are skipped.
    min_views: An integer representing the minimum view count the video
        must have in order to not be skipped.
        Videos without view count information are always
        downloaded. None for no limit.
    max_views: An integer representing the maximum view count.
        Videos that are more popular than that are not
        downloaded.
        Videos without view count information are always
        downloaded. None for no limit.
    download_archive: A set, or the name of a file where all downloads are recorded.
        Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
        file that is in the archive.
    break_per_url: Whether break_on_reject and break_on_existing
        should act on each input URL as opposed to for the entire queue
    cookiefile: File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
        name/path from where cookies are loaded, the name of the keyring,
        and the container name, e.g. ('chrome', ) or
        ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
        support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
        If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
        (Only supported by some extractors)
    enable_file_urls: Enable file:// URLs. This is disabled by default for security reasons.
    http_headers: A dictionary of custom headers to be used for all requests
    proxy: URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
        on geo-restricted sites.
    socket_timeout: Time to wait for unresponsive hosts, in seconds
    bidi_workaround: Work around buggy terminals without bidirectional text
        support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search: Prepend this string if an input url is not valid.
        'auto' for elaborate guessing
    encoding: Use this encoding instead of the system-specified.
    extract_flat: Whether to resolve and process url_results further
        * False: Always process. Default for API
        * True: Never process
        * 'in_playlist': Do not process inside playlist/multi_video
        * 'discard': Always process, but don't return the result
          from inside playlist/multi_video
        * 'discard_in_playlist': Same as "discard", but only for
          playlists (not multi_video). Default for CLI
    wait_for_video: If given, wait for scheduled streams to become available.
        The value should be a tuple containing the range
        (min_secs, max_secs) to wait between retries
    postprocessors: A list of dictionaries, each with an entry
        * key: The name of the postprocessor. See
          yt_dlp/postprocessor/__init__.py for a list.
        * when: When to run the postprocessor. Allowed values are
          the entries of utils.POSTPROCESS_WHEN
          Assumed to be 'post_process' if not given
    progress_hooks: A list of functions that get called on download
        progress, with a dictionary with the entries
        * status: One of "downloading", "error", or "finished".
          Check this first and ignore unknown values.
        * info_dict: The extracted info_dict

        If status is one of "downloading", or "finished", the
        following properties may also be present:
        * filename: The final filename (always present)
        * tmpfilename: The filename we're currently writing to
        * downloaded_bytes: Bytes on disk
        * total_bytes: Size of the whole file, None if unknown
        * total_bytes_estimate: Guess of the eventual file size,
        * elapsed: The number of seconds since download started.
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if unknown
        * fragment_index: The counter of the currently
          downloaded video fragment.
        * fragment_count: The number of fragments (= individual
          files that will be merged)

        Progress hooks are guaranteed to be called at least once
        (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
        progress, with a dictionary with the entries
        * status: One of "started", "processing", or "finished".
          Check this first and ignore unknown values.
        * postprocessor: Name of the postprocessor
        * info_dict: The extracted info_dict

        Progress hooks are guaranteed to be called at least twice
        (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext: Expected final extension; used to detect when the file was
        already downloaded and converted
    fixup: Automatically correct known faults of the file.
        - "never": do nothing
        - "warn": only emit a warning
        - "detect_or_warn": check whether we can do anything
          about it, warn otherwise (default)
    source_address: Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
    sleep_interval: Number of seconds to sleep before each download when
        used alone or a lower bound of a range for randomized
        sleep before each download (minimum possible number
        of seconds to sleep) when used along with max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
        download (maximum possible number of seconds to sleep).
        Must only be used along with sleep_interval.
        Actual sleep time will be a random float from range
        [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats: Print an overview of available video formats and exit.
    list_thumbnails: Print a table of all thumbnails and exit.
    match_filter: A function that gets called for every video with the signature
        (info_dict, *, incomplete: bool) -> Optional[str]
        For backward compatibility with youtube-dl, the signature
        (info_dict) -> Optional[str] is also allowed.
        - If it returns a message, the video is ignored.
        - If it returns None, the video is downloaded.
        - If it returns utils.NO_DEFAULT, the user is interactively
          asked whether to download the video.
        - Raise utils.DownloadCancelled(msg) to abort remaining
          downloads when a video is rejected.
        match_filter_func in utils.py is one example for this.
    color: A Dictionary with output stream names as keys
        and their respective color policy as values.
        Can also just be a single color policy,
        in which case it applies to all outputs.
        Valid stream names are 'stdout' and 'stderr'.
        Valid color policies are one of 'always', 'auto', 'no_color' or 'never'.
    geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
        Two-letter ISO 3166-2 country code that will be used for
        explicit geographic restriction bypassing via faking
        X-Forwarded-For HTTP header
        IP range in CIDR notation that will be used similarly to
    external_downloader: A dictionary of protocol keys and the executable of the
        external downloader to use for it. The allowed protocols
        are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
        Set the value to 'native' to use the native downloader
    compat_opts: Compatibility options. See "Differences in default behavior".
        The following options do not work when used through the API:
        filename, abort-on-error, multistreams, no-live-chat, format-sort,
        no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
        Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
        Allowed keys are 'download', 'postprocess',
        'download-title' (console title) and 'postprocess-title'.
        The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
        as argument and return the time to sleep in seconds.
        Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges: A callback function that gets called for every video with
        the signature (info_dict, ydl) -> Iterable[Section].
        Only the returned sections will be downloaded.
        Each Section is a dict with the following keys:
        * start_time: Start time of the section in seconds
        * end_time: End time of the section in seconds
        * title: Section title (Optional)
        * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress: Do not print the progress bar
    live_from_start: Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
        to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
        and a list of additional command-line arguments for the
        postprocessor/executable. The dict can also have "PP+EXE" keys
        which are used when the given exe is used by the given PP.
        Use 'default' as the name for arguments to be passed to all PP
        For compatibility with youtube-dl, a single list of args can also be used
    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors (default: 3)
    dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
        discontinuities such as ad breaks (default: False)
    extractor_args: A dictionary of arguments to be passed to the extractors.
        See "EXTRACTOR ARGUMENTS" for details.
        E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched: Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject: Stop the download process when encountering a video that
        has been filtered out.
        - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
        - Use allowed_extractors = ['generic', 'default']
    playliststart: - Use playlist_items
        Playlist item to start at.
    playlistend: - Use playlist_items
        Playlist item to end at.
    playlistreverse: - Use playlist_items
        Download playlist items in reverse order.
    forceurl: - Use forceprint
        Force printing final URL.
    forcetitle: - Use forceprint
        Force printing title.
    forceid: - Use forceprint
    forcethumbnail: - Use forceprint
        Force printing thumbnail URL.
    forcedescription: - Use forceprint
        Force printing description.
    forcefilename: - Use forceprint
        Force printing final filename.
    forceduration: - Use forceprint
        Force printing duration.
    allsubtitles: - Use subtitleslangs = ['all']
        Downloads all the subtitles of the video
        (requires writesubtitles or writeautomaticsub)
    include_ads: - Doesn't work
    call_home: - Not implemented
        Boolean, true iff we are allowed to contact the
        yt-dlp servers for debugging.
    post_hooks: - Register a custom postprocessor
        A list of functions that get called as the final step
        for each video file, after all postprocessors have been
        called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
        Use the native HLS downloader instead of ffmpeg/avconv
        if True, otherwise use ffmpeg/avconv if False, otherwise
        use downloader suggested by extractor if None.
    prefer_ffmpeg: - avconv support is deprecated
        If False, use avconv instead of ffmpeg if both are available,
        otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
        If True (default), DASH manifests and related
        data will be downloaded and processed by extractor.
        You can reduce network I/O by disabling it if you don't
        care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
        If True (default), HLS manifests and related
        data will be downloaded and processed by extractor.
        You can reduce network I/O by disabling it if you don't
        care about HLS. (only for youtube)
    no_color: Same as `color='no_color'`
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',

        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'

    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)

            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        if self.params.get('no_color'):
            if self.params.get('color') is not None:
                self.report_warning('Overwriting params from "color" with "no_color"')
            self.params['color'] = 'no_color'

        term_allow_color = os.environ.get('TERM', '').lower() != 'dumb'

        def process_color_policy(stream):
            stream_name = {sys.stdout: 'stdout', sys.stderr: 'stderr'}[stream]
            policy = traverse_obj(self.params, ('color', (stream_name, None), {str}), get_all=False)
            if policy in ('auto', None):
                return term_allow_color and supports_terminal_sequences(stream)
            assert policy in ('always', 'never', 'no_color')
            return {'always': True, 'never': False}.get(policy, policy)

        self._allow_colors = Namespace(**{
            name: process_color_policy(stream)
            for name, stream in self._out_files.items_ if name != 'console'

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
        if current_version < MIN_SUPPORTED:
            msg = 'Python version %d.%d is no longer supported'
        self.deprecated_feature(
            f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
        self.__header_cookies = []
        self._load_cookies(self.params['http_headers'].get('Cookie'))  # compat
        self.params['http_headers'].pop('Cookie', None)

        self._request_director = self.build_request_director(
            sorted(_REQUEST_HANDLERS.values(), key=lambda rh: rh.RH_NAME.lower()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            elif not is_path_like(fn):

            self.write_debug(f'Loading archive file {fn!r}')
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there is no instance it will create a new one and add
        it to the extractor list.
        ie = self._ies_instances.get(ie_key)
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)

    def add_default_info_extractors(self):
        Add the InfoExtractors returned by gen_extractors to the end of the list
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
            if message in self._printed_messages:
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
        self._send_console_code('\033[23;0t')  # Restore the title from stack

        self.save_console_title()

    def save_cookies(self):
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def __exit__(self, *args):
        self.restore_console_title()
        self._request_director.close()

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.
        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if sys.exc_info()[0]:  # if .trouble has been called from an except block
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += encode_compat_str(traceback.format_exc())
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = ''.join(tb_data)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

        EMPHASIS='light blue',
        BAD_FORMAT='light red',
        SUPPRESS='light black',

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
        return format_text(text, f) if allow_colors is True else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
            if self.params.get('no_warnings'):
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
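        # Illustrative sketch of the round-trip: with sep == 'AbCdEf...',
        # '%%(ext)s' first becomes '%AbCdEf...%(ext)s', survives expand_path()
        # without the doubled '%' being collapsed, and turns back into
        # '%%(ext)s' once the separator is stripped on return.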
        sep = ''.join(random.choices(string.ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),

    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
        except ValueError as err:

    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
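        # e.g. for a 12-entry playlist, a plain '%(playlist_index)s' is later
        # rendered as if it were '%(playlist_index)02d' (01, 02, ..., 12)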
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,

        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))

        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
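        # e.g. 'tags.0', 'subtitles.en.-1.ext', 'id.3:7' or 'formats.:.format_id'
        # (these forms are documented in the "OUTPUT TEMPLATE" section of the README)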
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<alternate>(?<!\\),[^|&)]+)?
            (?:&(?P<replacement>.*?))?
            (?:\|(?P<default>.*?))?

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
                if fields and not fields[i]:

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            value = _traverse_infodict(mdict['fields'])
                value = float_or_none(value)
                if value is not None:
            offset_key = mdict['maths']
                value = float_or_none(value)
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                        offset = float_or_none(_traverse_infodict(item))
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):

            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):

        class _ReplacementFormatter(string.Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            value, replacement, default, last_field = None, None, na, ''
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                last_field, replacement = mobj['fields'], mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and last_field in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[last_field]:d}d'

            if None not in (value, replacement):
                    value = replacement_formatter.format(replacement, value)
                    value, default = None, na

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
                value, fmt = default, 's'
            elif fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(last_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                    value = str(value)[0]
            elif fmt[-1] not in 'rsa':  # numeric
                value = float_or_none(value)
                    value, fmt = default, 's'

                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                elif fmt[-1] == 'a':
                    value, fmt = ascii(value), str_fmt
                if fmt[-1] in 'csra':
                    value = sanitizer(last_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
                force_ext = OUTTMPL_TYPES[tmpl_type]
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):

            if not self.params.get('paths'):
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
1417 elif os
.path
.isabs(filename
):
1418 self
.report_warning('--paths is ignored since an absolute path is given in output template', only_once
=True)
1419 if filename
== '-' or not filename
:
1422 return self
.get_output_path(dir_type
, filename
)
    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = 'video' if 'playlist-match-filter' in self.params['compat_opts'] else info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = ''.join((
                format_field(info_dict, 'id', f'{self._format_screen("%s", self.Styles.ID)}: '),
                format_field(info_dict, 'title', f'{self._format_screen("%s", self.Styles.EMPHASIS)} '),
                'has already been recorded in the archive'))
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
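
    # Illustrative sketch (not part of the upstream source): _match_entry() consults
    # params['match_filter'], normally built from --match-filters, but any callable
    # with this shape works; returning a string gives the skip reason, returning
    # None accepts the entry (see the call sites above).
    #
    #     def only_short_videos(info, *, incomplete=False):
    #         if not incomplete and (info.get('duration') or 0) > 600:
    #             return 'Skipping: longer than 10 minutes'
    #         return None
    #
    #     ydl_opts = {'match_filter': only_short_videos}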
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)
    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        @param url          URL to extract

        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                               'has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
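
    # Illustrative sketch (not part of the upstream source): typical embedding use of
    # extract_info(); the URL below is a placeholder.
    #
    #     from yt_dlp import YoutubeDL
    #
    #     with YoutubeDL({'quiet': True}) as ydl:
    #         info = ydl.extract_info('https://example.com/watch?v=xyz', download=False)
    #         # pass process=False to get the raw, unresolved result instead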
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper
    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                raise
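
    # Illustrative sketch (not part of the upstream source): 'wait_for_video' is a
    # (min_seconds, max_seconds) tuple (the CLI's --wait-for-video); _wait_for_video()
    # sleeps within that window and raises ReExtractInfo so extraction is retried.
    #
    #     ydl_opts = {'wait_for_video': (60, 600)}  # poll again after 1-10 minutes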
    def _load_cookies(self, data, *, autoscope=True):
        """Loads cookies from a `Cookie` header

        This tries to work around the security vulnerability of passing cookies to every domain.
        See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj

        @param data         The Cookie header as string to load the cookies from
        @param autoscope    If `False`, scope cookies using Set-Cookie syntax and error for cookie without domains
                            If `True`, save cookies for later to be stored in the jar with a limited scope
                            If a URL, save cookies in the jar with the domain of the URL
        """
        for cookie in LenientSimpleCookie(data).values():
            if autoscope and any(cookie.values()):
                raise ValueError('Invalid syntax in Cookie Header')

            domain = cookie.get('domain') or ''
            expiry = cookie.get('expires')
            if expiry == '':  # 0 is valid
                expiry = None
            prepared_cookie = http.cookiejar.Cookie(
                cookie.get('version') or 0, cookie.key, cookie.value, None, False,
                domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
                cookie.get('secure') or False, expiry, False, None, None, {})

            if domain:
                self.cookiejar.set_cookie(prepared_cookie)
            elif autoscope is True:
                self.deprecated_feature(
                    'Passing cookies as a header is a potential security risk; '
                    'they will be scoped to the domain of the downloaded urls. '
                    'Please consider loading cookies from a file or browser instead.')
                self.__header_cookies.append(prepared_cookie)
            elif autoscope:
                self.report_warning(
                    'The extractor result contains an unscoped cookie as an HTTP header. '
                    f'If you are using yt-dlp with an input URL{bug_reports_message(before=",")}',
                    only_once=True)
                self._apply_header_cookies(autoscope, [prepared_cookie])
            else:
                self.report_error('Unscoped cookies are not allowed; please specify some sort of scoping',
                                  tb=False, is_error=False)
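
    # Illustrative sketch (not part of the upstream source): the three autoscope modes
    # accepted above, with made-up cookie values.
    #
    #     self._load_cookies('foo=bar', autoscope='https://example.com/watch')  # scoped to the URL's domain
    #     self._load_cookies('foo=bar; Domain=.example.com', autoscope=False)   # caller provides the scope
    #     self._load_cookies('foo=bar', autoscope=True)                         # deferred; scoped per downloaded URL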
    def _apply_header_cookies(self, url, cookies=None):
        """Applies stray header cookies to the provided url

        This loads header cookies and scopes them to the domain provided in `url`.
        While this is not ideal, it helps reduce the risk of them being sent
        to an unintended destination while mostly maintaining compatibility.
        """
        parsed = urllib.parse.urlparse(url)
        if not parsed.hostname:
            return

        for cookie in map(copy.copy, cookies or self.__header_cookies):
            cookie.domain = f'.{parsed.hostname}'
            self.cookiejar.set_cookie(cookie)
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        self._apply_header_cookies(url)

        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                    self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
1754 def process_ie_result(self
, ie_result
, download
=True, extra_info
=None):
1756 Take the result of the ie(may be modified) and resolve all unresolved
1757 references (URLs, playlist items).
1759 It will also download the videos if 'download'.
1760 Returns the resolved ie_result.
1762 if extra_info
is None:
1764 result_type
= ie_result
.get('_type', 'video')
1766 if result_type
in ('url', 'url_transparent'):
1767 ie_result
['url'] = sanitize_url(
1768 ie_result
['url'], scheme
='http' if self
.params
.get('prefer_insecure') else 'https')
1769 if ie_result
.get('original_url') and not extra_info
.get('original_url'):
1770 extra_info
= {'original_url': ie_result['original_url'], **extra_info}
1772 extract_flat
= self
.params
.get('extract_flat', False)
1773 if ((extract_flat
== 'in_playlist' and 'playlist' in extra_info
)
1774 or extract_flat
is True):
1775 info_copy
= ie_result
.copy()
1776 ie
= try_get(ie_result
.get('ie_key'), self
.get_info_extractor
)
1777 if ie
and not ie_result
.get('id'):
1778 info_copy
['id'] = ie
.get_temp_id(ie_result
['url'])
1779 self
.add_default_extra_info(info_copy
, ie
, ie_result
['url'])
1780 self
.add_extra_info(info_copy
, extra_info
)
1781 info_copy
, _
= self
.pre_process(info_copy
)
1782 self
._fill
_common
_fields
(info_copy
, False)
1783 self
.__forced
_printings
(info_copy
)
1784 self
._raise
_pending
_errors
(info_copy
)
1785 if self
.params
.get('force_write_download_archive', False):
1786 self
.record_download_archive(info_copy
)
1789 if result_type
== 'video':
1790 self
.add_extra_info(ie_result
, extra_info
)
1791 ie_result
= self
.process_video_result(ie_result
, download
=download
)
1792 self
._raise
_pending
_errors
(ie_result
)
1793 additional_urls
= (ie_result
or {}).get('additional_urls')
1795 # TODO: Improve MetadataParserPP to allow setting a list
1796 if isinstance(additional_urls
, str):
1797 additional_urls
= [additional_urls
]
1799 '[info] %s: %d additional URL(s) requested' % (ie_result
['id'], len(additional_urls
)))
1800 self
.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls
))
1801 ie_result
['additional_entries'] = [
1803 url
, download
, extra_info
=extra_info
,
1804 force_generic_extractor
=self
.params
.get('force_generic_extractor'))
1805 for url
in additional_urls
1808 elif result_type
== 'url':
1809 # We have to add extra_info to the results because it may be
1810 # contained in a playlist
1811 return self
.extract_info(
1812 ie_result
['url'], download
,
1813 ie_key
=ie_result
.get('ie_key'),
1814 extra_info
=extra_info
)
1815 elif result_type
== 'url_transparent':
1816 # Use the information from the embedding page
1817 info
= self
.extract_info(
1818 ie_result
['url'], ie_key
=ie_result
.get('ie_key'),
1819 extra_info
=extra_info
, download
=False, process
=False)
1821 # extract_info may return None when ignoreerrors is enabled and
1822 # extraction failed with an error, don't crash and return early
1827 exempted_fields
= {'_type', 'url', 'ie_key'}
1828 if not ie_result
.get('section_end') and ie_result
.get('section_start') is None:
1829 # For video clips, the id etc of the clip extractor should be used
1830 exempted_fields |
= {'id', 'extractor', 'extractor_key'}
1832 new_result
= info
.copy()
1833 new_result
.update(filter_dict(ie_result
, lambda k
, v
: v
is not None and k
not in exempted_fields
))
1835 # Extracted info may not be a video result (i.e.
1836 # info.get('_type', 'video') != video) but rather an url or
1837 # url_transparent. In such cases outer metadata (from ie_result)
1838 # should be propagated to inner one (info). For this to happen
1839 # _type of info should be overridden with url_transparent. This
1840 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1841 if new_result
.get('_type') == 'url':
1842 new_result
['_type'] = 'url_transparent'
1844 return self
.process_ie_result(
1845 new_result
, download
=download
, extra_info
=extra_info
)
1846 elif result_type
in ('playlist', 'multi_video'):
1847 # Protect from infinite recursion due to recursively nested playlists
1848 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1849 webpage_url
= ie_result
.get('webpage_url') # Playlists maynot have webpage_url
1850 if webpage_url
and webpage_url
in self
._playlist
_urls
:
1852 '[download] Skipping already downloaded playlist: %s'
1853 % ie_result
.get('title') or ie_result
.get('id'))
1856 self
._playlist
_level
+= 1
1857 self
._playlist
_urls
.add(webpage_url
)
1858 self
._fill
_common
_fields
(ie_result
, False)
1859 self
._sanitize
_thumbnails
(ie_result
)
1861 return self
.__process
_playlist
(ie_result
, download
)
1863 self
._playlist
_level
-= 1
1864 if not self
._playlist
_level
:
1865 self
._playlist
_urls
.clear()
1866 elif result_type
== 'compat_list':
1867 self
.report_warning(
1868 'Extractor %s returned a compat_list result. '
1869 'It needs to be updated.' % ie_result
.get('extractor'))
1872 self
.add_extra_info(r
, {
1873 'extractor': ie_result
['extractor'],
1874 'webpage_url': ie_result
['webpage_url'],
1875 'webpage_url_basename': url_basename(ie_result
['webpage_url']),
1876 'webpage_url_domain': get_domain(ie_result
['webpage_url']),
1877 'extractor_key': ie_result
['extractor_key'],
1880 ie_result
['entries'] = [
1881 self
.process_ie_result(_fixup(r
), download
, extra_info
)
1882 for r
in ie_result
['entries']
1886 raise Exception('Invalid result type: %s' % result_type
)
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)
    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
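
    # Illustrative sketch (not part of the upstream source): the keys assembled above
    # are what make playlist fields usable in output templates, e.g.:
    #
    #     ydl_opts = {'outtmpl': {'default': '%(playlist_title)s/%(playlist_index)03d - %(title)s.%(ext)s'}}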
1918 def __process_playlist(self
, ie_result
, download
):
1919 """Process each entry in the playlist"""
1920 assert ie_result
['_type'] in ('playlist', 'multi_video')
1922 common_info
= self
._playlist
_infodict
(ie_result
, strict
=True)
1923 title
= common_info
.get('playlist') or '<Untitled>'
1924 if self
._match
_entry
(common_info
, incomplete
=True) is not None:
1926 self
.to_screen(f
'[download] Downloading {ie_result["_type"]}: {title}')
1928 all_entries
= PlaylistEntries(self
, ie_result
)
1929 entries
= orderedSet(all_entries
.get_requested_items(), lazy
=True)
1931 lazy
= self
.params
.get('lazy_playlist')
1933 resolved_entries
, n_entries
= [], 'N/A'
1934 ie_result
['requested_entries'], ie_result
['entries'] = None, None
1936 entries
= resolved_entries
= list(entries
)
1937 n_entries
= len(resolved_entries
)
1938 ie_result
['requested_entries'], ie_result
['entries'] = tuple(zip(*resolved_entries
)) or ([], [])
1939 if not ie_result
.get('playlist_count'):
1940 # Better to do this after potentially exhausting entries
1941 ie_result
['playlist_count'] = all_entries
.get_full_count()
1943 extra
= self
._playlist
_infodict
(ie_result
, n_entries
=int_or_none(n_entries
))
1944 ie_copy
= collections
.ChainMap(ie_result
, extra
)
1946 _infojson_written
= False
1947 write_playlist_files
= self
.params
.get('allow_playlist_files', True)
1948 if write_playlist_files
and self
.params
.get('list_thumbnails'):
1949 self
.list_thumbnails(ie_result
)
1950 if write_playlist_files
and not self
.params
.get('simulate'):
1951 _infojson_written
= self
._write
_info
_json
(
1952 'playlist', ie_result
, self
.prepare_filename(ie_copy
, 'pl_infojson'))
1953 if _infojson_written
is None:
1955 if self
._write
_description
('playlist', ie_result
,
1956 self
.prepare_filename(ie_copy
, 'pl_description')) is None:
1958 # TODO: This should be passed to ThumbnailsConvertor if necessary
1959 self
._write
_thumbnails
('playlist', ie_result
, self
.prepare_filename(ie_copy
, 'pl_thumbnail'))
1962 if self
.params
.get('playlistreverse') or self
.params
.get('playlistrandom'):
1963 self
.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once
=True)
1964 elif self
.params
.get('playlistreverse'):
1966 elif self
.params
.get('playlistrandom'):
1967 random
.shuffle(entries
)
1969 self
.to_screen(f
'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
1970 f
'{format_field(ie_result, "playlist_count", " of %s")}')
1972 keep_resolved_entries
= self
.params
.get('extract_flat') != 'discard'
1973 if self
.params
.get('extract_flat') == 'discard_in_playlist':
1974 keep_resolved_entries
= ie_result
['_type'] != 'playlist'
1975 if keep_resolved_entries
:
1976 self
.write_debug('The information of all playlist entries will be held in memory')
1979 max_failures
= self
.params
.get('skip_playlist_after_errors') or float('inf')
1980 for i
, (playlist_index
, entry
) in enumerate(entries
):
1982 resolved_entries
.append((playlist_index
, entry
))
1986 entry
['__x_forwarded_for_ip'] = ie_result
.get('__x_forwarded_for_ip')
1987 if not lazy
and 'playlist-index' in self
.params
['compat_opts']:
1988 playlist_index
= ie_result
['requested_entries'][i
]
1990 entry_copy
= collections
.ChainMap(entry
, {
1992 'n_entries': int_or_none(n_entries
),
1993 'playlist_index': playlist_index
,
1994 'playlist_autonumber': i
+ 1,
1997 if self
._match
_entry
(entry_copy
, incomplete
=True) is not None:
1998 # For compatabilty with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
1999 resolved_entries
[i
] = (playlist_index
, NO_DEFAULT
)
2002 self
.to_screen('[download] Downloading item %s of %s' % (
2003 self
._format
_screen
(i
+ 1, self
.Styles
.ID
), self
._format
_screen
(n_entries
, self
.Styles
.EMPHASIS
)))
2005 entry_result
= self
.__process
_iterable
_entry
(entry
, download
, collections
.ChainMap({
2006 'playlist_index': playlist_index
,
2007 'playlist_autonumber': i
+ 1,
2009 if not entry_result
:
2011 if failures
>= max_failures
:
2013 f
'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
2015 if keep_resolved_entries
:
2016 resolved_entries
[i
] = (playlist_index
, entry_result
)
2018 # Update with processed data
2019 ie_result
['entries'] = [e
for _
, e
in resolved_entries
if e
is not NO_DEFAULT
]
2020 ie_result
['requested_entries'] = [i
for i
, e
in resolved_entries
if e
is not NO_DEFAULT
]
2021 if ie_result
['requested_entries'] == try_call(lambda: list(range(1, ie_result
['playlist_count'] + 1))):
2022 # Do not set for full playlist
2023 ie_result
.pop('requested_entries')
2025 # Write the updated info to json
2026 if _infojson_written
is True and self
._write
_info
_json
(
2027 'updated playlist', ie_result
,
2028 self
.prepare_filename(ie_copy
, 'pl_infojson'), overwrite
=True) is None:
2031 ie_result
= self
.run_all_pps('playlist', ie_result
)
2032 self
.to_screen(f
'[download] Finished downloading playlist: {title}')
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
2040 def _build_format_filter(self
, filter_spec
):
2041 " Returns a function to filter the formats according to the filter_spec "
2051 operator_rex
= re
.compile(r
'''(?x)\s*
2053 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
2054 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
2055 ''' % '|'.join(map(re
.escape
, OPERATORS
.keys())))
2056 m
= operator_rex
.fullmatch(filter_spec
)
2059 comparison_value
= int(m
.group('value'))
2061 comparison_value
= parse_filesize(m
.group('value'))
2062 if comparison_value
is None:
2063 comparison_value
= parse_filesize(m
.group('value') + 'B')
2064 if comparison_value
is None:
2066 'Invalid value %r in format specification %r' % (
2067 m
.group('value'), filter_spec
))
2068 op
= OPERATORS
[m
.group('op')]
2073 '^=': lambda attr
, value
: attr
.startswith(value
),
2074 '$=': lambda attr
, value
: attr
.endswith(value
),
2075 '*=': lambda attr
, value
: value
in attr
,
2076 '~=': lambda attr
, value
: value
.search(attr
) is not None
2078 str_operator_rex
= re
.compile(r
'''(?x)\s*
2079 (?P<key>[a-zA-Z0-9._-]+)\s*
2080 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
2082 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
2083 (?(quote)(?P=quote))\s*
2084 ''' % '|'.join(map(re
.escape
, STR_OPERATORS
.keys())))
2085 m
= str_operator_rex
.fullmatch(filter_spec
)
2087 if m
.group('op') == '~=':
2088 comparison_value
= re
.compile(m
.group('value'))
2090 comparison_value
= re
.sub(r
'''\\([\\"'])''', r
'\1', m
.group('value'))
2091 str_op
= STR_OPERATORS
[m
.group('op')]
2092 if m
.group('negation'):
2093 op
= lambda attr
, value
: not str_op(attr
, value
)
2098 raise SyntaxError('Invalid filter specification %r' % filter_spec
)
2101 actual_value
= f
.get(m
.group('key'))
2102 if actual_value
is None:
2103 return m
.group('none_inclusive')
2104 return op(actual_value
, comparison_value
)
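
    # Illustrative sketch (not part of the upstream source): the filters built above
    # correspond to the bracketed parts of a --format expression, e.g.
    # 'bestvideo[height<=?720][fps>30]' (the trailing '?' also keeps formats where the
    # field is unknown) or 'best[ext=mp4][format_note*=premium]' for string operators.
    #
    #     accept = self._build_format_filter('height<=720')
    #     formats = [f for f in formats if accept(f)]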
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
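
    # Illustrative sketch (not part of the upstream source): the three defaults
    # returned above, in plain terms.
    #
    #     'best/bestvideo+bestaudio'   # prefer_best: no merging possible, live without --live-from-start, or stdout
    #     'bestvideo+bestaudio/best'   # compat: 'format-spec' compat-opt or multiple audio streams allowed
    #     'bestvideo*+bestaudio/best'  # otherwise: the usual yt-dlp default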
2153 def build_format_selector(self
, format_spec
):
2154 def syntax_error(note
, start
):
2156 'Invalid format specification: '
2157 '{}\n\t{}\n\t{}^'.format(note
, format_spec
, ' ' * start
[1]))
2158 return SyntaxError(message
)
2160 PICKFIRST
= 'PICKFIRST'
2164 FormatSelector
= collections
.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
2166 allow_multiple_streams
= {'audio': self
.params
.get('allow_multiple_audio_streams', False),
2167 'video': self
.params
.get('allow_multiple_video_streams', False)}
2169 def _parse_filter(tokens
):
2171 for type, string_
, start
, _
, _
in tokens
:
2172 if type == tokenize
.OP
and string_
== ']':
2173 return ''.join(filter_parts
)
2175 filter_parts
.append(string_
)
2177 def _remove_unused_ops(tokens
):
2178 # Remove operators that we don't use and join them with the surrounding strings.
2179 # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
2180 ALLOWED_OPS
= ('/', '+', ',', '(', ')')
2181 last_string
, last_start
, last_end
, last_line
= None, None, None, None
2182 for type, string_
, start
, end
, line
in tokens
:
2183 if type == tokenize
.OP
and string_
== '[':
2185 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
2187 yield type, string_
, start
, end
, line
2188 # everything inside brackets will be handled by _parse_filter
2189 for type, string_
, start
, end
, line
in tokens
:
2190 yield type, string_
, start
, end
, line
2191 if type == tokenize
.OP
and string_
== ']':
2193 elif type == tokenize
.OP
and string_
in ALLOWED_OPS
:
2195 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
2197 yield type, string_
, start
, end
, line
2198 elif type in [tokenize
.NAME
, tokenize
.NUMBER
, tokenize
.OP
]:
2200 last_string
= string_
2204 last_string
+= string_
2206 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
2208 def _parse_format_selection(tokens
, inside_merge
=False, inside_choice
=False, inside_group
=False):
2210 current_selector
= None
2211 for type, string_
, start
, _
, _
in tokens
:
2212 # ENCODING is only defined in python 3.x
2213 if type == getattr(tokenize
, 'ENCODING', None):
2215 elif type in [tokenize
.NAME
, tokenize
.NUMBER
]:
2216 current_selector
= FormatSelector(SINGLE
, string_
, [])
2217 elif type == tokenize
.OP
:
2219 if not inside_group
:
2220 # ')' will be handled by the parentheses group
2221 tokens
.restore_last_token()
2223 elif inside_merge
and string_
in ['/', ',']:
2224 tokens
.restore_last_token()
2226 elif inside_choice
and string_
== ',':
2227 tokens
.restore_last_token()
2229 elif string_
== ',':
2230 if not current_selector
:
2231 raise syntax_error('"," must follow a format selector', start
)
2232 selectors
.append(current_selector
)
2233 current_selector
= None
2234 elif string_
== '/':
2235 if not current_selector
:
2236 raise syntax_error('"/" must follow a format selector', start
)
2237 first_choice
= current_selector
2238 second_choice
= _parse_format_selection(tokens
, inside_choice
=True)
2239 current_selector
= FormatSelector(PICKFIRST
, (first_choice
, second_choice
), [])
2240 elif string_
== '[':
2241 if not current_selector
:
2242 current_selector
= FormatSelector(SINGLE
, 'best', [])
2243 format_filter
= _parse_filter(tokens
)
2244 current_selector
.filters
.append(format_filter
)
2245 elif string_
== '(':
2246 if current_selector
:
2247 raise syntax_error('Unexpected "("', start
)
2248 group
= _parse_format_selection(tokens
, inside_group
=True)
2249 current_selector
= FormatSelector(GROUP
, group
, [])
2250 elif string_
== '+':
2251 if not current_selector
:
2252 raise syntax_error('Unexpected "+"', start
)
2253 selector_1
= current_selector
2254 selector_2
= _parse_format_selection(tokens
, inside_merge
=True)
2256 raise syntax_error('Expected a selector', start
)
2257 current_selector
= FormatSelector(MERGE
, (selector_1
, selector_2
), [])
2259 raise syntax_error(f
'Operator not recognized: "{string_}"', start
)
2260 elif type == tokenize
.ENDMARKER
:
2262 if current_selector
:
2263 selectors
.append(current_selector
)
2266 def _merge(formats_pair
):
2267 format_1
, format_2
= formats_pair
2270 formats_info
.extend(format_1
.get('requested_formats', (format_1
,)))
2271 formats_info
.extend(format_2
.get('requested_formats', (format_2
,)))
2273 if not allow_multiple_streams
['video'] or not allow_multiple_streams
['audio']:
2274 get_no_more
= {'video': False, 'audio': False}
2275 for (i
, fmt_info
) in enumerate(formats_info
):
2276 if fmt_info
.get('acodec') == fmt_info
.get('vcodec') == 'none':
2279 for aud_vid
in ['audio', 'video']:
2280 if not allow_multiple_streams
[aud_vid
] and fmt_info
.get(aud_vid
[0] + 'codec') != 'none':
2281 if get_no_more
[aud_vid
]:
2284 get_no_more
[aud_vid
] = True
2286 if len(formats_info
) == 1:
2287 return formats_info
[0]
2289 video_fmts
= [fmt_info
for fmt_info
in formats_info
if fmt_info
.get('vcodec') != 'none']
2290 audio_fmts
= [fmt_info
for fmt_info
in formats_info
if fmt_info
.get('acodec') != 'none']
2292 the_only_video
= video_fmts
[0] if len(video_fmts
) == 1 else None
2293 the_only_audio
= audio_fmts
[0] if len(audio_fmts
) == 1 else None
2295 output_ext
= get_compatible_ext(
2296 vcodecs
=[f
.get('vcodec') for f
in video_fmts
],
2297 acodecs
=[f
.get('acodec') for f
in audio_fmts
],
2298 vexts
=[f
['ext'] for f
in video_fmts
],
2299 aexts
=[f
['ext'] for f
in audio_fmts
],
2300 preferences
=(try_call(lambda: self
.params
['merge_output_format'].split('/'))
2301 or self
.params
.get('prefer_free_formats') and ('webm', 'mkv')))
2303 filtered
= lambda *keys
: filter(None, (traverse_obj(fmt
, *keys
) for fmt
in formats_info
))
2306 'requested_formats': formats_info
,
2307 'format': '+'.join(filtered('format')),
2308 'format_id': '+'.join(filtered('format_id')),
2310 'protocol': '+'.join(map(determine_protocol
, formats_info
)),
2311 'language': '+'.join(orderedSet(filtered('language'))) or None,
2312 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2313 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2314 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2319 'width': the_only_video
.get('width'),
2320 'height': the_only_video
.get('height'),
2321 'resolution': the_only_video
.get('resolution') or self
.format_resolution(the_only_video
),
2322 'fps': the_only_video
.get('fps'),
2323 'dynamic_range': the_only_video
.get('dynamic_range'),
2324 'vcodec': the_only_video
.get('vcodec'),
2325 'vbr': the_only_video
.get('vbr'),
2326 'stretched_ratio': the_only_video
.get('stretched_ratio'),
2327 'aspect_ratio': the_only_video
.get('aspect_ratio'),
2332 'acodec': the_only_audio
.get('acodec'),
2333 'abr': the_only_audio
.get('abr'),
2334 'asr': the_only_audio
.get('asr'),
2335 'audio_channels': the_only_audio
.get('audio_channels')
2340 def _check_formats(formats
):
2341 if (self
.params
.get('check_formats') is not None
2342 or self
.params
.get('allow_unplayable_formats')):
2345 elif self
.params
.get('check_formats') == 'selected':
2346 yield from self
._check
_formats
(formats
)
2350 if f
.get('has_drm'):
2351 yield from self
._check
_formats
([f
])
2355 def _build_selector_function(selector
):
2356 if isinstance(selector
, list): # ,
2357 fs
= [_build_selector_function(s
) for s
in selector
]
2359 def selector_function(ctx
):
2362 return selector_function
2364 elif selector
.type == GROUP
: # ()
2365 selector_function
= _build_selector_function(selector
.selector
)
2367 elif selector
.type == PICKFIRST
: # /
2368 fs
= [_build_selector_function(s
) for s
in selector
.selector
]
2370 def selector_function(ctx
):
2372 picked_formats
= list(f(ctx
))
2374 return picked_formats
2377 elif selector
.type == MERGE
: # +
2378 selector_1
, selector_2
= map(_build_selector_function
, selector
.selector
)
2380 def selector_function(ctx
):
2381 for pair
in itertools
.product(selector_1(ctx
), selector_2(ctx
)):
2384 elif selector
.type == SINGLE
: # atom
2385 format_spec
= selector
.selector
or 'best'
2387 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2388 if format_spec
== 'all':
2389 def selector_function(ctx
):
2390 yield from _check_formats(ctx
['formats'][::-1])
2391 elif format_spec
== 'mergeall':
2392 def selector_function(ctx
):
2393 formats
= list(_check_formats(
2394 f
for f
in ctx
['formats'] if f
.get('vcodec') != 'none' or f
.get('acodec') != 'none'))
2397 merged_format
= formats
[-1]
2398 for f
in formats
[-2::-1]:
2399 merged_format
= _merge((merged_format
, f
))
2403 format_fallback
, seperate_fallback
, format_reverse
, format_idx
= False, None, True, 1
2405 r
'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2407 if mobj
is not None:
2408 format_idx
= int_or_none(mobj
.group('n'), default
=1)
2409 format_reverse
= mobj
.group('bw')[0] == 'b'
2410 format_type
= (mobj
.group('type') or [None])[0]
2411 not_format_type
= {'v': 'a', 'a': 'v'}
.get(format_type
)
2412 format_modified
= mobj
.group('mod') is not None
2414 format_fallback
= not format_type
and not format_modified
# for b, w
2416 (lambda f
: f
.get('%scodec' % format_type
) != 'none')
2417 if format_type
and format_modified
# bv*, ba*, wv*, wa*
2418 else (lambda f
: f
.get('%scodec' % not_format_type
) == 'none')
2419 if format_type
# bv, ba, wv, wa
2420 else (lambda f
: f
.get('vcodec') != 'none' and f
.get('acodec') != 'none')
2421 if not format_modified
# b, w
2422 else lambda f
: True) # b*, w*
2423 filter_f
= lambda f
: _filter_f(f
) and (
2424 f
.get('vcodec') != 'none' or f
.get('acodec') != 'none')
2426 if format_spec
in self
._format
_selection
_exts
['audio']:
2427 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') != 'none'
2428 elif format_spec
in self
._format
_selection
_exts
['video']:
2429 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') != 'none' and f
.get('vcodec') != 'none'
2430 seperate_fallback
= lambda f
: f
.get('ext') == format_spec
and f
.get('vcodec') != 'none'
2431 elif format_spec
in self
._format
_selection
_exts
['storyboards']:
2432 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') == 'none' and f
.get('vcodec') == 'none'
2434 filter_f
= lambda f
: f
.get('format_id') == format_spec
# id
2436 def selector_function(ctx
):
2437 formats
= list(ctx
['formats'])
2438 matches
= list(filter(filter_f
, formats
)) if filter_f
is not None else formats
2440 if format_fallback
and ctx
['incomplete_formats']:
2441 # for extractors with incomplete formats (audio only (soundcloud)
2442 # or video only (imgur)) best/worst will fallback to
2443 # best/worst {video,audio}-only format
2445 elif seperate_fallback
and not ctx
['has_merged_format']:
2446 # for compatibility with youtube-dl when there is no pre-merged format
2447 matches
= list(filter(seperate_fallback
, formats
))
2448 matches
= LazyList(_check_formats(matches
[::-1 if format_reverse
else 1]))
2450 yield matches
[format_idx
- 1]
2451 except LazyList
.IndexError:
2454 filters
= [self
._build
_format
_filter
(f
) for f
in selector
.filters
]
2456 def final_selector(ctx
):
2457 ctx_copy
= dict(ctx
)
2458 for _filter
in filters
:
2459 ctx_copy
['formats'] = list(filter(_filter
, ctx_copy
['formats']))
2460 return selector_function(ctx_copy
)
2461 return final_selector
2463 stream
= io
.BytesIO(format_spec
.encode())
2465 tokens
= list(_remove_unused_ops(tokenize
.tokenize(stream
.readline
)))
2466 except tokenize
.TokenError
:
2467 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec
)))
2469 class TokenIterator
:
2470 def __init__(self
, tokens
):
2471 self
.tokens
= tokens
2478 if self
.counter
>= len(self
.tokens
):
2479 raise StopIteration()
2480 value
= self
.tokens
[self
.counter
]
2486 def restore_last_token(self
):
2489 parsed_selector
= _parse_format_selection(iter(TokenIterator(tokens
)))
2490 return _build_selector_function(parsed_selector
)
    def _calc_headers(self, info_dict, load_cookies=False):
        res = HTTPHeaderDict(self.params['http_headers'], info_dict.get('http_headers'))

        if load_cookies:  # For --load-info-json
            self._load_cookies(res.get('Cookie'), autoscope=info_dict['url'])  # compat
            self._load_cookies(info_dict.get('cookies'), autoscope=False)
        # The `Cookie` header is removed to prevent leaks and unscoped cookies.
        # See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
        res.pop('Cookie', None)
        cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
        if cookies:
            encoder = LenientSimpleCookie()
            values = []
            for cookie in cookies:
                _, value = encoder.value_encode(cookie.value)
                values.append(f'{cookie.name}={value}')
                if cookie.domain:
                    values.append(f'Domain={cookie.domain}')
                if cookie.path:
                    values.append(f'Path={cookie.path}')
                if cookie.secure:
                    values.append('Secure')
                if cookie.expires:
                    values.append(f'Expires={cookie.expires}')
                if cookie.version:
                    values.append(f'Version={cookie.version}')
            info_dict['cookies'] = '; '.join(values)

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        self.deprecation_warning('"YoutubeDL._calc_cookies" is deprecated and may be removed in a future version')
        return self.cookiejar.get_cookie_header(url)
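
    # Illustrative sketch (not part of the upstream source): after _calc_headers(),
    # per-format request headers live in f['http_headers'] while cookies are kept out
    # of that dict and carried as a string in info_dict['cookies'] instead.
    #
    #     headers = info_dict['formats'][0].get('http_headers', {})  # e.g. User-Agent, Referer
    #     cookie_str = info_dict.get('cookies')  # 'name=value; Domain=...; Path=...'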
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
2572 def _fill_common_fields(self
, info_dict
, final
=True):
2573 # TODO: move sanitization here
2575 title
= info_dict
['fulltitle'] = info_dict
.get('title')
2578 self
.write_debug('Extractor gave empty title. Creating a generic title')
2580 self
.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2581 info_dict
['title'] = f
'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2583 if info_dict
.get('duration') is not None:
2584 info_dict
['duration_string'] = formatSeconds(info_dict
['duration'])
2586 for ts_key
, date_key
in (
2587 ('timestamp', 'upload_date'),
2588 ('release_timestamp', 'release_date'),
2589 ('modified_timestamp', 'modified_date'),
2591 if info_dict
.get(date_key
) is None and info_dict
.get(ts_key
) is not None:
2592 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2593 # see http://bugs.python.org/issue1646728)
2594 with contextlib
.suppress(ValueError, OverflowError, OSError):
2595 upload_date
= datetime
.datetime
.utcfromtimestamp(info_dict
[ts_key
])
2596 info_dict
[date_key
] = upload_date
.strftime('%Y%m%d')
2598 live_keys
= ('is_live', 'was_live')
2599 live_status
= info_dict
.get('live_status')
2600 if live_status
is None:
2601 for key
in live_keys
:
2602 if info_dict
.get(key
) is False:
2604 if info_dict
.get(key
):
2607 if all(info_dict
.get(key
) is False for key
in live_keys
):
2608 live_status
= 'not_live'
2610 info_dict
['live_status'] = live_status
2611 for key
in live_keys
:
2612 if info_dict
.get(key
) is None:
2613 info_dict
[key
] = (live_status
== key
)
2614 if live_status
== 'post_live':
2615 info_dict
['was_live'] = True
2617 # Auto generate title fields corresponding to the *_number fields when missing
2618 # in order to always have clean titles. This is very common for TV series.
2619 for field
in ('chapter', 'season', 'episode'):
2620 if final
and info_dict
.get('%s_number' % field
) is not None and not info_dict
.get(field
):
2621 info_dict
[field
] = '%s %d' % (field
.capitalize(), info_dict
['%s_number' % field
])
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
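
    # Illustrative sketch (not part of the upstream source): the FormatSorter used
    # above honours the same field names as --format-sort; callers can also set the
    # 'format_sort' param, e.g.:
    #
    #     ydl_opts = {'format_sort': ['res:1080', 'fps', '+size']}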
2633 def process_video_result(self
, info_dict
, download
=True):
2634 assert info_dict
.get('_type', 'video') == 'video'
2635 self
._num
_videos
+= 1
2637 if 'id' not in info_dict
:
2638 raise ExtractorError('Missing "id" field in extractor result', ie
=info_dict
['extractor'])
2639 elif not info_dict
.get('id'):
2640 raise ExtractorError('Extractor failed to obtain "id"', ie
=info_dict
['extractor'])
2642 def report_force_conversion(field
, field_not
, conversion
):
2643 self
.report_warning(
2644 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2645 % (field
, field_not
, conversion
))
2647 def sanitize_string_field(info
, string_field
):
2648 field
= info
.get(string_field
)
2649 if field
is None or isinstance(field
, str):
2651 report_force_conversion(string_field
, 'a string', 'string')
2652 info
[string_field
] = str(field
)
2654 def sanitize_numeric_fields(info
):
2655 for numeric_field
in self
._NUMERIC
_FIELDS
:
2656 field
= info
.get(numeric_field
)
2657 if field
is None or isinstance(field
, (int, float)):
2659 report_force_conversion(numeric_field
, 'numeric', 'int')
2660 info
[numeric_field
] = int_or_none(field
)
2662 sanitize_string_field(info_dict
, 'id')
2663 sanitize_numeric_fields(info_dict
)
2664 if info_dict
.get('section_end') and info_dict
.get('section_start') is not None:
2665 info_dict
['duration'] = round(info_dict
['section_end'] - info_dict
['section_start'], 3)
2666 if (info_dict
.get('duration') or 0) <= 0 and info_dict
.pop('duration', None):
2667 self
.report_warning('"duration" field is negative, there is an error in extractor')
2669 chapters
= info_dict
.get('chapters') or []
2670 if chapters
and chapters
[0].get('start_time'):
2671 chapters
.insert(0, {'start_time': 0}
)
2673 dummy_chapter
= {'end_time': 0, 'start_time': info_dict.get('duration')}
2674 for idx
, (prev
, current
, next_
) in enumerate(zip(
2675 (dummy_chapter
, *chapters
), chapters
, (*chapters
[1:], dummy_chapter
)), 1):
2676 if current
.get('start_time') is None:
2677 current
['start_time'] = prev
.get('end_time')
2678 if not current
.get('end_time'):
2679 current
['end_time'] = next_
.get('start_time')
2680 if not current
.get('title'):
2681 current
['title'] = f
'<Untitled Chapter {idx}>'
2683 if 'playlist' not in info_dict
:
2684 # It isn't part of a playlist
2685 info_dict
['playlist'] = None
2686 info_dict
['playlist_index'] = None
2688 self
._sanitize
_thumbnails
(info_dict
)
2690 thumbnail
= info_dict
.get('thumbnail')
2691 thumbnails
= info_dict
.get('thumbnails')
2693 info_dict
['thumbnail'] = sanitize_url(thumbnail
)
2695 info_dict
['thumbnail'] = thumbnails
[-1]['url']
2697 if info_dict
.get('display_id') is None and 'id' in info_dict
:
2698 info_dict
['display_id'] = info_dict
['id']
2700 self
._fill
_common
_fields
(info_dict
)
2702 for cc_kind
in ('subtitles', 'automatic_captions'):
2703 cc
= info_dict
.get(cc_kind
)
2705 for _
, subtitle
in cc
.items():
2706 for subtitle_format
in subtitle
:
2707 if subtitle_format
.get('url'):
2708 subtitle_format
['url'] = sanitize_url(subtitle_format
['url'])
2709 if subtitle_format
.get('ext') is None:
2710 subtitle_format
['ext'] = determine_ext(subtitle_format
['url']).lower()
2712 automatic_captions
= info_dict
.get('automatic_captions')
2713 subtitles
= info_dict
.get('subtitles')
2715 info_dict
['requested_subtitles'] = self
.process_subtitles(
2716 info_dict
['id'], subtitles
, automatic_captions
)
2718 formats
= self
._get
_formats
(info_dict
)
2720 # Backward compatibility with InfoExtractor._sort_formats
2721 field_preference
= (formats
or [{}])[0].pop('__sort_fields', None)
2722 if field_preference
:
2723 info_dict
['_format_sort_fields'] = field_preference
2725 info_dict
['_has_drm'] = any( # or None ensures --clean-infojson removes it
2726 f
.get('has_drm') and f
['has_drm'] != 'maybe' for f
in formats
) or None
2727 if not self
.params
.get('allow_unplayable_formats'):
2728 formats
= [f
for f
in formats
if not f
.get('has_drm') or f
['has_drm'] == 'maybe']
2730 if formats
and all(f
.get('acodec') == f
.get('vcodec') == 'none' for f
in formats
):
2731 self
.report_warning(
2732 f
'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
2733 'only images are available for download. Use --list-formats to see them'.capitalize())
2735 get_from_start
= not info_dict
.get('is_live') or bool(self
.params
.get('live_from_start'))
2736 if not get_from_start
:
2737 info_dict
['title'] += ' ' + datetime
.datetime
.now().strftime('%Y-%m-%d %H:%M')
2738 if info_dict
.get('is_live') and formats
:
2739 formats
= [f
for f
in formats
if bool(f
.get('is_from_start')) == get_from_start
]
2740 if get_from_start
and not formats
:
2741 self
.raise_no_formats(info_dict
, msg
=(
2742 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2743 'If you want to download from the current time, use --no-live-from-start'))
2745 def is_wellformed(f
):
2748 self
.report_warning(
2749 '"url" field is missing or empty - skipping format, '
2750 'there is an error in extractor')
2752 if isinstance(url
, bytes):
2753 sanitize_string_field(f
, 'url')
2756 # Filter out malformed formats for better extraction robustness
2757 formats
= list(filter(is_wellformed
, formats
or []))
2760 self
.raise_no_formats(info_dict
)
2762 for format
in formats
:
2763 sanitize_string_field(format
, 'format_id')
2764 sanitize_numeric_fields(format
)
2765 format
['url'] = sanitize_url(format
['url'])
2766 if format
.get('ext') is None:
2767 format
['ext'] = determine_ext(format
['url']).lower()
2768 if format
.get('protocol') is None:
2769 format
['protocol'] = determine_protocol(format
)
2770 if format
.get('resolution') is None:
2771 format
['resolution'] = self
.format_resolution(format
, default
=None)
2772 if format
.get('dynamic_range') is None and format
.get('vcodec') != 'none':
2773 format
['dynamic_range'] = 'SDR'
2774 if format
.get('aspect_ratio') is None:
2775 format
['aspect_ratio'] = try_call(lambda: round(format
['width'] / format
['height'], 2))
2776 if (not format
.get('manifest_url') # For fragmented formats, "tbr" is often max bitrate and not average
2777 and info_dict
.get('duration') and format
.get('tbr')
2778 and not format
.get('filesize') and not format
.get('filesize_approx')):
2779 format
['filesize_approx'] = int(info_dict
['duration'] * format
['tbr'] * (1024 / 8))
2780 format
['http_headers'] = self
._calc
_headers
(collections
.ChainMap(format
, info_dict
), load_cookies
=True)
2782 # Safeguard against old/insecure infojson when using --load-info-json
2783 if info_dict
.get('http_headers'):
2784 info_dict
['http_headers'] = HTTPHeaderDict(info_dict
['http_headers'])
2785 info_dict
['http_headers'].pop('Cookie', None)
2787 # This is copied to http_headers by the above _calc_headers and can now be removed
2788 if '__x_forwarded_for_ip' in info_dict
:
2789 del info_dict
['__x_forwarded_for_ip']
2793 '_format_sort_fields': info_dict
.get('_format_sort_fields')
2796 # Sanitize and group by format_id
2798 for i
, format
in enumerate(formats
):
2799 if not format
.get('format_id'):
2800 format
['format_id'] = str(i
)
2802 # Sanitize format_id from characters used in format selector expression
2803 format
['format_id'] = re
.sub(r
'[\s,/+\[\]()]', '_', format
['format_id'])
2804 formats_dict
.setdefault(format
['format_id'], []).append(format
)
2806 # Make sure all formats have unique format_id
2807 common_exts
= set(itertools
.chain(*self
._format
_selection
_exts
.values()))
2808 for format_id
, ambiguous_formats
in formats_dict
.items():
2809 ambigious_id
= len(ambiguous_formats
) > 1
2810 for i
, format
in enumerate(ambiguous_formats
):
2812 format
['format_id'] = '%s-%d' % (format_id
, i
)
2813 # Ensure there is no conflict between id and ext in format selection
2814 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2815 if format
['format_id'] != format
['ext'] and format
['format_id'] in common_exts
:
2816 format
['format_id'] = 'f%s' % format
['format_id']
2818 if format
.get('format') is None:
2819 format
['format'] = '{id} - {res}{note}'.format(
2820 id=format
['format_id'],
2821 res
=self
.format_resolution(format
),
2822 note
=format_field(format
, 'format_note', ' (%s)'),
2825 if self
.params
.get('check_formats') is True:
2826 formats
= LazyList(self
._check
_formats
(formats
[::-1]), reverse
=True)
2828 if not formats
or formats
[0] is not info_dict
:
2829 # only set the 'formats' fields if the original info_dict list them
2830 # otherwise we end up with a circular reference, the first (and unique)
2831 # element in the 'formats' field in info_dict is info_dict itself,
2832 # which can't be exported to json
2833 info_dict
['formats'] = formats
2835 info_dict
, _
= self
.pre_process(info_dict
)
2837 if self
._match
_entry
(info_dict
, incomplete
=self
._format
_fields
) is not None:
2840 self
.post_extract(info_dict
)
2841 info_dict
, _
= self
.pre_process(info_dict
, 'after_filter')
2843 # The pre-processors may have modified the formats
2844 formats
= self
._get
_formats
(info_dict
)
2846 list_only
= self
.params
.get('simulate') == 'list_only'
2847 interactive_format_selection
= not list_only
and self
.format_selector
== '-'
2848 if self
.params
.get('list_thumbnails'):
2849 self
.list_thumbnails(info_dict
)
2850 if self
.params
.get('listsubtitles'):
2851 if 'automatic_captions' in info_dict
:
2852 self
.list_subtitles(
2853 info_dict
['id'], automatic_captions
, 'automatic captions')
2854 self
.list_subtitles(info_dict
['id'], subtitles
, 'subtitles')
2855 if self
.params
.get('listformats') or interactive_format_selection
:
2856 self
.list_formats(info_dict
)
2858 # Without this printing, -F --print-json will not work
2859 self
.__forced
_printings
(info_dict
)
2862 format_selector
= self
.format_selector
2864 if interactive_format_selection
:
2865 req_format
= input(self
._format
_screen
('\nEnter format selector ', self
.Styles
.EMPHASIS
)
2866 + '(Press ENTER for default, or Ctrl+C to quit)'
2867 + self
._format
_screen
(': ', self
.Styles
.EMPHASIS
))
2869 format_selector
= self
.build_format_selector(req_format
) if req_format
else None
2870 except SyntaxError as err
:
2871 self
.report_error(err
, tb
=False, is_error
=False)
2874 if format_selector
is None:
2875 req_format
= self
._default
_format
_spec
(info_dict
, download
=download
)
2876 self
.write_debug(f
'Default format spec: {req_format}')
2877 format_selector
= self
.build_format_selector(req_format
)
2879 formats_to_download
= list(format_selector({
2881 'has_merged_format': any('none' not in (f
.get('acodec'), f
.get('vcodec')) for f
in formats
),
2882 'incomplete_formats': (all(f
.get('vcodec') == 'none' for f
in formats
) # No formats with video
2883 or all(f
.get('acodec') == 'none' for f
in formats
)), # OR, No formats with audio
2885 if interactive_format_selection
and not formats_to_download
:
2886 self
.report_error('Requested format is not available', tb
=False, is_error
=False)
2890 if not formats_to_download
:
2891 if not self
.params
.get('ignore_no_formats_error'):
2892 raise ExtractorError(
2893 'Requested format is not available. Use --list-formats for a list of available formats',
2894 expected
=True, video_id
=info_dict
['id'], ie
=info_dict
['extractor'])
2895 self
.report_warning('Requested format is not available')
2896 # Process what we can, even without any available formats.
2897 formats_to_download
= [{}]
2899 requested_ranges
= tuple(self
.params
.get('download_ranges', lambda *_
: [{}])(info_dict
, self
))
2900 best_format
, downloaded_formats
= formats_to_download
[-1], []
2902 if best_format
and requested_ranges
:
2903 def to_screen(*msg
):
2904 self
.to_screen(f
'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2906 to_screen(f
'Downloading {len(formats_to_download)} format(s):',
2907 (f
['format_id'] for f
in formats_to_download
))
2908 if requested_ranges
!= ({}, ):
2909 to_screen(f
'Downloading {len(requested_ranges)} time ranges:',
2910 (f
'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c
in requested_ranges
))
2911 max_downloads_reached
= False
2913 for fmt
, chapter
in itertools
.product(formats_to_download
, requested_ranges
):
2914 new_info
= self
._copy
_infodict
(info_dict
)
2915 new_info
.update(fmt
)
2916 offset
, duration
= info_dict
.get('section_start') or 0, info_dict
.get('duration') or float('inf')
2917 end_time
= offset
+ min(chapter
.get('end_time', duration
), duration
)
2918 # duration may not be accurate. So allow deviations <1sec
2919 if end_time
== float('inf') or end_time
> offset
+ duration
+ 1:
2921 if chapter
or offset
:
2923 'section_start': offset
+ chapter
.get('start_time', 0),
2924 'section_end': end_time
,
2925 'section_title': chapter
.get('title'),
2926 'section_number': chapter
.get('index'),
2928 downloaded_formats
.append(new_info
)
2930 self
.process_info(new_info
)
2931 except MaxDownloadsReached
:
2932 max_downloads_reached
= True
2933 self
._raise
_pending
_errors
(new_info
)
2934 # Remove copied info
2935 for key
, val
in tuple(new_info
.items()):
2936 if info_dict
.get(key
) == val
:
2938 if max_downloads_reached
:
2941 write_archive
= {f.get('__write_download_archive', False) for f in downloaded_formats}
2942 assert write_archive
.issubset({True, False, 'ignore'}
)
2943 if True in write_archive
and False not in write_archive
:
2944 self
.record_download_archive(info_dict
)
2946 info_dict
['requested_downloads'] = downloaded_formats
2947 info_dict
= self
.run_all_pps('after_video', info_dict
)
2948 if max_downloads_reached
:
2949 raise MaxDownloadsReached()
2951 # We update the info dict with the selected best quality format (backwards compatibility)
2952 info_dict
.update(best_format
)
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitlelangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
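
    # Illustrative sketch (not part of the original source): subtitle selection is
    # driven entirely by params. Assuming the public YoutubeDL constructor, something
    # like the following would make process_subtitles() pick Japanese plus any
    # English-like track, preferring vtt when available:
    #
    #   ydl = YoutubeDL({'writesubtitles': True, 'writeautomaticsub': True,
    #                    'subtitleslangs': ['ja', 'en.*'], 'subtitlesformat': 'vtt/best'})
    #   # process_subtitles() is then called internally with the extractor's
    #   # normal subtitles and automatic captions dicts.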
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy.setdefault('filename', self.prepare_filename(info_dict))
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_copy['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_copy['urls'] = info_dict['url'] + info_dict.get('play_path', '')
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)

        return info_copy
    def __forced_printings(self, info_dict, filename=None, incomplete=True):
        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        if filename:
            info_dict['filename'] = filename
        info_copy = self._forceprint('video', info_dict)

        def print_field(field, actual_field=None, optional=False):
            if actual_field is None:
                actual_field = field
            if self.params.get(f'force{field}') and (
                    info_copy.get(field) is not None or (not optional and not incomplete)):
                self.to_stdout(info_copy[actual_field])

        print_field('title')
        print_field('id')
        print_field('url', 'urls')
        print_field('thumbnail', optional=True)
        print_field('description', optional=True)
        print_field('filename')
        if self.params.get('forceduration') and info_copy.get('duration') is not None:
            self.to_stdout(formatSeconds(info_copy['duration']))
        print_field('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)
            return

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params

        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None
    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()

        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except OSError:
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)

                    info_dict['__real_download'] = False
                    # NOTE: Copy so that original format dicts are not modified
                    info_dict['requested_formats'] = list(map(dict, info_dict['requested_formats']))

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in info_dict['requested_formats']:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        do_fixup = False
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper
    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
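
    # Minimal usage sketch (illustrative, not part of the original source; the URL
    # below is a placeholder):
    #
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://example.com/some-video'])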
    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode
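
    # Sketch (illustrative, not part of the original source): this is the
    # programmatic counterpart of the --load-info-json CLI option, e.g.
    #
    #   YoutubeDL(params).download_with_info_file('video.info.json')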
    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': REPOSITORY,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'filename', 'infojson_filename', 'original_url',
                'playlist_autonumber', '_format_sort_fields',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
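
    # Sketch (illustrative, not part of the original source): sanitize_info() is what
    # makes an extracted info dict JSON-serializable, e.g.
    #
    #   info = ydl.extract_info(url, download=False)
    #   print(json.dumps(ydl.sanitize_info(info)))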
    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)
    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]
    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})
    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict
    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info
    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)
    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
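
    # Sketch (illustrative, not part of the original source): postprocessors normally
    # enter this pipeline via params['postprocessors'] or, assuming the
    # add_post_processor() API, programmatically:
    #
    #   ydl.add_post_processor(MyCustomPP(), when='after_move')  # MyCustomPP is hypothetical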
    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)
    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)
    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
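
    # Sketch (illustrative, not part of the original source): archive lookup and
    # recording are enabled by the 'download_archive' param, e.g.
    #
    #   YoutubeDL({'download_archive': 'archive.txt'})
    #
    # Each recorded line is an archive id built from the extractor key and video id.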
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
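
    # Illustrative examples (not part of the original source) of what
    # format_resolution() returns for a few format dicts:
    #   {'width': 1920, 'height': 1080}            -> '1920x1080'
    #   {'height': 720}                            -> '720p'
    #   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}  -> 'audio only'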
    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers
    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']
    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return None
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field)
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim, (
                    format_field(f, 'filesize', ' \t%s', func=format_bytes)
                    or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(format_field(f, 'language', '[%s]'), join_nonempty(
                    self._format_out('UNSUPPORTED', self.Styles.BAD_FORMAT) if f.get('ext') in ('f4f', 'f4m') else None,
                    (self._format_out('Maybe DRM', self.Styles.WARNING) if f.get('has_drm') == 'maybe'
                     else self._format_out('DRM', self.Styles.BAD_FORMAT) if f.get('has_drm') else None),
                    format_field(f, 'format_note'),
                    format_field(f, 'container', ignore=(None, f.get('ext'))),
                    delim=', '), delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])
    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)
    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            additional_info = []
            if os.environ.get('TERM', '').lower() == 'dumb':
                additional_info.append('dumb')
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                additional_info.append('No VT' if WINDOWS_VT_MODE is False else 'No ANSI')
            if additional_info:
                ret = f'{ret} ({",".join(additional_info)})'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
            f'{CHANNEL}@{__version__}',
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        write_debug(f'Proxy map: {self.proxies}')
        # write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers)}')
        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' % latest_version)
    @functools.cached_property
    def proxies(self):
        """Global proxy configuration"""
        opts_proxy = self.params.get('proxy')
        if opts_proxy is not None:
            if opts_proxy == '':
                opts_proxy = '__noproxy__'
            proxies = {'all': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # compat. Set HTTPS_PROXY to __noproxy__ to revert
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']

        return proxies
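
    # Sketch (illustrative, not part of the original source): the 'proxy' param
    # overrides environment proxies for all requests, e.g.
    #
    #   YoutubeDL({'proxy': 'socks5://127.0.0.1:9050'})
    #
    # An empty string maps to '__noproxy__', i.e. direct connections.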
    @functools.cached_property
    def cookiejar(self):
        """Global cookiejar instance"""
        return load_cookies(
            self.params.get('cookiefile'), self.params.get('cookiesfrombrowser'), self)
    @property
    def _opener(self):
        """
        Get a urllib OpenerDirector from the Urllib handler (deprecated).
        """
        self.deprecation_warning('YoutubeDL._opener() is deprecated, use YoutubeDL.urlopen()')
        handler = self._request_director.handlers['Urllib']
        return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = Request(req)
        elif isinstance(req, urllib.request.Request):
            self.deprecation_warning(
                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
                'Use yt_dlp.networking.common.Request instead.')
            req = urllib_req_to_req(req)
        assert isinstance(req, Request)

        # compat: Assume user:pass url params are basic auth
        url, basic_auth_header = extract_basic_auth(req.url)
        if basic_auth_header:
            req.headers['Authorization'] = basic_auth_header
        req.url = sanitize_url(url)

        clean_proxies(proxies=req.proxies, headers=req.headers)
        clean_headers(req.headers)

        try:
            return self._request_director.send(req)
        except NoSupportingHandlers as e:
            for ue in e.unsupported_errors:
                if not (ue.handler and ue.msg):
                    continue
                if ue.handler.RH_KEY == 'Urllib' and 'unsupported url scheme: "file"' in ue.msg.lower():
                    raise RequestError(
                        'file:// URLs are disabled by default in yt-dlp for security reasons. '
                        'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
            raise
        except SSLError as e:
            if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
                raise RequestError('UNSAFE_LEGACY_RENEGOTIATION_DISABLED: Try using --legacy-server-connect', cause=e) from e
            elif 'SSLV3_ALERT_HANDSHAKE_FAILURE' in str(e):
                raise RequestError(
                    'SSLV3_ALERT_HANDSHAKE_FAILURE: The server may not support the current cipher list. '
                    'Try using --legacy-server-connect', cause=e) from e
            raise
        except HTTPError as e:  # TODO: Remove in a future release
            raise _CompatHTTPError(e) from e
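
    # Sketch (illustrative, not part of the original source): urlopen() accepts a
    # plain URL string or a yt_dlp.networking.Request (imported above as Request), e.g.
    #
    #   data = ydl.urlopen(Request('https://example.com', headers={'Accept': '*/*'})).read()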
    def build_request_director(self, handlers):
        logger = _YDLLogger(self)
        headers = self.params.get('http_headers').copy()
        proxies = self.proxies.copy()
        clean_headers(headers)
        clean_proxies(proxies, headers)

        director = RequestDirector(logger=logger, verbose=self.params.get('debug_printtraffic'))
        for handler in handlers:
            director.add_handler(handler(
                logger=logger,
                headers=headers,
                cookiejar=self.cookiejar,
                proxies=proxies,
                prefer_system_certs='no-certifi' in self.params['compat_opts'],
                verify=not self.params.get('nocheckcertificate'),
                **traverse_obj(self.params, {
                    'verbose': 'debug_printtraffic',
                    'source_address': 'source_address',
                    'timeout': 'socket_timeout',
                    'legacy_ssl_support': 'legacyserverconnect',
                    'enable_file_urls': 'enable_file_urls',
                    'client_cert': {
                        'client_certificate': 'client_certificate',
                        'client_certificate_key': 'client_certificate_key',
                        'client_certificate_password': 'client_certificate_password',
                    },
                }),
            ))
        return director
    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None
    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True
    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There are no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There are no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    if isinstance(err, HTTPError) and err.status == 404:
                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                    else:
                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                    thumbnails.pop(idx)
            if ret and not write_all: