from string import Formatter, ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.common import UnsupportedURLIE
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .plugins import directories as plugin_directories
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import REPOSITORY, current_git_head, detect_variant
from .utils import (
    PerRequestProxyHandler,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
    format_decimal_suffix,
    orderedSet_from_options,
    register_socks_protocols,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".
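
    A minimal usage sketch (the option and URL shown are illustrative, not
    prescriptive):

        import yt_dlp

        with yt_dlp.YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])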

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
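                       E.g. (illustrative) {'video': [('%(title)s', 'titles.txt')]}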
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
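                       E.g. (illustrative) {'default': '%(title)s [%(id)s].%(ext)s'}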
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    allowed_extractors: List of regexes to match against extractor names that are allowed
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Print everything to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages, e.g. ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  A set, or the name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, the name of the keyring,
                       and the container name, e.g. ('chrome', ) or
                       ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    enable_file_urls:  Enable file:// URLs. This is disabled by default for security reasons.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False:     Always process (default)
                       * True:      Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
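                       E.g. (illustrative)
                       [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'}]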
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
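                       A minimal hook (illustrative):
                           def hook(d):
                               if d['status'] == 'finished':
                                   print('Done downloading', d['filename'])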
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       - Raise utils.DownloadCancelled(msg) to abort remaining
                         downloads when a video is rejected.
                       match_filter_func in utils.py is one example for this.
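                       A minimal filter (illustrative) that rejects videos
                       longer than an hour:
                           def skip_long(info_dict, *, incomplete):
                               if (info_dict.get('duration') or 0) > 3600:
                                   return 'The video is longer than an hour'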
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
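                       E.g. (illustrative) {'http': lambda n: 2 ** n} sleeps
                       2, 4, 8, ... seconds between successive HTTP retries.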
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
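                       E.g. (illustrative) lambda info_dict, ydl: [{'start_time': 0, 'end_time': 60}]
                       downloads only the first minute of each video.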
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    live_from_start:   Whether to download livestream videos from the start

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PPs.
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
                       - `raise DownloadCancelled(msg)` in match_filter instead
    force_generic_extractor: Force downloader to use the generic extractor
                       - Use allowed_extractors = ['generic', 'default']
    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )
        try:
            windows_enable_vt_mode()
        except Exception as e:
            self.write_debug(f'Failed to enable VT mode: {e}')

        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecated_feature(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if auto_init and auto_init != 'no_verbose_header':
            self.print_debug_header()

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecated_feature(msg)

        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if self.params.get('simulate') is None and any((
            self.params.get('list_thumbnails'),
            self.params.get('listformats'),
            self.params.get('listsubtitles'),
        )):
            self.params['simulate'] = 'list_only'

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if auto_init:
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            archive = set()
            if fn is None:
                return archive
            elif not is_path_like(fn):
                return fn

            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
            return archive

        self.archive = preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
        all_ies['end'] = UnsupportedURLIE()
        try:
            ie_names = orderedSet_from_options(
                self.params.get('allowed_extractors', ['default']), {
                    'all': list(all_ies),
                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
                }, use_regex=True)
        except re.error as e:
            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
        for name in ie_names:
            self.add_info_extractor(all_ies[name])
        self.write_debug(f'Loaded {len(ie_names)} extractors')

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. '
                                     'Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. '
                                     'Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen, only_once=only_once)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                return fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback
, *args
, **kwargs
):
981 return self
._format
_text
(self
._out
_files
.out
, self
._allow
_colors
.out
, *args
, **kwargs
)
983 def _format_screen(self
, *args
, **kwargs
):
984 return self
._format
_text
(self
._out
_files
.screen
, self
._allow
_colors
.screen
, *args
, **kwargs
)
986 def _format_err(self
, *args
, **kwargs
):
987 return self
._format
_text
(self
._out
_files
.error
, self
._allow
_colors
.error
, *args
, **kwargs
)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message, *, stacklevel=0):
        deprecation_warning(
            message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False)

    def deprecated_feature(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'Deprecated Feature: {message}')
        self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or Print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join(random.choices(ascii_letters, k=32))
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int, slice or "{field, ...}"
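        # E.g. (illustrative) "tags.0", "subtitles.en.-1.ext" or "id.3:7"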
        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
            'inner': FIELD_INNER_RE,
            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
        }
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(fields):
            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
                      for f in ([x] if x.startswith('{') else x.split('.'))]
            for i in (0, -1):
                if fields and not fields[i]:
                    fields.pop(i)

            for i, f in enumerate(fields):
                if not f.startswith('{'):
                    continue
                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}

            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        class _ReplacementFormatter(Formatter):
            def get_field(self, field_name, args, kwargs):
                if field_name.isdigit():
                    return args[0], -1
                raise ValueError('Unsupported field')

        replacement_formatter = _ReplacementFormatter()

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na

            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            if value is None:
                value, fmt = default, 's'
            elif replacement is not None:
                try:
                    value = replacement_formatter.format(replacement, value)
                except ValueError:
                    value, fmt = default, 's'

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(
                    value, default=_dumpjson_default,
                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(str(value)), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    str(value)), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                if fmt[-1] == 'r':
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """Returns None if the file should be downloaded"""
        _type = info_dict.get('_type', 'video')
        assert incomplete or _type == 'video', 'Only video result can be considered complete'

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if _type in ('playlist', 'multi_video'):
                return None
            elif _type in ('url', 'url_transparent') and not try_call(
                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
                return None

            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'

            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is None:
                return None

            cancelled = None
            try:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
            except DownloadCancelled as err:
                if err.msg is not NO_DEFAULT:
                    raise
                ret, cancelled = err.msg, err

            if ret is NO_DEFAULT:
                while True:
                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                    reply = input(self._format_screen(
                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                    if reply in {'y', ''}:
                        return None
                    elif reply == 'n':
                        if cancelled:
                            raise type(cancelled)(f'Skipping {video_title}')
                        return f'Skipping {video_title}'
            return ret

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            try:
                reason = check_filter()
            except DownloadCancelled as e:
                reason, break_opt, break_err = e.msg, 'match_filter', type(e)
            else:
                break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return None

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)
    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Extract and return the information dictionary of the URL

        Arguments:
        @param url          URL to extract

        Keyword arguments:
        @param download     Whether to download videos
        @param process      Whether to resolve all unresolved references (URLs, playlist items).
                            Must be True for download to work
        @param ie_key       Use only the extractor with this key

        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
        @param force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
        else:
            ies = self._ies

        for key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
                self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
        else:
            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
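    # Usage sketch (hypothetical URL; available fields vary by extractor).
    # extract_info() is the public entry point; ie_key bypasses suitable()
    # matching and forces one specific extractor:
    #
    #   with YoutubeDL({'skip_download': True}) as ydl:
    #       info = ydl.extract_info('https://example.com/watch?v=xyz',
    #                               download=False, ie_key='Generic')
    #       print(info.get('title'))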
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper
    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif self.params.get('quiet'):
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
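    # Note: 'wait_for_video' is a (min_secs, max_secs) tuple (--wait-for-video).
    # When the release time is unknown, the retry delay is picked between the
    # two bounds; a known release time is clamped to them. Example config:
    #
    #   ydl = YoutubeDL({'wait_for_video': (60, 600)})  # retry after 1-10 minutes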
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                    self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url') and not extra_info.get('original_url'):
                extra_info = {'original_url': ie_result['original_url'], **extra_info}

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self._fill_common_fields(info_copy, False)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result.get('webpage_url')  # Playlists may not have webpage_url
            if webpage_url and webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
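    # Result-type summary (informal, mirroring the branches above):
    #   'video'                   -> process_video_result()
    #   'url'                     -> re-dispatched via extract_info()
    #   'url_transparent'         -> like 'url', but outer metadata wins
    #   'playlist'/'multi_video'  -> __process_playlist()
    #   'compat_list'             -> deprecated youtube-dl-era list result
    # A minimal url_transparent result an extractor might return:
    #
    #   {'_type': 'url_transparent', 'url': 'https://example.com/embed/1',
    #    'title': 'Title taken from the embedding page'}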
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)
    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        if ie_result.get('webpage_url'):
            info.update({
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
            })
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
            'extractor': ie_result['extractor'],
            'extractor_key': ie_result['extractor_key'],
        }
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
            if not ie_result.get('playlist_count'):
                # Better to do this after potentially exhausting entries
                ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                # For compatibility with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
                resolved_entries[i] = (playlist_index, NO_DEFAULT)
                continue

            self.to_screen('[download] Downloading item %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            }, extra))
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
            # Do not set for full playlist
            ie_result.pop('requested_entries')

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
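    # Params steering the playlist loop above (all real options; the values
    # are only an example):
    #
    #   ydl = YoutubeDL({
    #       'lazy_playlist': True,            # resolve entries on demand
    #       'skip_playlist_after_errors': 3,  # stop after 3 failed items
    #       'playlistrandom': False,          # unsupported with lazy_playlist
    #   })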
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>[\w.-]+)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
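    # Usage sketch: the returned predicate operates on a single format dict,
    # so a spec like 'height>=720' or 'ext=mp4' can be applied with a plain
    # comprehension (hypothetical `info` variable):
    #
    #   keep = ydl._build_format_filter('height>=720')
    #   hd_formats = [f for f in info['formats'] if keep(f)]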
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
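    # Outcome summary (derived from the logic above): with a working merger
    # and a real output file, the default is 'bestvideo*+bestaudio/best';
    # without ffmpeg, when writing to '-', or for live streams without
    # live_from_start it degrades to 'best/bestvideo+bestaudio'; the
    # 'format-spec' compat_opt forces 'bestvideo+bestaudio/best'. E.g.:
    #
    #   YoutubeDL({'outtmpl': {'default': '-'}})  # -> 'best/bestvideo+bestaudio'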
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings.
            # E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line
        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors
        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                    'aspect_ratio': the_only_video.get('aspect_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels'),
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)
        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats']:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = formats
                        elif seperate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector
        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
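    # Usage sketch: build_format_selector() compiles a spec such as 'bv*+ba/b'
    # or 'bestvideo[height<=720]+bestaudio' into a callable over a selection
    # context (process_video_result constructs the real ctx):
    #
    #   selector = ydl.build_format_selector('bv*+ba/b')
    #   chosen = list(selector({'formats': formats,
    #                           'has_merged_format': False,
    #                           'incomplete_formats': False}))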
    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})

        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')
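    # Precedence sketch: per-request headers from the info dict are merged
    # over the global ones, then matching cookies from the jar are added:
    #
    #   ydl = YoutubeDL({'http_headers': {'User-Agent': 'curl/8.0'}})
    #   # info_dict = {'url': ..., 'http_headers': {'Referer': 'https://example.com'}}
    #   # -> the request carries both headers plus any applicable Cookie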
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
    def _fill_common_fields(self, info_dict, final=True):
        # TODO: move sanitization here
        if final:
            title = info_dict['fulltitle'] = info_dict.get('title')
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)
        if live_status == 'post_live':
            info_dict['was_live'] = True

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
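    # Worked example for the timestamp-to-date conversion above (UTC):
    #
    #   info = {'timestamp': 1577836800}  # 2020-01-01T00:00:00Z
    #   ydl._fill_common_fields(info, final=False)
    #   info['upload_date']               # -> '20200101'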
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)
    def sort_formats(self, info_dict):
        formats = self._get_formats(info_dict)
        formats.sort(key=FormatSorter(
            self, info_dict.get('_format_sort_fields') or []).calculate_preference)
    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'
        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        formats = self._get_formats(info_dict)

        # Backward compatibility with InfoExtractor._sort_formats
        field_preference = (formats or [{}])[0].pop('__sort_fields', None)
        if field_preference:
            info_dict['_format_sort_fields'] = field_preference

        # or None ensures --clean-infojson removes it
        info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]

        if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
            self.report_warning(
                f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}'
                'only images are available for download. Use --list-formats to see them'.capitalize())

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))
        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats or []))

        if not formats:
            self.raise_no_formats(info_dict)

        for format in formats:
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if format.get('aspect_ratio') is None:
                format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
            format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict))

        # This is copied to http_headers by the above _calc_headers and can now be removed
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']
        self.sort_formats({
            'formats': formats,
            '_format_sort_fields': info_dict.get('_format_sort_fields'),
        })

        # Sanitize and group by format_id
        formats_dict = {}
        for i, format in enumerate(formats):
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            formats_dict.setdefault(format['format_id'], []).append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

                if format.get('format') is None:
                    format['format'] = '{id} - {res}{note}'.format(
                        id=format['format_id'],
                        res=self.format_resolution(format),
                        note=format_field(format, 'format_note', ' (%s)'),
                    )

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = self._get_formats(info_dict)

        list_only = self.params.get('simulate') == 'list_only'
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return info_dict
        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        while True:
            if interactive_format_selection:
                req_format = input(
                    self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format)
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (
                    # All formats are video-only or
                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                    # all formats are audio-only
                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]
        requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self))
        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format and requested_ranges:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges != ({}, ):
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                end_time = offset + min(chapter.get('end_time', duration), duration)
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        # duration may not be accurate. So allow deviations <1sec
                        'section_end': end_time if end_time <= offset + duration + 1 else None,
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict
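    # Example for 'download_ranges': a callable (info_dict, ydl) -> iterable of
    # {'start_time': ..., 'end_time': ...} dicts. yt_dlp.utils.download_range_func
    # builds one from chapter patterns and/or raw second ranges:
    #
    #   from yt_dlp.utils import download_range_func
    #   ydl = YoutubeDL({'download_ranges': download_range_func(None, [(0, 30)])})
    #   # downloads only the first 30 seconds of each selected format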
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if not available_subs or (
                not self.params.get('writesubtitles')
                and not self.params.get('writeautomaticsub')):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            try:
                requested_langs = orderedSet_from_options(
                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
            except re.error as e:
                raise ValueError(f'Wrong regex for subtitleslangs: {e.pattern}')
        else:
            requested_langs = LazyList(itertools.chain(
                ['en'] if 'en' in normal_sub_langs else [],
                filter(lambda f: f.startswith('en'), normal_sub_langs),
                ['en'] if 'en' in all_sub_langs else [],
                filter(lambda f: f.startswith('en'), all_sub_langs),
                normal_sub_langs, all_sub_langs,
            ))[:1]
        if requested_langs:
            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
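    # Example: with no 'subtitleslangs', the fallback above prefers a single
    # English-ish track; explicit selection supports regexes and exclusions:
    #
    #   ydl = YoutubeDL({'writesubtitles': True,
    #                    'subtitleslangs': ['en.*', '-live_chat']})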
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
            if not mobj:
                return tmpl

            fmt = '%({})s'
            if tmpl.startswith('{'):
                tmpl, fmt = f'.{tmpl}', '%({})j'
            if tmpl.endswith('='):
                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8', newline='') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + os.linesep)
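    # format_tmpl() shorthand examples (informal, traced through the code above):
    #   'id,title'   -> '%(id)s\n%(title)s'        (one field per line)
    #   'duration='  -> 'duration = %(duration)#j' (name = JSON value)
    #   '{id,title}' -> '%(.{id,title})j'          (JSON dict of both fields)
    # Templates that are not plain field lists pass through unchanged.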
    def __forced_printings(self, info_dict, filename, incomplete):
        def print_mandatory(field, actual_field=None):
            if actual_field is None:
                actual_field = field
            if (self.params.get('force%s' % field, False)
                    and (not incomplete or info_dict.get(actual_field) is not None)):
                self.to_stdout(info_dict[actual_field])

        def print_optional(field):
            if (self.params.get('force%s' % field, False)
                    and info_dict.get(field) is not None):
                self.to_stdout(info_dict[field])

        info_dict = info_dict.copy()
        if filename is not None:
            info_dict['filename'] = filename
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        self._forceprint('video', info_dict)

        print_mandatory('title')
        print_mandatory('id')
        print_mandatory('url', 'urls')
        print_optional('thumbnail')
        print_optional('description')
        print_optional('filename')
        if self.params.get('forceduration') and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        print_mandatory('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)
    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None
    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""

        assert info_dict.get('_type', 'video') == 'video'
        original_infodict = info_dict

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            info_dict['__write_download_archive'] = 'ignore'
            return

        # Does nothing under normal operation - for backward compatibility of process_info
        self.post_extract(info_dict)

        def replace_info_dict(new_info):
            nonlocal info_dict
            if new_info == info_dict:
                return
            info_dict.clear()
            info_dict.update(new_info)

        new_info, _ = self.pre_process(info_dict, 'video')
        replace_info_dict(new_info)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        # Forced printings
        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        def check_max_downloads():
            if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
                raise MaxDownloadsReached()
        if self.params.get('simulate'):
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
            check_max_downloads()
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatibility, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return
3071 # Note: Annotations are deprecated
3073 if self
.params
.get('writeannotations', False):
3074 annofn
= self
.prepare_filename(info_dict
, 'annotation')
3076 if not self
._ensure
_dir
_exists
(encodeFilename(annofn
)):
3078 if not self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(annofn
)):
3079 self
.to_screen('[info] Video annotations are already present')
3080 elif not info_dict
.get('annotations'):
3081 self
.report_warning('There are no annotations to write.')
3084 self
.to_screen('[info] Writing video annotations to: ' + annofn
)
3085 with open(encodeFilename(annofn
), 'w', encoding
='utf-8') as annofile
:
3086 annofile
.write(info_dict
['annotations'])
3087 except (KeyError, TypeError):
3088 self
.report_warning('There are no annotations to write.')
3090 self
.report_error('Cannot write annotations file: ' + annofn
)
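
        # _write_link_file() below returns True when the shortcut was written or
        # intentionally skipped, and False on error; the any() check further down
        # therefore aborts processing on the first failure.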

        # Write internet shortcut files
        def _write_link_file(link_type):
            url = try_get(info_dict['webpage_url'], iri_to_uri)
            if not url:
                self.report_warning(
                    f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
                return True
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                          newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': url}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except OSError:
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)

        if self.params.get('skip_download'):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:
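
                # existing_video_file() also probes the name with final_ext applied
                # (set by e.g. --recode-video), so a file that was downloaded and
                # converted on a previous run counts as already present.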

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file

                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return

                if info_dict.get('requested_formats') is not None:
                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in requested_formats if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                        if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                            info_dict['__postprocessors'].append(merger)
                            info_dict['__files_to_merge'] = downloaded
                            # Even if there were no downloads, it is being merged only now
                            info_dict['__real_download'] = True
                        else:
                            for file in downloaded:
                                files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

            self._raise_pending_errors(info_dict)
            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        return
                    elif fixup_policy == 'warn':
                        do_fixup = 'warn'
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not (do_fixup and cndn):
                            return
                        elif do_fixup == 'warn':
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(stretched_ratio not in (1, None),
                                 f'Non-uniform pixel ratio {stretched_ratio}',
                                 FFmpegFixupStretchedPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.FD_NAME if downloader else None

                    ext = info_dict.get('ext')
                    postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                        isinstance(pp, FFmpegVideoConvertorPP)
                        and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                    ) for pp in self._pps['post_process'])

                    if not postprocessed_by_ffmpeg:
                        ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
                                     'writing DASH m4a. Only some players support this container',
                                     FFmpegFixupM4aPP)
                        ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                     or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                     'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                     FFmpegFixupM3u8PP)
                        ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
                                     'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
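
    # __download_wrapper() funnels download() and download_with_info_file()
    # through common error handling: UnavailableVideoError is reported,
    # DownloadCancelled aborts (or just resets the counter with --break-per-url),
    # and --dump-single-json output is emitted on success.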

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
                self._num_downloads = 0
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
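
    # Minimal embedding sketch (the URL is a placeholder):
    #     with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #         retcode = ydl.download(['https://www.youtube.com/watch?v=...'])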

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, so we can't call json.load
            infos = [self.sanitize_info(info, self.params.get('clean_infojson', True))
                     for info in variadic(json.loads('\n'.join(f)))]
        for info in infos:
            try:
                self.__download_wrapper(self.process_ie_result)(info, download=True)
            except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
                if not isinstance(e, EntryNotInPlaylist):
                    self.to_stderr('\r')
                webpage_url = info.get('webpage_url')
                if webpage_url is None:
                    raise
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                self.download([webpage_url])
        return self._download_retcode

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')
        info_dict.setdefault('_version', {
            'version': __version__,
            'current_git_head': current_git_head(),
            'release_git_head': RELEASE_GIT_HEAD,
            'repository': REPOSITORY,
        })

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
                '_format_sort_fields',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
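
    # Hedged example of round-tripping an info dict through JSON ('ydl' and
    # 'url' are assumed to exist); sanitize_info() guarantees serializability
    # by repr()-ing anything that is not a plain JSON type:
    #     info = ydl.extract_info(url, download=False)
    #     data = json.dumps(YoutubeDL.sanitize_info(info))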

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict
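
    # run_pp() centralizes the "delete or keep the original file" decision:
    # with keepvideo (-k), files a PP marks for deletion are merely queued in
    # __files_to_move; otherwise they are removed immediately.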

    def run_all_pps(self, key, info, *, additional_pps=None):
        if key != 'video':
            self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)
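
    # Archive ids pair the extractor key with the video id (an entry resembles
    # 'youtube <id>'); in_download_archive() below also matches any
    # _old_archive_ids that extractors provide for renamed/merged extractors.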

    def in_download_archive(self, info_dict):
        if not self.archive:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids') or [])
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        if is_path_like(fn):
            with locked_file(fn, 'a', encoding='utf-8') as archive_file:
                archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _get_formats(self, info_dict):
        if info_dict.get('formats') is None:
            if info_dict.get('url') and info_dict.get('_type', 'video') == 'video':
                return [info_dict]
            return []
        return info_dict['formats']

    def render_formats_table(self, info_dict):
        formats = self._get_formats(info_dict)
        if not formats:
            return
        if self.params.get('listformats_table', True) is False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f),
                ] for f in formats if (f.get('preference') or 0) >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field, 'unknown')
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim,
                format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(
                    self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                    self._format_out('DRM', 'light red') if f.get('has_drm') else None,
                    format_field(f, 'language', '[%s]'),
                    join_nonempty(format_field(f, 'format_note'),
                                  format_field(f, 'container', ignore=(None, f.get('ext'))),
                                  delim=', '),
                    delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        from . import _IN_CLI  # Must be delayed import

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import (
            _PLUGIN_CLASSES as plugin_ies,
            _PLUGIN_OVERRIDES as plugin_ie_overrides,
        )

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        if VARIANT not in (None, 'pip'):
            source += '*'
        klass = type(self)
        write_debug(join_nonempty(
            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
            f'{CHANNEL}@{__version__}',
            f'[{RELEASE_GIT_HEAD[:9]}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            '' if _IN_CLI else 'API' if klass == YoutubeDL else f'API:{self.__module__}.{klass.__qualname__}',
            delim=' '))

        if not _IN_CLI:
            write_debug(f'params: {self.params}')

        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if current_git_head():
            write_debug(f'Git HEAD: {current_git_head()}')
        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        self._setup_opener()
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
            display_list = ['%s%s' % (
                klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in plugins.items()]
            if plugin_type == 'Extractor':
                display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
                                    for parent, plugins in plugin_ie_overrides.items())
            if not display_list:
                continue
            write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')

        plugin_dirs = plugin_directories()
        if plugin_dirs:
            write_debug(f'Plugin directories: {plugin_dirs}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    def _setup_opener(self):
        if hasattr(self, '_opener'):
            return
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = urllib.request.DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = urllib.request.FileHandler()

        if not self.params.get('enable_file_urls'):
            def file_open(*args, **kwargs):
                raise urllib.error.URLError(
                    'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
                    'Use --enable-file-urls to enable at your own risk.')
            file_handler.file_open = file_open

        opener = urllib.request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
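
    # The opener is built lazily and only once (note the hasattr guard at the
    # top of _setup_opener), so cookie and proxy handlers are not constructed
    # until something actually needs the network.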

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and return True = written, 'exists' = already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and return True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.to_screen(f'[info] There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # Subtitle download errors are already managed as troubles in the relevant IE;
            # that way, it will silently go on when used with an unsupported IE
            return ret
        elif not subtitles:
            self.to_screen('[info] There\'s no subtitles for the requested languages')
            return ret
        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
            if not thumbnails:
                self.to_screen(f'[info] There\'s no {label} thumbnails to download')
                return ret
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all: