from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import detect_variant
from .utils import (
    PerRequestProxyHandler,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
    format_decimal_suffix,
    register_socks_protocols,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader does not know how to
    extract all the needed information (that is the InfoExtractors' job),
    so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL object also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
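
    A minimal usage sketch (illustrative only; the URL is a placeholder and the
    option names are the ones documented below):

        ydl_opts = {'format': 'best', 'outtmpl': {'default': '%(title)s.%(ext)s'}}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://example.com/watch?v=some-video'])

    The available options are listed below.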
    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False:     Always process (default)
                       * True:      Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                    playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that takes the number of attempts
                       as argument and returns the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar
    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube
    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',

        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    _format_selection_exts = {
        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
        'video': {'mp4', 'flv', 'webm', '3gp'},
        'storyboards': {'mhtml'},
    }
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)
        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))
        if self.params.get('allow_unplayable_formats'):
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)
        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
            self.params['nooverwrites'] = not self.params['overwrites']
        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}
        if self.params.get('bidi_workaround', False):
            master, slave = pty.openpty()
            width = shutil.get_terminal_size().columns
            width_args = [] if width is None else ['-w', str(width)]
            sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
            self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
            self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
            self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
        if auto_init != 'no_verbose_header':
            self.print_debug_header()
        self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True
        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),

        register_socks_protocols()
        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            self.write_debug(f'Loading archive file {fn!r}')
            with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                for line in archive_file:
                    self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:

        preload_download_archive(self.params.get('download_archive'))
    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))
    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)
    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
    def get_info_extractor(self, ie_key):
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there is no instance it will create a new one and
        add it to the extractor list.
        ie = self._ies_instances.get(ie_key)
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
    def add_default_info_extractors(self):
        Add the InfoExtractors returned by gen_extractors to the end of the list
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)
    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)
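
    # Example (illustrative; MyPP stands for any PostProcessor subclass and
    # ydl for a YoutubeDL instance):
    #   ydl.add_post_processor(MyPP(), when='post_process')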
    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)
    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)
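
    # Example (illustrative): a hook receives the progress dictionary described
    # in the class docstring; check 'status' first and ignore unknown values.
    #   def my_hook(d):
    #       if d['status'] == 'finished':
    #           print('Done downloading', d['filename'])
    #   ydl.add_progress_hook(my_hook)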
    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            pp.add_progress_hook(ph)
    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]
    def _write_string(self, message, out=None, only_once=False):
            if message in self._printed_messages:
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))
    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)
    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
        self._write_string(code, self._out_files.console)
    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
            self._send_console_code(f'\033]0;{message}\007')
    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
        self._send_console_code('\033[22;0t')  # Save the title on stack
    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
        self._send_console_code('\033[23;0t')  # Restore the title from stack
        self.save_console_title()

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)
    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors, this method may raise an exception or not when
        errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if sys.exc_info()[0]:  # if .trouble has been called from an except block
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += encode_compat_str(traceback.format_exc())
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = ''.join(tb_data)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1
        EMPHASIS='light blue',
        SUPPRESS='light black',
    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
        return format_text(text, f) if allow_colors else text if fallback is None else fallback
    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
    def report_warning(self, message, only_once=False):
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
            if self.params.get('no_warnings'):
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
    def report_error(self, message, *args, **kwargs):
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            self.to_stderr(message, only_once)
    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')
    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')
    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
            self.report_warning(msg)
    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']
    def _parse_outtmpl(self):
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})
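
    # Note (illustrative): after the compatibility branch above, a plain-string
    # template such as params['outtmpl'] == '%(title)s.%(ext)s' becomes
    # {'default': '%(title)s.%(ext)s'}, and any keys still unset are filled in
    # from DEFAULT_OUTTMPL.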
    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
        return sanitize_path(path, force=self.params.get('windowsfilenames'))
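
    # Example (illustrative): with params['paths'] == {'home': '/downloads', 'temp': 'tmp'},
    # get_output_path('temp', 'clip.mp4') joins the home path, the per-type path
    # and the filename into roughly '/downloads/tmp/clip.mp4' before sanitize_path.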
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
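
    # Example (illustrative): escape_outtmpl doubles '%' characters that do not
    # start a template key, e.g. '100% of %(title)s' -> '100%% of %(title)s',
    # so the later '%'-substitution step does not misinterpret them.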
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
        except ValueError as err:
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,

        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<alternate>(?<!\\),[^|&)]+)?
            (?:&(?P<replacement>.*?))?
            (?:\|(?P<default>.*?))?
        def _traverse_infodict(k):
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
        def get_value(mdict):
            value = _traverse_infodict(mdict['fields'])
                value = float_or_none(value)
                if value is not None:
            offset_key = mdict['maths']
                value = float_or_none(value)
                    MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                    offset_key).group(0)
                offset_key = offset_key[len(item):]
                if operator is None:
                    operator = MATH_FUNCTIONS[item]
                item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                offset = float_or_none(item)
                    offset = float_or_none(_traverse_infodict(item))
                    value = operator(value, multiplier * offset)
                except (TypeError, ZeroDivisionError):
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

        na = self.params.get('outtmpl_na_placeholder', 'NA')
        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)
        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                    value = str(value)[0]
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                    value, fmt = default, 's'

                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
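
    # Example (illustrative): evaluating a template against an info dict;
    # missing fields fall back to the 'NA' placeholder (outtmpl_na_placeholder).
    #   ydl.evaluate_outtmpl('%(title)s.%(ext)s', {'title': 'demo', 'ext': 'mp4'})  # -> 'demo.mp4'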
    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
                force_ext = OUTTMPL_TYPES[tmpl_type]
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):

            if not self.params.get('paths'):
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:

        return self.get_output_path(dir_type, filename)
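
    # Example (illustrative): for an info dict with title 'demo', id 'x1' and ext 'mp4',
    # the default template '%(title)s [%(id)s].%(ext)s' yields something like
    # 'demo [x1].mp4', placed under paths['home'] when that is set.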
    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if not re.search(matchtitle, title, re.IGNORECASE):
                return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if re.search(rejecttitle, title, re.IGNORECASE):
                return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date')
        if date is not None:
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
        view_count = info_dict.get('view_count')
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
            return 'Skipping "%s" because it is age restricted' % video_title

        match_filter = self.params.get('match_filter')
        if match_filter is not None:
            ret = match_filter(info_dict, incomplete=incomplete)
            # For backward compatibility
            ret = None if incomplete else match_filter(info_dict)
            if ret is NO_DEFAULT:
                filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                reply = input(self._format_screen(
                    f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                if reply in {'y', ''}:
                return f'Skipping {video_title}'
            elif ret is not None:

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)
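
    # Note (illustrative): add_extra_info(info, {'playlist': 'watchlist'}) only
    # fills keys that are missing from info; existing values are left untouched.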
    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        Return a list with a dictionary for each video extracted.

        url -- URL to extract

        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor

        if extra_info is None:

        if not ie_key and force_generic_extractor:

            ies = {ie_key: self._get_info_extractor_class(ie_key)}

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        self.report_error('no suitable InfoExtractor for URL %s' % url)
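
    # Example (illustrative): metadata-only extraction through the public API,
    # assuming ydl is a YoutubeDL instance and url points at a supported site:
    #   info = ydl.extract_info(url, download=False)
    #   print(info.get('title'))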
    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
                return func(self, *args, **kwargs)
            except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
            except ReExtractInfo as e:
                    self.to_screen(f'{e}; Re-extracting data')
                    self.to_stderr('\r')
                    self.report_warning(f'{e}; Re-extracting data')
            except GeoRestrictedError as e:
                msg += '\nThis video is available in %s.' % ', '.join(
                    map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(str(e), e.format_traceback())
            except Exception as e:
                if self.params.get('ignoreerrors'):
                    self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
    def _wait_for_video(self, ie_result):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            self.to_screen(full_msg, skip_eol=True)

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
                diff = wait_till - time.time()
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
        except KeyboardInterrupt:
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
                '_type': 'compat_list',
                'entries': ie_result,
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
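        # The result types handled below are: 'video', 'url', 'url_transparent',
        # 'playlist', 'multi_video' and the deprecated 'compat_list'.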
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, **kwargs):
        return {
            **ie_result,
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            'playlist_index': 0,
            **kwargs,
        }

    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))

            # TODO: Add auto-generated fields
            if not entry or self._match_entry(entry, incomplete=True) is not None:
                continue

            self.to_screen('[download] Downloading video %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_result = self.__process_iterable_entry(entry, download, {
                'n_entries': int_or_none(n_entries),
                '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
                'playlist_count': ie_result.get('playlist_count'),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
                'playlist': title,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            })
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result

    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "
        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])
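        # Rough meaning of the specs returned below (a summary, not new behaviour):
        #   'best/bestvideo+bestaudio'  - prefer a single pre-merged file when merging may not work
        #   'bestvideo*+bestaudio/best' - default: best video (possibly with audio) plus best audio
        #   'bestvideo+bestaudio/best'  - compat mode: only merge pure video with pure audio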
        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

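        # As an illustration, a spec such as 'bv*+ba/b' parses roughly into
        # PICKFIRST( MERGE(SINGLE 'bv*', SINGLE 'ba'), [SINGLE 'b'] ),
        # which _build_selector_function below turns into a callable selector.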
        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = self.params.get('merge_output_format')
            if not output_ext:
                if the_only_video:
                    output_ext = the_only_video['ext']
                elif the_only_audio and not video_fmts:
                    output_ext = the_only_audio['ext']
                else:
                    output_ext = 'mkv'

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if not matches:
                            if format_fallback and ctx['incomplete_formats']:
                                # for extractors with incomplete formats (audio only (soundcloud)
                                # or video only (imgur)) best/worst will fallback to
                                # best/worst {video,audio}-only format
                                matches = formats
                            elif seperate_fallback and not ctx['has_merged_format']:
                                # for compatibility with youtube-dl when there is no pre-merged format
                                matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})

        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')

    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, is_video=True):
        # TODO: move sanitization here
        if is_video:
            # playlists are allowed to lack "title"
            title = info_dict.get('title', NO_DEFAULT)
            if title is NO_DEFAULT:
                raise ExtractorError('Missing "title" field in extractor result',
                                     video_id=info_dict['id'], ie=info_dict['extractor'])
            info_dict['fulltitle'] = title
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
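        # For example, a 'release_timestamp' of 1656633600 (2022-07-01 00:00:00 UTC)
        # yields 'release_date' == '20220701'.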
        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'

        info_dict['live_status'] = live_status
        for key in live_keys:
            if info_dict.get(key) is None:
                info_dict[key] = (live_status == key)

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        # or None ensures --clean-infojson removes it
        info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]
            if info_dict['_has_drm'] and all(
                    f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
                self.report_warning(
                    'This video is DRM protected and only images are available for download. '
                    'Use --list-formats to see them')

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        if not formats:
            self.raise_no_formats(info_dict)

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats))

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                if format.get('ext') is None:
                    format['ext'] = determine_ext(format['url']).lower()
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=format_field(format, 'format_note', ' (%s)'),
                )
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
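                # Rough estimate only: tbr is in KBit/s, so e.g. 60s at 1000 KBit/s
                # gives int(60 * 1000 * 128) = 7680000 bytes (about 7.3 MiB).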
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)
        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = info_dict.get('formats', [info_dict])

        list_only = self.params.get('simulate') is None and (
            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return info_dict

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        while True:
            if interactive_format_selection:
                req_format = input(
                    self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format)
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (
                    # All formats are video-only or
                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                    # all formats are audio-only
                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = self.params.get('download_ranges')
        if requested_ranges:
            requested_ranges = tuple(requested_ranges(info_dict, self))

        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            def to_screen(*msg):
                self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

            to_screen(f'Downloading {len(formats_to_download)} format(s):',
                      (f['format_id'] for f in formats_to_download))
            if requested_ranges:
                to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                          (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': offset + min(chapter.get('end_time', duration), duration),
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                    self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if (not self.params.get('writesubtitles') and not
                self.params.get('writeautomaticsub') or not
                available_subs):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            # A list is used so that the order of languages will be the same as
            # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
            requested_langs = []
            for lang_re in self.params.get('subtitleslangs'):
                discard = lang_re[0] == '-'
                if discard:
                    lang_re = lang_re[1:]
                if lang_re == 'all':
                    if discard:
                        requested_langs = []
                    else:
                        requested_langs.extend(all_sub_langs)
                    continue
                current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
                if discard:
                    for lang in current_langs:
                        while lang in requested_langs:
                            requested_langs.remove(lang)
                else:
                    requested_langs.extend(current_langs)
            requested_langs = orderedSet(requested_langs)
        elif normal_sub_langs:
            requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
        else:
            requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
        if requested_langs:
            self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs

    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.match(r'\w+(=?)$', tmpl)
            if mobj and mobj.group(1):
                return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
            elif mobj:
                return f'%({tmpl})s'
            return tmpl
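        # e.g. format_tmpl('title') -> '%(title)s', format_tmpl('id=') -> 'id = %(id)r';
        # anything that is not a plain field name is passed through unchanged.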
        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')

    def __forced_printings(self, info_dict, filename, incomplete):
        def print_mandatory(field, actual_field=None):
            if actual_field is None:
                actual_field = field
            if (self.params.get('force%s' % field, False)
                    and (not incomplete or info_dict.get(actual_field) is not None)):
                self.to_stdout(info_dict[actual_field])

        def print_optional(field):
            if (self.params.get('force%s' % field, False)
                    and info_dict.get(field) is not None):
                self.to_stdout(info_dict[field])

        info_dict = info_dict.copy()
        if filename is not None:
            info_dict['filename'] = filename
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        self._forceprint('video', info_dict)

        print_mandatory('title')
        print_mandatory('id')
        print_mandatory('url', 'urls')
        print_optional('thumbnail')
        print_optional('description')
        print_optional('filename')
        if self.params.get('forceduration') and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        print_mandatory('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)

        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
        urls = '", "'.join(
            (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
            for f in info.get('requested_formats', []) or [info])
        self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)

    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

    def process_info(self, info_dict):
        """Process a single resolved IE result. (Modifies it in-place)"""
2876 assert info_dict
.get('_type', 'video') == 'video'
2877 original_infodict
= info_dict
2879 if 'format' not in info_dict
and 'ext' in info_dict
:
2880 info_dict
['format'] = info_dict
['ext']
2882 # This is mostly just for backward compatibility of process_info
2883 # As a side-effect, this allows for format-specific filters
2884 if self
._match
_entry
(info_dict
) is not None:
2885 info_dict
['__write_download_archive'] = 'ignore'
2888 # Does nothing under normal operation - for backward compatibility of process_info
2889 self
.post_extract(info_dict
)
2890 self
._num
_downloads
+= 1
2892 # info_dict['_filename'] needs to be set for backward compatibility
2893 info_dict
['_filename'] = full_filename
= self
.prepare_filename(info_dict
, warn
=True)
2894 temp_filename
= self
.prepare_filename(info_dict
, 'temp')
2898 self
.__forced
_printings
(info_dict
, full_filename
, incomplete
=('format' not in info_dict
))
2900 def check_max_downloads():
2901 if self
._num
_downloads
>= float(self
.params
.get('max_downloads') or 'inf'):
2902 raise MaxDownloadsReached()
2904 if self
.params
.get('simulate'):
2905 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
2906 check_max_downloads()
2909 if full_filename
is None:
2911 if not self
._ensure
_dir
_exists
(encodeFilename(full_filename
)):
2913 if not self
._ensure
_dir
_exists
(encodeFilename(temp_filename
)):
2916 if self
._write
_description
('video', info_dict
,
2917 self
.prepare_filename(info_dict
, 'description')) is None:
2920 sub_files
= self
._write
_subtitles
(info_dict
, temp_filename
)
2921 if sub_files
is None:
2923 files_to_move
.update(dict(sub_files
))
2925 thumb_files
= self
._write
_thumbnails
(
2926 'video', info_dict
, temp_filename
, self
.prepare_filename(info_dict
, 'thumbnail'))
2927 if thumb_files
is None:
2929 files_to_move
.update(dict(thumb_files
))
2931 infofn
= self
.prepare_filename(info_dict
, 'infojson')
2932 _infojson_written
= self
._write
_info
_json
('video', info_dict
, infofn
)
2933 if _infojson_written
:
2934 info_dict
['infojson_filename'] = infofn
2935 # For backward compatibility, even though it was a private field
2936 info_dict
['__infojson_filename'] = infofn
2937 elif _infojson_written
is None:
2940 # Note: Annotations are deprecated
2942 if self
.params
.get('writeannotations', False):
2943 annofn
= self
.prepare_filename(info_dict
, 'annotation')
2945 if not self
._ensure
_dir
_exists
(encodeFilename(annofn
)):
2947 if not self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(annofn
)):
2948 self
.to_screen('[info] Video annotations are already present')
2949 elif not info_dict
.get('annotations'):
2950 self
.report_warning('There are no annotations to write.')
2953 self
.to_screen('[info] Writing video annotations to: ' + annofn
)
2954 with open(encodeFilename(annofn
), 'w', encoding
='utf-8') as annofile
:
2955 annofile
.write(info_dict
['annotations'])
2956 except (KeyError, TypeError):
2957 self
.report_warning('There are no annotations to write.')
2959 self
.report_error('Cannot write annotations file: ' + annofn
)
2962 # Write internet shortcut files
2963 def _write_link_file(link_type
):
2964 url
= try_get(info_dict
['webpage_url'], iri_to_uri
)
2966 self
.report_warning(
2967 f
'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2969 linkfn
= replace_extension(self
.prepare_filename(info_dict
, 'link'), link_type
, info_dict
.get('ext'))
2970 if not self
._ensure
_dir
_exists
(encodeFilename(linkfn
)):
2972 if self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(linkfn
)):
2973 self
.to_screen(f
'[info] Internet shortcut (.{link_type}) is already present')
2976 self
.to_screen(f
'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
2977 with open(encodeFilename(to_high_limit_path(linkfn
)), 'w', encoding
='utf-8',
2978 newline
='\r\n' if link_type
== 'url' else '\n') as linkfile
:
2979 template_vars
= {'url': url}
2980 if link_type
== 'desktop':
2981 template_vars
['filename'] = linkfn
[:-(len(link_type
) + 1)]
2982 linkfile
.write(LINK_TEMPLATES
[link_type
] % template_vars
)
2984 self
.report_error(f
'Cannot write internet shortcut {linkfn}')
2989 'url': self
.params
.get('writeurllink'),
2990 'webloc': self
.params
.get('writewebloclink'),
2991 'desktop': self
.params
.get('writedesktoplink'),
2993 if self
.params
.get('writelink'):
2994 link_type
= ('webloc' if sys
.platform
== 'darwin'
2995 else 'desktop' if sys
.platform
.startswith('linux')
2997 write_links
[link_type
] = True
2999 if any(should_write
and not _write_link_file(link_type
)
3000 for link_type
, should_write
in write_links
.items()):
3003 def replace_info_dict(new_info
):
3005 if new_info
== info_dict
:
3008 info_dict
.update(new_info
)
3010 new_info
, files_to_move
= self
.pre_process(info_dict
, 'before_dl', files_to_move
)
3011 replace_info_dict(new_info
)
3013 if self
.params
.get('skip_download'):
3014 info_dict
['filepath'] = temp_filename
3015 info_dict
['__finaldir'] = os
.path
.dirname(os
.path
.abspath(encodeFilename(full_filename
)))
3016 info_dict
['__files_to_move'] = files_to_move
3017 replace_info_dict(self
.run_pp(MoveFilesAfterDownloadPP(self
, False), info_dict
))
3018 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
3021 info_dict
.setdefault('__postprocessors', [])
3024 def existing_video_file(*filepaths
):
3025 ext
= info_dict
.get('ext')
3026 converted
= lambda file: replace_extension(file, self
.params
.get('final_ext') or ext
, ext
)
3027 file = self
.existing_file(itertools
.chain(*zip(map(converted
, filepaths
), filepaths
)),
3028 default_overwrite
=False)
3030 info_dict
['ext'] = os
.path
.splitext(file)[1][1:]
3033 fd
, success
= None, True
3034 if info_dict
.get('protocol') or info_dict
.get('url'):
3035 fd
= get_suitable_downloader(info_dict
, self
.params
, to_stdout
=temp_filename
== '-')
3036 if fd
is not FFmpegFD
and (
3037 info_dict
.get('section_start') or info_dict
.get('section_end')):
3038 msg
= ('This format cannot be partially downloaded' if FFmpegFD
.available()
3039 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3040 self
.report_error(f
'{msg}. Aborting')
                if info_dict.get('requested_formats') is not None:

                    def compatible_formats(formats):
                        # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
                        video_formats = [format for format in formats if format.get('vcodec') != 'none']
                        audio_formats = [format for format in formats if format.get('acodec') != 'none']
                        if len(video_formats) > 2 or len(audio_formats) > 2:
                            return False

                        # Check extension
                        exts = {format.get('ext') for format in formats}
                        COMPATIBLE_EXTS = (
                            {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
                            {'webm'},
                        )
                        for ext_sets in COMPATIBLE_EXTS:
                            if ext_sets.issuperset(exts):
                                return True
                        # TODO: Check acodec/vcodec
                        return False
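                    # Illustrative example (added comment, not from the original source):
                    # with the extension sets above, an 'mp4' video + 'm4a' audio pair is
                    # treated as mergeable as-is, while 'webm' video + 'm4a' audio spans two
                    # sets and falls through to the mkv fallback handled just below.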
                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if not compatible_formats(requested_formats):
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'Requested formats are incompatible for merge and will be merged into mkv')
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'
                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in requested_formats if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                            f['filepath'] = fname
                            downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success
                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

        self._raise_pending_errors(info_dict)
        if success and full_filename != '-':

            def fixup():
                do_fixup = True
                fixup_policy = self.params.get('fixup')
                vid = info_dict['id']

                if fixup_policy in ('ignore', 'never'):
                    do_fixup = False
                elif fixup_policy == 'warn':
                    do_fixup = 'warn'
                elif fixup_policy != 'force':
                    assert fixup_policy in ('detect_or_warn', None)
                    if not info_dict.get('__real_download'):
                        do_fixup = False

                def ffmpeg_fixup(cndn, msg, cls):
                    if not (do_fixup and cndn):
                        return
                    elif do_fixup == 'warn':
                        self.report_warning(f'{vid}: {msg}')
                        return
                    pp = cls(self)
                    if pp.available:
                        info_dict['__postprocessors'].append(pp)
                    else:
                        self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
                stretched_ratio = info_dict.get('stretched_ratio')
                ffmpeg_fixup(stretched_ratio not in (1, None),
                             f'Non-uniform pixel ratio {stretched_ratio}',
                             FFmpegFixupStretchedPP)

                downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                downloader = downloader.FD_NAME if downloader else None

                ext = info_dict.get('ext')
                postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                    isinstance(pp, FFmpegVideoConvertorPP)
                    and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                ) for pp in self._pps['post_process'])

                if not postprocessed_by_ffmpeg:
                    ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
                                 'writing DASH m4a. Only some players support this container',
                                 FFmpegFixupM4aPP)
                    ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                 FFmpegFixupM3u8PP)
                    ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
                                 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
            fixup()

            try:
                replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
            except PostProcessingError as err:
                self.report_error('Postprocessing: %s' % str(err))
                return
            try:
                for ph in self._post_hooks:
                    ph(info_dict['filepath'])
            except Exception as err:
                self.report_error('post hooks: %s' % str(err))
                return
            info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()
    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper
    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
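    # Example usage (illustrative comment, not part of the original source):
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
    # A bare URL string also works, because variadic() above wraps single values.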
    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
        try:
            self.__download_wrapper(self.process_ie_result)(info, download=True)
        except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
            if not isinstance(e, EntryNotInPlaylist):
                self.to_stderr('\r')
            webpage_url = info.get('webpage_url')
            if webpage_url is not None:
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                return self.download([webpage_url])
            else:
                raise
        return self._download_retcode
    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
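    # Illustrative sketch (added comment, not from the original source): with
    # remove_private_keys=True, keys such as 'requested_formats', '_filename' and
    # anything starting with '__' are dropped, e.g.
    #   {'id': 'x', '__real_download': True, 'filepath': '/tmp/x.mp4'}  ->  {'id': 'x'}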
    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)
    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]
    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})
    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict
    def run_all_pps(self, key, info, *, additional_pps=None):
        self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info
    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)
    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
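    # Note (added comment, not from the original source): the ordering above means the
    # 'post_process' postprocessors (plus any per-download ones collected in
    # info['__postprocessors']) run first, then MoveFilesAfterDownloadPP relocates the
    # files, and only then do the 'after_move' postprocessors run on the final paths.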
    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return f'{extractor.lower()} {video_id}'
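    # Illustrative example (added comment, not from the original source): for a YouTube
    # video with id 'BaW_jenozKc' this returns 'youtube BaW_jenozKc', the line format
    # used in --download-archive files.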
    def in_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return False

        vid_id = self._make_archive_id(info_dict)
        if not vid_id:
            return False  # Incomplete video information

        return vid_id in self.archive
    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        with locked_file(fn, 'a', encoding='utf-8') as archive_file:
            archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
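    # Sketch of the resulting archive file (added comment, not from the original source),
    # one '<extractor> <id>' entry per line, e.g.:
    #   youtube BaW_jenozKc
    #   vimeo 56015672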
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
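    # Examples (added comment, derived from the branches above):
    #   {'width': 1920, 'height': 1080}            -> '1920x1080'
    #   {'height': 720}                            -> '720p'
    #   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}  -> 'audio only'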
    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers
    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
    def render_formats_table(self, info_dict):
        if not info_dict.get('formats') and not info_dict.get('url'):
            return None

        formats = info_dict.get('formats', [info_dict])
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f)
                ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field, 'unknown')
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return 'images'
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                delim,
                format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(
                    self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                    format_field(f, 'language', '[%s]'),
                    join_nonempty(format_field(f, 'format_note'),
                                  format_field(f, 'container', ignore=(None, f.get('ext'))),
                                  delim=', '),
                    delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
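    # Note (added comment, not from the original source): simplified_codec() keeps at
    # most the first four dot-separated parts of a codec string, so e.g. 'avc1.640028'
    # and 'mp4a.40.2' are shown unchanged while longer strings are truncated for the table.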
    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)
    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)
    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)
    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console')
        )

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
        source = detect_variant()
        write_debug(join_nonempty(
            'yt-dlp version', __version__,
            f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            delim=' '))
        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if plugin_extractors or plugin_postprocessors:
            write_debug('Plugins: %s' % [
                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if source == 'source':
            try:
                stdout, _, _ = Popen.run(
                    ['git', 'rev-parse', '--short', 'HEAD'],
                    text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                if re.fullmatch('[0-9a-f]+', stdout.strip()):
                    write_debug(f'Git HEAD: {stdout.strip()}')
            except Exception:
                with contextlib.suppress(Exception):
                    sys.exc_clear()

        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        self._setup_opener()
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
    def _setup_opener(self):
        if hasattr(self, '_opener'):
            return
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = urllib.request.DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = urllib.request.FileHandler()

        def file_open(*args, **kwargs):
            raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
        file_handler.file_open = file_open

        opener = urllib.request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
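    # Illustrative note (added comment, not from the original source): the 'proxy' param
    # feeds PerRequestProxyHandler with a mapping such as
    #   {'http': 'socks5://127.0.0.1:1080', 'https': 'socks5://127.0.0.1:1080'}
    # while with no explicit proxy, urllib.request.getproxies() (i.e. the HTTP_PROXY /
    # HTTPS_PROXY environment variables) is consulted instead.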
    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None
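    # Return-value summary (added comment, mirroring the docstring above): True = written,
    # 'exists' = already present and not overwritten, False = writing skipped/disabled,
    # None = an error occurred while writing.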
    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.report_warning(f'There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True
    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret

        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                        raise DownloadError(msg)
                    self.report_warning(msg)
        return ret
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all: