from string import ascii_letters

from .cache import Cache
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegVideoConvertorPP,
    MoveFilesAfterDownloadPP,
)
from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
from .update import detect_variant
from .utils import (
    PerRequestProxyHandler,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
    format_decimal_suffix,
    register_socks_protocols,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do),
    it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".
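
    A minimal usage sketch (illustrative; any supported URL works):

        import yt_dlp

        with yt_dlp.YoutubeDL({'format': 'best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])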

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       (Only supported by some extractors)
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Whether to resolve and process url_results further
                       * False: Always process (default)
                       * True: Never process
                       * 'in_playlist': Do not process inside playlist/multi_video
                       * 'discard': Always process, but don't return the result
                                    from inside playlist/multi_video
                       * 'discard_in_playlist': Same as "discard", but only for
                                                playlists (not multi_video)
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: "/" separated list of extensions to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this;
                       a short sketch is also shown near the end of this docstring.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that takes the number of attempts
                       as argument and returns the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A callback function that gets called for every video with
                       the signature (info_dict, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded.
                       Each Section is a dict with the following keys:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
    noprogress:        Do not print the progress bar

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
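
    Example (an illustrative sketch, not an exhaustive reference; it only
    uses options documented above):

        def skip_shorts(info_dict, *, incomplete):
            '''match_filter that rejects videos shorter than a minute'''
            if not incomplete and (info_dict.get('duration') or 0) < 60:
                return 'Video is shorter than 60 seconds'
            return None  # None means "download it"

        params = {
            'format': 'bestvideo+bestaudio/best',
            'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
            'match_filter': skip_shorts,
        }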
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'asr', 'audio_channels', 'fps',
        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': set(MEDIA_EXTENSIONS.common_audio),
        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        # The code is left like this to be reused for future deprecations
        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = ('Support for Python version %d.%d has been deprecated. '
                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
                   '\n                    You will no longer receive updates on this version')
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                return fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefix the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
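        # e.g. (illustrative): with paths={'home': '~/Videos', 'thumbnail': 'thumbs'}
        # and dir_type='thumbnail', files are placed under ~/Videos/thumbs/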
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
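        # e.g. (illustrative): '%%(ext)s' becomes '%<sep>%(ext)s' here, survives
        # expand_path() unchanged, and is restored to '%%(ext)s' below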

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
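        # e.g. (illustrative): '100% of %(title)s' -> '100%% of %(title)s'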
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
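        # e.g. (illustrative): validate_outtmpl('%(title)s.%(ext)s') returns None,
        # while a malformed template like '%(title' returns the raised ValueError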
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
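
        e.g. (an illustrative sketch of the intended use):
            tmpl, tmpl_dict = ydl.prepare_outtmpl('%(title)s.%(ext)s', info_dict)
            filename = ydl.escape_outtmpl(tmpl) % tmpl_dict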
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(k):
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
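
        # e.g. (illustrative): for the template '%(epoch-3600>%H-%M-%S)s',
        # 'fields' is 'epoch', 'maths' is '-3600' and 'strf_format' is '%H-%M-%S'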
        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
            if sanitize and value == '':
                value = None
            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na

            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'h':  # html
                value, fmt = escapeHTML(value), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
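
        e.g. (an illustrative sketch of the common embedding pattern):
            info = ydl.extract_info(url, download=False)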
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._get_info_extractor_class(ie_key)}
        else:
            ies = self._ies

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result={}):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            full_msg = f'{msg}\n'
            if not self.params.get('noprogress'):
                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
            elif last_msg:
                return
            self.to_screen(full_msg, skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif ie_result and (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        try:
            ie_result = ie.extract(url)
        except UserNotLive as e:
            if process:
                if self.params.get('wait_for_video'):
                    self.report_warning(e)
                self._wait_for_video()
            raise
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(
                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

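    # Summary of the dispatch above (comment added for clarity):
    #   video                -> process_video_result()
    #   url                  -> extract_info() (full re-extraction)
    #   url_transparent      -> re-extract, then overlay outer metadata on it
    #   playlist/multi_video -> __process_playlist() (recursion-guarded)
    #   compat_list          -> legacy fixup, then recurse per entry
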
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, strict=False, **kwargs):
        info = {
            'playlist_count': ie_result.get('playlist_count'),
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            **kwargs,
        }
        if strict:
            return info
        return {
            **info,
            'playlist_index': 0,
            '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
            'extractor': ie_result['extractor'],
            'webpage_url': ie_result['webpage_url'],
            'webpage_url_basename': url_basename(ie_result['webpage_url']),
            'webpage_url_domain': get_domain(ie_result['webpage_url']),
            'extractor_key': ie_result['extractor_key'],
        }

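    # Clarifying note (added): with strict=True only the aggregate playlist_*
    # fields are returned; __process_playlist uses that reduced dict to run
    # match filters against the playlist itself before resolving any entries.
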
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        assert ie_result['_type'] in ('playlist', 'multi_video')

        common_info = self._playlist_infodict(ie_result, strict=True)
        title = common_info.get('playlist') or '<Untitled>'
        if self._match_entry(common_info, incomplete=True) is not None:
            return
        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
        ie_copy = collections.ChainMap(ie_result, extra)

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
        if self.params.get('extract_flat') == 'discard_in_playlist':
            keep_resolved_entries = ie_result['_type'] != 'playlist'
        if keep_resolved_entries:
            self.write_debug('The information of all playlist entries will be held in memory')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))
            if not entry:
                continue

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_copy = collections.ChainMap(entry, {
                **common_info,
                'n_entries': int_or_none(n_entries),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })

            if self._match_entry(entry_copy, incomplete=True) is not None:
                continue

            self.to_screen('[download] Downloading video %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            extra.update({
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
            })
            entry_result = self.__process_iterable_entry(entry, download, extra)
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            if keep_resolved_entries:
                resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result

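    # Clarifying note (added): under lazy_playlist the entries are consumed as
    # a stream and resolved_entries grows as items are visited, which is why
    # playlistreverse/playlistrandom cannot apply; otherwise the entry list is
    # realized up front and 'requested_entries'/'entries' are rebuilt from the
    # processed results afterwards.
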
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

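    # Illustrative example (added; not part of the source):
    #
    #   matcher = ydl._build_format_filter('filesize<100M')
    #   matcher({'filesize': 50 * 1024 ** 2})  # -> True
    #   matcher({'tbr': 1000})                 # falsy: filesize missing, no '?'
    #
    # `ydl` is an assumed YoutubeDL instance; a trailing '?' in the spec makes
    # formats lacking the key pass the filter (none_inclusive).
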
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')

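    # Decision summary (added for clarity): prefer_best (merging impossible,
    # live without live_from_start, or output to stdout) selects
    # 'best/bestvideo+bestaudio'; compat mode ('format-spec' compat_opt or
    # multiple audio streams allowed) selects 'bestvideo+bestaudio/best';
    # otherwise the default is 'bestvideo*+bestaudio/best'.
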
    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error(f'Operator not recognized: "{string}"', start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = get_compatible_ext(
                vcodecs=[f.get('vcodec') for f in video_fmts],
                acodecs=[f.get('acodec') for f in audio_fmts],
                vexts=[f['ext'] for f in video_fmts],
                aexts=[f['ext'] for f in audio_fmts],
                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                    'audio_channels': the_only_audio.get('audio_channels')
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(
                            f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                            seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats']:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = formats
                        elif seperate_fallback and not ctx['has_merged_format']:
                            # for compatibility with youtube-dl when there is no pre-merged format
                            matches = list(filter(seperate_fallback, formats))
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except LazyList.IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = dict(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode())
        try:
            tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator:
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)

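    # Illustrative selector strings accepted by the parser above (the examples
    # themselves are added and not in the source):
    #   'best'                       single atom
    #   'bv*+ba/b'                   MERGE with PICKFIRST fallback
    #   'bestvideo[height<=720]'     atom with a bracketed filter
    #   '(mp4,webm)[filesize<50M]'   GROUP sharing one filter
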
    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})

        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')

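    # Clarifying note (added): _calc_headers() merges the session-level
    # http_headers with per-info_dict headers, then lets the cookiejar and a
    # stored __x_forwarded_for_ip fill Cookie/X-Forwarded-For, so external
    # downloader processes see the same headers the extractor negotiated.
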
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

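    # Clarifying note (added): missing numeric keys sort as -1, so after this
    # in-place sort the last element is the most preferred thumbnail; callers
    # rely on thumbnails[-1] being the "best" one.
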
    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails

    def _fill_common_fields(self, info_dict, is_video=True):
        # TODO: move sanitization here
        if is_video:
            # playlists are allowed to lack "title"
            title = info_dict.get('title', NO_DEFAULT)
            if title is NO_DEFAULT:
                raise ExtractorError('Missing "title" field in extractor result',
                                     video_id=info_dict['id'], ie=info_dict['extractor'])
            info_dict['fulltitle'] = title
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            else:
                if all(info_dict.get(key) is False for key in live_keys):
                    live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'
        self._num_videos += 1

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
        elif not info_dict.get('id'):
            raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, (int, float)):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)
        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
            self.report_warning('"duration" field is negative, there is an error in extractor')

        chapters = info_dict.get('chapters') or []
        if chapters and chapters[0].get('start_time'):
            chapters.insert(0, {'start_time': 0})

        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
        for idx, (prev, current, next_) in enumerate(zip(
                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
            if current.get('start_time') is None:
                current['start_time'] = prev.get('end_time')
            if not current.get('end_time'):
                current['end_time'] = next_.get('start_time')
            if not current.get('title'):
                current['title'] = f'<Untitled Chapter {idx}>'

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        self._fill_common_fields(info_dict)

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        # or None ensures --clean-infojson removes it
        info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]
            if info_dict['_has_drm'] and formats and all(
                    f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
                self.report_warning(
                    'This video is DRM protected and only images are available for download. '
                    'Use --list-formats to see them')

        get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        if info_dict.get('is_live') and formats:
            formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
            if get_from_start and not formats:
                self.raise_no_formats(info_dict, msg=(
                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
                    'If you want to download from the current time, use --no-live-from-start'))

        if not formats:
            self.raise_no_formats(info_dict)

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats))

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                format['format_id'] = str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambigious_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambigious_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                if format.get('ext') is None:
                    format['ext'] = determine_ext(format['url']).lower()
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=format_field(format, 'format_note', ' (%s)'),
                )
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))

            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)
        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
            return info_dict

        self.post_extract(info_dict)
        info_dict, _ = self.pre_process(info_dict, 'after_filter')

        # The pre-processors may have modified the formats
        formats = info_dict.get('formats', [info_dict])

        list_only = self.params.get('simulate') is None and (
            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
        interactive_format_selection = not list_only and self.format_selector == '-'
        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        if self.params.get('listformats') or interactive_format_selection:
            self.list_formats(info_dict)
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return info_dict

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        while True:
            if interactive_format_selection:
                req_format = input(
                    self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
                try:
                    format_selector = self.build_format_selector(req_format)
                except SyntaxError as err:
                    self.report_error(err, tb=False, is_error=False)
                    continue

            formats_to_download = list(format_selector({
                'formats': formats,
                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
                'incomplete_formats': (
                    # All formats are video-only or
                    all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
                    # all formats are audio-only
                    or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
            }))
            if interactive_format_selection and not formats_to_download:
                self.report_error('Requested format is not available', tb=False, is_error=False)
                continue
            break

        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError(
                    'Requested format is not available. Use --list-formats for a list of available formats',
                    expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
            self.report_warning('Requested format is not available')
            # Process what we can, even without any available formats.
            formats_to_download = [{}]

        requested_ranges = self.params.get('download_ranges')
        if requested_ranges:
            requested_ranges = tuple(requested_ranges(info_dict, self))

        best_format, downloaded_formats = formats_to_download[-1], []
        if download:
            if best_format:
                def to_screen(*msg):
                    self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')

                to_screen(f'Downloading {len(formats_to_download)} format(s):',
                          (f['format_id'] for f in formats_to_download))
                if requested_ranges:
                    to_screen(f'Downloading {len(requested_ranges)} time ranges:',
                              (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
            max_downloads_reached = False

            for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
                new_info = self._copy_infodict(info_dict)
                new_info.update(fmt)
                offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
                if chapter or offset:
                    new_info.update({
                        'section_start': offset + chapter.get('start_time', 0),
                        'section_end': offset + min(chapter.get('end_time', duration), duration),
                        'section_title': chapter.get('title'),
                        'section_number': chapter.get('index'),
                    })
                downloaded_formats.append(new_info)
                try:
                    self.process_info(new_info)
                except MaxDownloadsReached:
                    max_downloads_reached = True
                self._raise_pending_errors(new_info)
                # Remove copied info
                for key, val in tuple(new_info.items()):
                    if info_dict.get(key) == val:
                        new_info.pop(key)
                if max_downloads_reached:
                    break

            write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
            assert write_archive.issubset({True, False, 'ignore'})
            if True in write_archive and False not in write_archive:
                self.record_download_archive(info_dict)

            info_dict['requested_downloads'] = downloaded_formats
            info_dict = self.run_all_pps('after_video', info_dict)
            if max_downloads_reached:
                raise MaxDownloadsReached()

        # We update the info dict with the selected best quality format (backwards compatibility)
        info_dict.update(best_format)
        return info_dict

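    # Shape note (added for clarity): on return, info_dict carries
    # 'requested_downloads' (one processed copy per selected format and
    # requested section) and, for backward compatibility, the fields of the
    # single best format merged into the top level.
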
    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs, normal_sub_langs = {}, []
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
            normal_sub_langs = tuple(normal_subtitles.keys())
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if (not self.params.get('writesubtitles') and not
                self.params.get('writeautomaticsub') or not
                available_subs):
            return None

        all_sub_langs = tuple(available_subs.keys())
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            # A list is used so that the order of languages will be the same as
            # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
            requested_langs = []
            for lang_re in self.params.get('subtitleslangs'):
                discard = lang_re[0] == '-'
                if discard:
                    lang_re = lang_re[1:]
                if lang_re == 'all':
                    if discard:
                        requested_langs = []
                    else:
                        requested_langs.extend(all_sub_langs)
                    continue
                current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
                if discard:
                    for lang in current_langs:
                        while lang in requested_langs:
                            requested_langs.remove(lang)
                else:
                    requested_langs.extend(current_langs)
            requested_langs = orderedSet(requested_langs)
        elif normal_sub_langs:
            requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
        else:
            requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
        if requested_langs:
            self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning(f'{lang} subtitles not available for {video_id}')
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs

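    # Illustrative example (added; not in the source): with
    # params = {'writesubtitles': True, 'subtitleslangs': ['en.*', '-en-GB']}
    # every English variant is first selected, then the '-'-prefixed pattern
    # discards en-GB; the result maps each remaining language to the single
    # format dict chosen by subtitlesformat.
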
    def _forceprint(self, key, info_dict):
        if info_dict is None:
            return
        info_copy = info_dict.copy()
        info_copy['formats_table'] = self.render_formats_table(info_dict)
        info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
        info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
        info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

        def format_tmpl(tmpl):
            mobj = re.match(r'\w+(=?)$', tmpl)
            if mobj and mobj.group(1):
                return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
            elif mobj:
                return f'%({tmpl})s'
            return tmpl

        for tmpl in self.params['forceprint'].get(key, []):
            self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

        for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
            tmpl = format_tmpl(tmpl)
            self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
            if self._ensure_dir_exists(filename):
                with open(filename, 'a', encoding='utf-8') as f:
                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')

    def __forced_printings(self, info_dict, filename, incomplete):
        def print_mandatory(field, actual_field=None):
            if actual_field is None:
                actual_field = field
            if (self.params.get('force%s' % field, False)
                    and (not incomplete or info_dict.get(actual_field) is not None)):
                self.to_stdout(info_dict[actual_field])

        def print_optional(field):
            if (self.params.get('force%s' % field, False)
                    and info_dict.get(field) is not None):
                self.to_stdout(info_dict[field])

        info_dict = info_dict.copy()
        if filename is not None:
            info_dict['filename'] = filename
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif info_dict.get('url'):
            info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

        if (self.params.get('forcejson')
                or self.params['forceprint'].get('video')
                or self.params['print_to_file'].get('video')):
            self.post_extract(info_dict)
        self._forceprint('video', info_dict)

        print_mandatory('title')
        print_mandatory('id')
        print_mandatory('url', 'urls')
        print_optional('thumbnail')
        print_optional('description')
        print_optional('filename')
        if self.params.get('forceduration') and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        print_mandatory('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

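    # Clarifying note (added): this method backs --dump-json (forcejson) and
    # the legacy --get-* options; with incomplete=True, mandatory fields are
    # printed only when already present, which keeps printing from failing on
    # partial info_dicts (e.g. flat-playlist entries).
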
    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join(
                (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
                for f in info.get('requested_formats', []) or [info])
            self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')

        # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
        # But it may contain objects that are not deep-copyable
        new_info = self._copy_infodict(info)
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)

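    # Usage note (added): _check_formats() calls this with test=True, which
    # swaps in the quieter one-shot params dict above and asks the downloader
    # for only a small test portion of the file rather than a full download.
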
    def existing_file(self, filepaths, *, default_overwrite=True):
        existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
        if existing_files and not self.params.get('overwrites', default_overwrite):
            return existing_files[0]

        for file in existing_files:
            self.report_file_delete(file)
            os.remove(file)
        return None

2902 def process_info(self
, info_dict
):
2903 """Process a single resolved IE result. (Modifies it in-place)"""
2905 assert info_dict
.get('_type', 'video') == 'video'
2906 original_infodict
= info_dict
2908 if 'format' not in info_dict
and 'ext' in info_dict
:
2909 info_dict
['format'] = info_dict
['ext']
2911 # This is mostly just for backward compatibility of process_info
2912 # As a side-effect, this allows for format-specific filters
2913 if self
._match
_entry
(info_dict
) is not None:
2914 info_dict
['__write_download_archive'] = 'ignore'
2917 # Does nothing under normal operation - for backward compatibility of process_info
2918 self
.post_extract(info_dict
)
2919 self
._num
_downloads
+= 1
2921 # info_dict['_filename'] needs to be set for backward compatibility
2922 info_dict
['_filename'] = full_filename
= self
.prepare_filename(info_dict
, warn
=True)
2923 temp_filename
= self
.prepare_filename(info_dict
, 'temp')
2927 self
.__forced
_printings
(info_dict
, full_filename
, incomplete
=('format' not in info_dict
))
2929 def check_max_downloads():
2930 if self
._num
_downloads
>= float(self
.params
.get('max_downloads') or 'inf'):
2931 raise MaxDownloadsReached()
2933 if self
.params
.get('simulate'):
2934 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
2935 check_max_downloads()
2938 if full_filename
is None:
2940 if not self
._ensure
_dir
_exists
(encodeFilename(full_filename
)):
2942 if not self
._ensure
_dir
_exists
(encodeFilename(temp_filename
)):
2945 if self
._write
_description
('video', info_dict
,
2946 self
.prepare_filename(info_dict
, 'description')) is None:
2949 sub_files
= self
._write
_subtitles
(info_dict
, temp_filename
)
2950 if sub_files
is None:
2952 files_to_move
.update(dict(sub_files
))
2954 thumb_files
= self
._write
_thumbnails
(
2955 'video', info_dict
, temp_filename
, self
.prepare_filename(info_dict
, 'thumbnail'))
2956 if thumb_files
is None:
2958 files_to_move
.update(dict(thumb_files
))
2960 infofn
= self
.prepare_filename(info_dict
, 'infojson')
2961 _infojson_written
= self
._write
_info
_json
('video', info_dict
, infofn
)
2962 if _infojson_written
:
2963 info_dict
['infojson_filename'] = infofn
2964 # For backward compatibility, even though it was a private field
2965 info_dict
['__infojson_filename'] = infofn
2966 elif _infojson_written
is None:
2969 # Note: Annotations are deprecated
2971 if self
.params
.get('writeannotations', False):
2972 annofn
= self
.prepare_filename(info_dict
, 'annotation')
2974 if not self
._ensure
_dir
_exists
(encodeFilename(annofn
)):
2976 if not self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(annofn
)):
2977 self
.to_screen('[info] Video annotations are already present')
2978 elif not info_dict
.get('annotations'):
2979 self
.report_warning('There are no annotations to write.')
2982 self
.to_screen('[info] Writing video annotations to: ' + annofn
)
2983 with open(encodeFilename(annofn
), 'w', encoding
='utf-8') as annofile
:
2984 annofile
.write(info_dict
['annotations'])
2985 except (KeyError, TypeError):
2986 self
.report_warning('There are no annotations to write.')
2988 self
.report_error('Cannot write annotations file: ' + annofn
)
2991 # Write internet shortcut files
2992 def _write_link_file(link_type
):
2993 url
= try_get(info_dict
['webpage_url'], iri_to_uri
)
2995 self
.report_warning(
2996 f
'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2998 linkfn
= replace_extension(self
.prepare_filename(info_dict
, 'link'), link_type
, info_dict
.get('ext'))
2999 if not self
._ensure
_dir
_exists
(encodeFilename(linkfn
)):
3001 if self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(linkfn
)):
3002 self
.to_screen(f
'[info] Internet shortcut (.{link_type}) is already present')
3005 self
.to_screen(f
'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
3006 with open(encodeFilename(to_high_limit_path(linkfn
)), 'w', encoding
='utf-8',
3007 newline
='\r\n' if link_type
== 'url' else '\n') as linkfile
:
3008 template_vars
= {'url': url}
3009 if link_type
== 'desktop':
3010 template_vars
['filename'] = linkfn
[:-(len(link_type
) + 1)]
3011 linkfile
.write(LINK_TEMPLATES
[link_type
] % template_vars
)
3013 self
.report_error(f
'Cannot write internet shortcut {linkfn}')
3018 'url': self
.params
.get('writeurllink'),
3019 'webloc': self
.params
.get('writewebloclink'),
3020 'desktop': self
.params
.get('writedesktoplink'),
3022 if self
.params
.get('writelink'):
3023 link_type
= ('webloc' if sys
.platform
== 'darwin'
3024 else 'desktop' if sys
.platform
.startswith('linux')
3026 write_links
[link_type
] = True
3028 if any(should_write
and not _write_link_file(link_type
)
3029 for link_type
, should_write
in write_links
.items()):
3032 def replace_info_dict(new_info
):
3034 if new_info
== info_dict
:
3037 info_dict
.update(new_info
)
3039 new_info
, files_to_move
= self
.pre_process(info_dict
, 'before_dl', files_to_move
)
3040 replace_info_dict(new_info
)
3042 if self
.params
.get('skip_download'):
3043 info_dict
['filepath'] = temp_filename
3044 info_dict
['__finaldir'] = os
.path
.dirname(os
.path
.abspath(encodeFilename(full_filename
)))
3045 info_dict
['__files_to_move'] = files_to_move
3046 replace_info_dict(self
.run_pp(MoveFilesAfterDownloadPP(self
, False), info_dict
))
3047 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
3050 info_dict
.setdefault('__postprocessors', [])

                def existing_video_file(*filepaths):
                    ext = info_dict.get('ext')
                    converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                    file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                              default_overwrite=False)
                    if file:
                        info_dict['ext'] = os.path.splitext(file)[1][1:]
                    return file
                fd, success = None, True
                if info_dict.get('protocol') or info_dict.get('url'):
                    fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                    if fd is not FFmpegFD and (
                            info_dict.get('section_start') or info_dict.get('section_end')):
                        msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
                               else 'You have requested downloading the video partially, but ffmpeg is not installed')
                        self.report_error(f'{msg}. Aborting')
                        return
                if info_dict.get('requested_formats') is not None:
                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False
                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in requested_formats if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')
                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success

                    if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                        info_dict['__postprocessors'].append(merger)
                        info_dict['__files_to_merge'] = downloaded
                        # Even if there were no downloads, it is being merged only now
                        info_dict['__real_download'] = True
                    else:
                        for file in downloaded:
                            files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return

        self._raise_pending_errors(info_dict)
        if success and full_filename != '-':

            def fixup():
                do_fixup = True
                fixup_policy = self.params.get('fixup')
                vid = info_dict['id']

                if fixup_policy in ('ignore', 'never'):
                    return
                elif fixup_policy == 'warn':
                    do_fixup = 'warn'
                elif fixup_policy != 'force':
                    assert fixup_policy in ('detect_or_warn', None)
                    if not info_dict.get('__real_download'):
                        do_fixup = False

                def ffmpeg_fixup(cndn, msg, cls):
                    if not (do_fixup and cndn):
                        return
                    elif do_fixup == 'warn':
                        self.report_warning(f'{vid}: {msg}')
                        return
                    pp = cls(self)
                    if pp.available:
                        info_dict['__postprocessors'].append(pp)
                    else:
                        self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                stretched_ratio = info_dict.get('stretched_ratio')
                ffmpeg_fixup(stretched_ratio not in (1, None),
                             f'Non-uniform pixel ratio {stretched_ratio}',
                             FFmpegFixupStretchedPP)

                downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                downloader = downloader.FD_NAME if downloader else None

                ext = info_dict.get('ext')
                postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any((
                    isinstance(pp, FFmpegVideoConvertorPP)
                    and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None)
                ) for pp in self._pps['post_process'])

                if not postprocessed_by_ffmpeg:
                    ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash',
                                 'writing DASH m4a. Only some players support this container',
                                 FFmpegFixupM4aPP)
                    ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                 FFmpegFixupM3u8PP)
                    ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
                                 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

            fixup()
            try:
                replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
            except PostProcessingError as err:
                self.report_error('Postprocessing: %s' % str(err))
                return
            try:
                for ph in self._post_hooks:
                    ph(info_dict['filepath'])
            except Exception as err:
                self.report_error('post hooks: %s' % str(err))
                return
            info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper
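
    # The wrapper above reports per-URL failures instead of propagating them;
    # DownloadCancelled aborts the whole run unless --break-per-url is set, in
    # which case only the current URL is abandoned. It also emits the combined
    # JSON output for --dump-single-json.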

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
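
    # Usage sketch (illustrative; the options shown are standard YoutubeDL params):
    #   with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])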

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
        try:
            self.__download_wrapper(self.process_ie_result)(info, download=True)
        except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
            if not isinstance(e, EntryNotInPlaylist):
                self.to_stderr('\r')
            webpage_url = info.get('webpage_url')
            if webpage_url is not None:
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                return self.download([webpage_url])
            else:
                raise
        return self._download_retcode
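
    # API counterpart of --load-info-json: replays a previously written
    # .info.json through process_ie_result, and falls back to re-extracting
    # webpage_url if the stored info can no longer be downloaded.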

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
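
    # e.g. json.dumps(YoutubeDL.sanitize_info(info, remove_private_keys=True))
    # is JSON-safe: non-serializable values are always replaced by their repr(),
    # and private ('__'-prefixed) and internal keys are dropped when requested.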

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict
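
    # Files a postprocessor marks for deletion are kept (and only moved) when
    # -k/--keep-video is given; otherwise they are deleted with a hint to pass -k.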

    def run_all_pps(self, key, info, *, additional_pps=None):
        self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info
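
    # Ordering matters here: per-video postprocessors (e.g. the merger and
    # ffmpeg fixups queued in info['__postprocessors']) run before the
    # user-configured postprocessors registered for the same key.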

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if video_id is None:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return make_archive_id(extractor, video_id)

    def in_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return False

        vid_ids = [self._make_archive_id(info_dict)]
        vid_ids.extend(info_dict.get('_old_archive_ids', []))
        return any(id_ in self.archive for id_ in vid_ids)

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        with locked_file(fn, 'a', encoding='utf-8') as archive_file:
            archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
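
    # Archive entries are one '<extractor> <video id>' line each (see
    # make_archive_id); in_download_archive also checks the entry's
    # _old_archive_ids so IDs recorded under older extractor names still match.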

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
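
    # e.g. {'width': 1920, 'height': 1080} -> '1920x1080', {'height': 720} ->
    # '720p', audio-only formats -> 'audio only'; otherwise the given default.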

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def render_formats_table(self, info_dict):
        if not info_dict.get('formats') and not info_dict.get('url'):
            return None

        formats = info_dict.get('formats', [info_dict])
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f)
                ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        def simplified_codec(f, field):
            assert field in ('acodec', 'vcodec')
            codec = f.get(field, 'unknown')
            if not codec:
                return 'unknown'
            elif codec != 'none':
                return '.'.join(codec.split('.')[:4])

            if field == 'vcodec' and f.get('acodec') == 'none':
                return self._format_out('images', self.Styles.SUPPRESS)
            elif field == 'acodec' and f.get('vcodec') == 'none':
                return ''
            return self._format_out('audio only' if field == 'vcodec' else 'video only',
                                    self.Styles.SUPPRESS)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d', func=round),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                format_field(f, 'audio_channels', '\t%s'),
                delim,
                format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                format_field(f, 'tbr', '\t%dk', func=round),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                simplified_codec(f, 'vcodec'),
                format_field(f, 'vbr', '\t%dk', func=round),
                simplified_codec(f, 'acodec'),
                format_field(f, 'abr', '\t%dk', func=round),
                format_field(f, 'asr', '\t%s', func=format_decimal_suffix),
                join_nonempty(
                    self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                    format_field(f, 'language', '[%s]'),
                    join_nonempty(format_field(f, 'format_note'),
                                  format_field(f, 'container', ignore=(None, f.get('ext'))),
                                  delim=', '),
                    delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)
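
    # Plain string URLs are wrapped in sanitized_Request, so every request
    # issued here goes through the opener built by _setup_opener (proxy,
    # cookie and redirect handling included).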

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console')
        )

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        write_debug(join_nonempty(
            'yt-dlp version', __version__,
            f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            delim=' '))
        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if plugin_extractors or plugin_postprocessors:
            write_debug('Plugins: %s' % [
                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if source == 'source':
            try:
                stdout, _, _ = Popen.run(
                    ['git', 'rev-parse', '--short', 'HEAD'],
                    text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                if re.fullmatch('[0-9a-f]+', stdout.strip()):
                    write_debug(f'Git HEAD: {stdout.strip()}')
            except Exception:
                with contextlib.suppress(Exception):
                    sys.exc_clear()

        write_debug(system_identifier())

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .compat.compat_utils import get_package_info
        from .dependencies import available_dependencies

        write_debug('Optional libraries: %s' % (', '.join(sorted({
            join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
        })) or 'none'))

        self._setup_opener()
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode()
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

    def _setup_opener(self):
        if hasattr(self, '_opener'):
            return
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
.params
.get('cookiesfrombrowser')
3755 opts_cookiefile
= self
.params
.get('cookiefile')
3756 opts_proxy
= self
.params
.get('proxy')
3758 self
.cookiejar
= load_cookies(opts_cookiefile
, opts_cookiesfrombrowser
, self
)
3760 cookie_processor
= YoutubeDLCookieProcessor(self
.cookiejar
)
3761 if opts_proxy
is not None:
3762 if opts_proxy
== '':
3765 proxies
= {'http': opts_proxy, 'https': opts_proxy}
3767 proxies
= urllib
.request
.getproxies()
3768 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3769 if 'http' in proxies
and 'https' not in proxies
:
3770 proxies
['https'] = proxies
['http']
3771 proxy_handler
= PerRequestProxyHandler(proxies
)
3773 debuglevel
= 1 if self
.params
.get('debug_printtraffic') else 0
3774 https_handler
= make_HTTPS_handler(self
.params
, debuglevel
=debuglevel
)
3775 ydlh
= YoutubeDLHandler(self
.params
, debuglevel
=debuglevel
)
3776 redirect_handler
= YoutubeDLRedirectHandler()
3777 data_handler
= urllib
.request
.DataHandler()
3779 # When passing our own FileHandler instance, build_opener won't add the
3780 # default FileHandler and allows us to disable the file protocol, which
3781 # can be used for malicious purposes (see
3782 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3783 file_handler
= urllib
.request
.FileHandler()
3785 def file_open(*args
, **kwargs
):
3786 raise urllib
.error
.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3787 file_handler
.file_open
= file_open
3789 opener
= urllib
.request
.build_opener(
3790 proxy_handler
, https_handler
, cookie_processor
, ydlh
, redirect_handler
, data_handler
, file_handler
)
3792 # Delete the default user-agent header, which would otherwise apply in
3793 # cases where our custom HTTP handler doesn't come into play
3794 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3795 opener
.addheaders
= []
3796 self
._opener
= opener
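
    # The opener is built once and cached on the instance (see the hasattr
    # guard above), so repeated _setup_opener() calls are no-ops.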

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded
        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
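
    # encode() above relies on this: the 'encoding' param (--encoding) takes
    # precedence; otherwise the system's preferred encoding is used.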

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and return True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and return True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.report_warning(f'There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret

        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret

        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                        raise DownloadError(msg)
                    self.report_warning(msg)
        return ret

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all:
                break
        return ret