from string import ascii_letters

from .cache import Cache
from .compat import HAS_LEGACY as compat_has_legacy
from .compat import compat_os_name, compat_shlex_quote
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    MoveFilesAfterDownloadPP,
)
from .update import detect_variant
from .utils import (
    PerRequestProxyHandler,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
    format_decimal_suffix,
    register_socks_protocols,
    remove_terminal_sequences,
    supports_terminal_sequences,
    windows_enable_vt_mode,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how
    to extract all the needed information itself (that is the task of
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL object also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".
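
    A minimal usage sketch (illustrative only; the options used here are
    documented under "Available options" below):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])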

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
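                       For example (illustrative): {'default': '%(title)s [%(id)s].%(ext)s'}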
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       videos in a playlist.
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
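                       For example (a sketch; the exact arguments each
                       postprocessor accepts are documented in its own class):
                       [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'},
                        {'key': 'FFmpegMetadata', 'when': 'post_process'}]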
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this.
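                       For example, a sketch that only uses documented info_dict keys:

                           def match_filter(info_dict, *, incomplete):
                               if (info_dict.get('duration') or 0) > 3600:
                                   return 'Skipping video longer than an hour'
                               return None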
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
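                       Eg (illustrative): {'m3u8': 'ffmpeg', 'default': 'aria2c'}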
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
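                       Eg (illustrative): {'http': lambda n: 2 ** n}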
    download_ranges:   A function that gets called for every video with the signature
                       (info_dict, *, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded. Each Section contains:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)
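                       For example, a sketch that downloads only the first minute
                       (assuming plain dicts with the above keys are accepted):

                           def download_ranges(info_dict, *, ydl):
                               return [{'start_time': 0, 'end_time': 60}]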

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used
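                       Eg (illustrative): {'ffmpeg': ['-nostats'], 'default': []}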

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    # NB: Keep in sync with the docstring of extractor/common.py
    _format_fields = {
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
    }
    _format_selection_exts = {
        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
        'video': {'mp4', 'flv', 'webm', '3gp'},
        'storyboards': {'mhtml'},
    }

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 6), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = 'Support for Python version %d.%d has been deprecated and will break in future versions of yt-dlp'
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if not compat_has_legacy:
            self.params['compat_opts'].add('no-compat-legacy')
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        EMPHASIS='light blue',
        FILENAME='green',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                return fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))

        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na

            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._get_info_extractor_class(ie_key)}
        else:
            ies = self._ies

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def _handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            while True:
                try:
                    return func(self, *args, **kwargs)
                except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                    raise
                except ReExtractInfo as e:
                    if e.expected:
                        self.to_screen(f'{e}; Re-extracting data')
                    else:
                        self.to_stderr('\r')
                        self.report_warning(f'{e}; Re-extracting data')
                    continue
                except GeoRestrictedError as e:
                    msg = e.msg
                    if e.countries:
                        msg += '\nThis video is available in %s.' % ', '.join(
                            map(ISO3166Utils.short2full, e.countries))
                    msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                    self.report_error(msg)
                except ExtractorError as e:  # An error we somewhat expected
                    self.report_error(str(e), e.format_traceback())
                except Exception as e:
                    if self.params.get('ignoreerrors'):
                        self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                    else:
                        raise
                break
        return wrapper

    def _wait_for_video(self, ie_result):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
            self.report_warning('Release time of video is not known')
        elif (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise

    @_handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result
    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
            })
        webpage_url = ie_result.get('webpage_url')
        if webpage_url:
            self.add_extra_info(ie_result, {
                'webpage_url_basename': url_basename(webpage_url),
                'webpage_url_domain': get_domain(webpage_url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })
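    # Illustrative sketch (not part of the original file): for a hypothetical
    # extractor whose IE_NAME is 'example' handling 'https://example.com/watch/42',
    # the helper above would merge roughly the following keys into the result:
    #
    #   {
    #       'webpage_url': 'https://example.com/watch/42',
    #       'original_url': 'https://example.com/watch/42',
    #       'webpage_url_basename': url_basename('https://example.com/watch/42'),
    #       'webpage_url_domain': get_domain('https://example.com/watch/42'),
    #       'extractor': 'example',
    #       'extractor_key': 'Example',
    #   }
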
    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                info_copy, _ = self.pre_process(info_copy)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                self._raise_pending_errors(info_copy)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            self._raise_pending_errors(ie_result)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info=extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            exempted_fields = {'_type', 'url', 'ie_key'}
            if not ie_result.get('section_end') and ie_result.get('section_start') is None:
                # For video clips, the id etc of the clip extractor should be used
                exempted_fields |= {'id', 'extractor', 'extractor_key'}

            new_result = info.copy()
            new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._fill_common_fields(ie_result, False)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'webpage_url_domain': get_domain(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    @staticmethod
    def _playlist_infodict(ie_result, **kwargs):
        return {
            **ie_result,
            'playlist': ie_result.get('title') or ie_result.get('id'),
            'playlist_id': ie_result.get('id'),
            'playlist_title': ie_result.get('title'),
            'playlist_uploader': ie_result.get('uploader'),
            'playlist_uploader_id': ie_result.get('uploader_id'),
            'playlist_index': 0,
            **kwargs,
        }
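    # Illustrative sketch (not part of the original file): given a playlist-level
    # ie_result such as {'id': 'PL123', 'title': 'My list', 'uploader': 'someone'},
    # _playlist_infodict(ie_result, n_entries=10) returns roughly the same mapping
    # extended with 'playlist', 'playlist_id', 'playlist_title', 'playlist_uploader',
    # 'playlist_uploader_id', 'playlist_index' (0) and the extra 'n_entries' key.
    # This is the dict the playlist info-json, description and thumbnail filenames
    # are rendered from in __process_playlist below.
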
    def __process_playlist(self, ie_result, download):
        """Process each entry in the playlist"""
        title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
        self.to_screen(f'[download] Downloading playlist: {title}')

        all_entries = PlaylistEntries(self, ie_result)
        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

        lazy = self.params.get('lazy_playlist')
        if lazy:
            resolved_entries, n_entries = [], 'N/A'
            ie_result['requested_entries'], ie_result['entries'] = None, None
        else:
            entries = resolved_entries = list(entries)
            n_entries = len(resolved_entries)
            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
        if not ie_result.get('playlist_count'):
            # Better to do this after potentially exhausting entries
            ie_result['playlist_count'] = all_entries.get_full_count()

        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if lazy:
            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
        elif self.params.get('playlistreverse'):
            entries.reverse()
        elif self.params.get('playlistrandom'):
            random.shuffle(entries)

        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
                       f'{format_field(ie_result, "playlist_count", " of %s")}')

        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        for i, (playlist_index, entry) in enumerate(entries):
            if lazy:
                resolved_entries.append((playlist_index, entry))

            # TODO: Add auto-generated fields
            if not entry or self._match_entry(entry, incomplete=True) is not None:
                continue

            self.to_screen('[download] Downloading video %s of %s' % (
                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
                playlist_index = ie_result['requested_entries'][i]

            entry_result = self.__process_iterable_entry(entry, download, {
                'n_entries': int_or_none(n_entries),
                '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
                'playlist_count': ie_result.get('playlist_count'),
                'playlist_index': playlist_index,
                'playlist_autonumber': i + 1,
                'playlist': title,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            })
            if not entry_result:
                failures += 1
            if failures >= max_failures:
                self.report_error(
                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                break
            resolved_entries[i] = (playlist_index, entry_result)

        # Update with processed data
        ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])

        # Write the updated info to json
        if _infojson_written is True and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {title}')
        return ie_result
    @_handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
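    # Illustrative sketch (not part of the original file): _build_format_filter
    # turns strings from the --format filter syntax into predicates, e.g.
    #
    #   f = self._build_format_filter('height<=720')   # numeric comparison
    #   f({'height': 480})    # -> True
    #   f({'height': 1080})   # -> False
    #   f({})                 # -> falsy, unless written as 'height<=?720'
    #
    #   g = self._build_format_filter('ext=mp4')       # string comparison
    #   g({'ext': 'mp4'})     # -> True
    #
    # The return values shown are an approximation of the predicate's behaviour,
    # not authoritative documentation.
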
    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live') and not self.params.get('live_from_start')
                or self.params['outtmpl']['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params['compat_opts'])

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
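    # Illustrative note (not part of the original file): the three specs returned
    # above are ordinary --format expressions. 'bestvideo*+bestaudio/best' means
    # "merge the best video-containing format with the best audio, falling back to
    # the single best pre-merged format", which build_format_selector below parses
    # into a selector function. 'best/bestvideo+bestaudio' is preferred when merging
    # is not an option (no ffmpeg merger, output to '-', or a live stream without
    # --live-from-start).
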
1893 def build_format_selector(self
, format_spec
):
1894 def syntax_error(note
, start
):
1896 'Invalid format specification: '
1897 '{}\n\t{}\n\t{}^'.format(note
, format_spec
, ' ' * start
[1]))
1898 return SyntaxError(message
)
1900 PICKFIRST
= 'PICKFIRST'
1904 FormatSelector
= collections
.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1906 allow_multiple_streams
= {'audio': self
.params
.get('allow_multiple_audio_streams', False),
1907 'video': self
.params
.get('allow_multiple_video_streams', False)}
1909 check_formats
= self
.params
.get('check_formats') == 'selected'
1911 def _parse_filter(tokens
):
1913 for type, string
, start
, _
, _
in tokens
:
1914 if type == tokenize
.OP
and string
== ']':
1915 return ''.join(filter_parts
)
1917 filter_parts
.append(string
)
1919 def _remove_unused_ops(tokens
):
1920 # Remove operators that we don't use and join them with the surrounding strings
1921 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1922 ALLOWED_OPS
= ('/', '+', ',', '(', ')')
1923 last_string
, last_start
, last_end
, last_line
= None, None, None, None
1924 for type, string
, start
, end
, line
in tokens
:
1925 if type == tokenize
.OP
and string
== '[':
1927 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1929 yield type, string
, start
, end
, line
1930 # everything inside brackets will be handled by _parse_filter
1931 for type, string
, start
, end
, line
in tokens
:
1932 yield type, string
, start
, end
, line
1933 if type == tokenize
.OP
and string
== ']':
1935 elif type == tokenize
.OP
and string
in ALLOWED_OPS
:
1937 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1939 yield type, string
, start
, end
, line
1940 elif type in [tokenize
.NAME
, tokenize
.NUMBER
, tokenize
.OP
]:
1942 last_string
= string
1946 last_string
+= string
1948 yield tokenize
.NAME
, last_string
, last_start
, last_end
, last_line
1950 def _parse_format_selection(tokens
, inside_merge
=False, inside_choice
=False, inside_group
=False):
1952 current_selector
= None
1953 for type, string
, start
, _
, _
in tokens
:
1954 # ENCODING is only defined in python 3.x
1955 if type == getattr(tokenize
, 'ENCODING', None):
1957 elif type in [tokenize
.NAME
, tokenize
.NUMBER
]:
1958 current_selector
= FormatSelector(SINGLE
, string
, [])
1959 elif type == tokenize
.OP
:
1961 if not inside_group
:
1962 # ')' will be handled by the parentheses group
1963 tokens
.restore_last_token()
1965 elif inside_merge
and string
in ['/', ',']:
1966 tokens
.restore_last_token()
1968 elif inside_choice
and string
== ',':
1969 tokens
.restore_last_token()
1972 if not current_selector
:
1973 raise syntax_error('"," must follow a format selector', start
)
1974 selectors
.append(current_selector
)
1975 current_selector
= None
1977 if not current_selector
:
1978 raise syntax_error('"/" must follow a format selector', start
)
1979 first_choice
= current_selector
1980 second_choice
= _parse_format_selection(tokens
, inside_choice
=True)
1981 current_selector
= FormatSelector(PICKFIRST
, (first_choice
, second_choice
), [])
1983 if not current_selector
:
1984 current_selector
= FormatSelector(SINGLE
, 'best', [])
1985 format_filter
= _parse_filter(tokens
)
1986 current_selector
.filters
.append(format_filter
)
1988 if current_selector
:
1989 raise syntax_error('Unexpected "("', start
)
1990 group
= _parse_format_selection(tokens
, inside_group
=True)
1991 current_selector
= FormatSelector(GROUP
, group
, [])
1993 if not current_selector
:
1994 raise syntax_error('Unexpected "+"', start
)
1995 selector_1
= current_selector
1996 selector_2
= _parse_format_selection(tokens
, inside_merge
=True)
1998 raise syntax_error('Expected a selector', start
)
1999 current_selector
= FormatSelector(MERGE
, (selector_1
, selector_2
), [])
2001 raise syntax_error(f
'Operator not recognized: "{string}"', start
)
2002 elif type == tokenize
.ENDMARKER
:
2004 if current_selector
:
2005 selectors
.append(current_selector
)
2008 def _merge(formats_pair
):
2009 format_1
, format_2
= formats_pair
2012 formats_info
.extend(format_1
.get('requested_formats', (format_1
,)))
2013 formats_info
.extend(format_2
.get('requested_formats', (format_2
,)))
2015 if not allow_multiple_streams
['video'] or not allow_multiple_streams
['audio']:
2016 get_no_more
= {'video': False, 'audio': False}
2017 for (i
, fmt_info
) in enumerate(formats_info
):
2018 if fmt_info
.get('acodec') == fmt_info
.get('vcodec') == 'none':
2021 for aud_vid
in ['audio', 'video']:
2022 if not allow_multiple_streams
[aud_vid
] and fmt_info
.get(aud_vid
[0] + 'codec') != 'none':
2023 if get_no_more
[aud_vid
]:
2026 get_no_more
[aud_vid
] = True
2028 if len(formats_info
) == 1:
2029 return formats_info
[0]
2031 video_fmts
= [fmt_info
for fmt_info
in formats_info
if fmt_info
.get('vcodec') != 'none']
2032 audio_fmts
= [fmt_info
for fmt_info
in formats_info
if fmt_info
.get('acodec') != 'none']
2034 the_only_video
= video_fmts
[0] if len(video_fmts
) == 1 else None
2035 the_only_audio
= audio_fmts
[0] if len(audio_fmts
) == 1 else None
2037 output_ext
= self
.params
.get('merge_output_format')
2040 output_ext
= the_only_video
['ext']
2041 elif the_only_audio
and not video_fmts
:
2042 output_ext
= the_only_audio
['ext']
2046 filtered
= lambda *keys
: filter(None, (traverse_obj(fmt
, *keys
) for fmt
in formats_info
))
2049 'requested_formats': formats_info
,
2050 'format': '+'.join(filtered('format')),
2051 'format_id': '+'.join(filtered('format_id')),
2053 'protocol': '+'.join(map(determine_protocol
, formats_info
)),
2054 'language': '+'.join(orderedSet(filtered('language'))) or None,
2055 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2056 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2057 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2062 'width': the_only_video
.get('width'),
2063 'height': the_only_video
.get('height'),
2064 'resolution': the_only_video
.get('resolution') or self
.format_resolution(the_only_video
),
2065 'fps': the_only_video
.get('fps'),
2066 'dynamic_range': the_only_video
.get('dynamic_range'),
2067 'vcodec': the_only_video
.get('vcodec'),
2068 'vbr': the_only_video
.get('vbr'),
2069 'stretched_ratio': the_only_video
.get('stretched_ratio'),
2074 'acodec': the_only_audio
.get('acodec'),
2075 'abr': the_only_audio
.get('abr'),
2076 'asr': the_only_audio
.get('asr'),
2081 def _check_formats(formats
):
2082 if not check_formats
:
2085 yield from self
._check
_formats
(formats
)
2087 def _build_selector_function(selector
):
2088 if isinstance(selector
, list): # ,
2089 fs
= [_build_selector_function(s
) for s
in selector
]
2091 def selector_function(ctx
):
2094 return selector_function
2096 elif selector
.type == GROUP
: # ()
2097 selector_function
= _build_selector_function(selector
.selector
)
2099 elif selector
.type == PICKFIRST
: # /
2100 fs
= [_build_selector_function(s
) for s
in selector
.selector
]
2102 def selector_function(ctx
):
2104 picked_formats
= list(f(ctx
))
2106 return picked_formats
2109 elif selector
.type == MERGE
: # +
2110 selector_1
, selector_2
= map(_build_selector_function
, selector
.selector
)
2112 def selector_function(ctx
):
2113 for pair
in itertools
.product(selector_1(ctx
), selector_2(ctx
)):
2116 elif selector
.type == SINGLE
: # atom
2117 format_spec
= selector
.selector
or 'best'
2119 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2120 if format_spec
== 'all':
2121 def selector_function(ctx
):
2122 yield from _check_formats(ctx
['formats'][::-1])
2123 elif format_spec
== 'mergeall':
2124 def selector_function(ctx
):
2125 formats
= list(_check_formats(
2126 f
for f
in ctx
['formats'] if f
.get('vcodec') != 'none' or f
.get('acodec') != 'none'))
2129 merged_format
= formats
[-1]
2130 for f
in formats
[-2::-1]:
2131 merged_format
= _merge((merged_format
, f
))
2135 format_fallback
, seperate_fallback
, format_reverse
, format_idx
= False, None, True, 1
2137 r
'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2139 if mobj
is not None:
2140 format_idx
= int_or_none(mobj
.group('n'), default
=1)
2141 format_reverse
= mobj
.group('bw')[0] == 'b'
2142 format_type
= (mobj
.group('type') or [None])[0]
2143 not_format_type
= {'v': 'a', 'a': 'v'}
.get(format_type
)
2144 format_modified
= mobj
.group('mod') is not None
2146 format_fallback
= not format_type
and not format_modified
# for b, w
2148 (lambda f
: f
.get('%scodec' % format_type
) != 'none')
2149 if format_type
and format_modified
# bv*, ba*, wv*, wa*
2150 else (lambda f
: f
.get('%scodec' % not_format_type
) == 'none')
2151 if format_type
# bv, ba, wv, wa
2152 else (lambda f
: f
.get('vcodec') != 'none' and f
.get('acodec') != 'none')
2153 if not format_modified
# b, w
2154 else lambda f
: True) # b*, w*
2155 filter_f
= lambda f
: _filter_f(f
) and (
2156 f
.get('vcodec') != 'none' or f
.get('acodec') != 'none')
2158 if format_spec
in self
._format
_selection
_exts
['audio']:
2159 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') != 'none'
2160 elif format_spec
in self
._format
_selection
_exts
['video']:
2161 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') != 'none' and f
.get('vcodec') != 'none'
2162 seperate_fallback
= lambda f
: f
.get('ext') == format_spec
and f
.get('vcodec') != 'none'
2163 elif format_spec
in self
._format
_selection
_exts
['storyboards']:
2164 filter_f
= lambda f
: f
.get('ext') == format_spec
and f
.get('acodec') == 'none' and f
.get('vcodec') == 'none'
2166 filter_f
= lambda f
: f
.get('format_id') == format_spec
# id
2168 def selector_function(ctx
):
2169 formats
= list(ctx
['formats'])
2170 matches
= list(filter(filter_f
, formats
)) if filter_f
is not None else formats
2172 if format_fallback
and ctx
['incomplete_formats']:
2173 # for extractors with incomplete formats (audio only (soundcloud)
2174 # or video only (imgur)) best/worst will fallback to
2175 # best/worst {video,audio}-only format
2177 elif seperate_fallback
and not ctx
['has_merged_format']:
2178 # for compatibility with youtube-dl when there is no pre-merged format
2179 matches
= list(filter(seperate_fallback
, formats
))
2180 matches
= LazyList(_check_formats(matches
[::-1 if format_reverse
else 1]))
2182 yield matches
[format_idx
- 1]
2183 except LazyList
.IndexError:
2186 filters
= [self
._build
_format
_filter
(f
) for f
in selector
.filters
]
2188 def final_selector(ctx
):
2189 ctx_copy
= dict(ctx
)
2190 for _filter
in filters
:
2191 ctx_copy
['formats'] = list(filter(_filter
, ctx_copy
['formats']))
2192 return selector_function(ctx_copy
)
2193 return final_selector
2195 stream
= io
.BytesIO(format_spec
.encode())
2197 tokens
= list(_remove_unused_ops(tokenize
.tokenize(stream
.readline
)))
2198 except tokenize
.TokenError
:
2199 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec
)))
2201 class TokenIterator
:
2202 def __init__(self
, tokens
):
2203 self
.tokens
= tokens
2210 if self
.counter
>= len(self
.tokens
):
2211 raise StopIteration()
2212 value
= self
.tokens
[self
.counter
]
2218 def restore_last_token(self
):
2221 parsed_selector
= _parse_format_selection(iter(TokenIterator(tokens
)))
2222 return _build_selector_function(parsed_selector
)
    def _calc_headers(self, info_dict):
        res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})

        cookies = self._calc_cookies(info_dict['url'])
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, url):
        pr = sanitized_Request(url)
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')
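    # Illustrative sketch (not part of the original file): _calc_cookies relies on
    # the standard cookiejar machinery. Outside of YoutubeDL the same idea looks
    # roughly like this (the URL is only for illustration):
    #
    #   import http.cookiejar
    #   import urllib.request
    #   jar = http.cookiejar.CookieJar()
    #   req = urllib.request.Request('https://example.com/video')
    #   jar.add_cookie_header(req)               # fills in the Cookie header, if any
    #   cookie_hdr = req.get_header('Cookie')    # None when the jar has no match
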
    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if not thumbnails:
            return

        def check_thumbnails(thumbnails):
            for t in thumbnails:
                self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                try:
                    self.urlopen(HEADRequest(t['url']))
                except network_exceptions as err:
                    self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                    continue
                yield t

        self._sort_thumbnails(thumbnails)
        for i, t in enumerate(thumbnails):
            if t.get('id') is None:
                t['id'] = '%d' % i
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            t['url'] = sanitize_url(t['url'])

        if self.params.get('check_formats') is True:
            info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
        else:
            info_dict['thumbnails'] = thumbnails
    def _fill_common_fields(self, info_dict, is_video=True):
        # TODO: move sanitization here
        if is_video:
            # playlists are allowed to lack "title"
            title = info_dict.get('title', NO_DEFAULT)
            if title is NO_DEFAULT:
                raise ExtractorError('Missing "title" field in extractor result',
                                     video_id=info_dict['id'], ie=info_dict['extractor'])
            info_dict['fulltitle'] = title
            if not title:
                if title == '':
                    self.write_debug('Extractor gave empty title. Creating a generic title')
                else:
                    self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
                info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
                ('modified_timestamp', 'modified_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                with contextlib.suppress(ValueError, OverflowError, OSError):
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'

        info_dict['live_status'] = live_status
        for key in live_keys:
            if info_dict.get(key) is None:
                info_dict[key] = (live_status == key)

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
    def _raise_pending_errors(self, info):
        err = info.pop('__pending_error', None)
        if err:
            self.report_error(err, tb=False)
2342 def process_video_result(self
, info_dict
, download
=True):
2343 assert info_dict
.get('_type', 'video') == 'video'
2344 self
._num
_videos
+= 1
2346 if 'id' not in info_dict
:
2347 raise ExtractorError('Missing "id" field in extractor result', ie
=info_dict
['extractor'])
2348 elif not info_dict
.get('id'):
2349 raise ExtractorError('Extractor failed to obtain "id"', ie
=info_dict
['extractor'])
2351 def report_force_conversion(field
, field_not
, conversion
):
2352 self
.report_warning(
2353 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2354 % (field
, field_not
, conversion
))
2356 def sanitize_string_field(info
, string_field
):
2357 field
= info
.get(string_field
)
2358 if field
is None or isinstance(field
, str):
2360 report_force_conversion(string_field
, 'a string', 'string')
2361 info
[string_field
] = str(field
)
2363 def sanitize_numeric_fields(info
):
2364 for numeric_field
in self
._NUMERIC
_FIELDS
:
2365 field
= info
.get(numeric_field
)
2366 if field
is None or isinstance(field
, (int, float)):
2368 report_force_conversion(numeric_field
, 'numeric', 'int')
2369 info
[numeric_field
] = int_or_none(field
)
2371 sanitize_string_field(info_dict
, 'id')
2372 sanitize_numeric_fields(info_dict
)
2373 if info_dict
.get('section_end') and info_dict
.get('section_start') is not None:
2374 info_dict
['duration'] = round(info_dict
['section_end'] - info_dict
['section_start'], 3)
2375 if (info_dict
.get('duration') or 0) <= 0 and info_dict
.pop('duration', None):
2376 self
.report_warning('"duration" field is negative, there is an error in extractor')
2378 chapters
= info_dict
.get('chapters') or []
2379 dummy_chapter
= {'end_time': 0, 'start_time': info_dict.get('duration')}
2380 for prev
, current
, next_
in zip(
2381 (dummy_chapter
, *chapters
), chapters
, (*chapters
[1:], dummy_chapter
)):
2382 if current
.get('start_time') is None:
2383 current
['start_time'] = prev
.get('end_time')
2384 if not current
.get('end_time'):
2385 current
['end_time'] = next_
.get('start_time')
2387 if 'playlist' not in info_dict
:
2388 # It isn't part of a playlist
2389 info_dict
['playlist'] = None
2390 info_dict
['playlist_index'] = None
2392 self
._sanitize
_thumbnails
(info_dict
)
2394 thumbnail
= info_dict
.get('thumbnail')
2395 thumbnails
= info_dict
.get('thumbnails')
2397 info_dict
['thumbnail'] = sanitize_url(thumbnail
)
2399 info_dict
['thumbnail'] = thumbnails
[-1]['url']
2401 if info_dict
.get('display_id') is None and 'id' in info_dict
:
2402 info_dict
['display_id'] = info_dict
['id']
2404 self
._fill
_common
_fields
(info_dict
)
2406 for cc_kind
in ('subtitles', 'automatic_captions'):
2407 cc
= info_dict
.get(cc_kind
)
2409 for _
, subtitle
in cc
.items():
2410 for subtitle_format
in subtitle
:
2411 if subtitle_format
.get('url'):
2412 subtitle_format
['url'] = sanitize_url(subtitle_format
['url'])
2413 if subtitle_format
.get('ext') is None:
2414 subtitle_format
['ext'] = determine_ext(subtitle_format
['url']).lower()
2416 automatic_captions
= info_dict
.get('automatic_captions')
2417 subtitles
= info_dict
.get('subtitles')
2419 info_dict
['requested_subtitles'] = self
.process_subtitles(
2420 info_dict
['id'], subtitles
, automatic_captions
)
2422 if info_dict
.get('formats') is None:
2423 # There's only one format available
2424 formats
= [info_dict
]
2426 formats
= info_dict
['formats']
2428 # or None ensures --clean-infojson removes it
2429 info_dict
['_has_drm'] = any(f
.get('has_drm') for f
in formats
) or None
2430 if not self
.params
.get('allow_unplayable_formats'):
2431 formats
= [f
for f
in formats
if not f
.get('has_drm')]
2432 if info_dict
['_has_drm'] and all(
2433 f
.get('acodec') == f
.get('vcodec') == 'none' for f
in formats
):
2434 self
.report_warning(
2435 'This video is DRM protected and only images are available for download. '
2436 'Use --list-formats to see them')
2438 get_from_start
= not info_dict
.get('is_live') or bool(self
.params
.get('live_from_start'))
2439 if not get_from_start
:
2440 info_dict
['title'] += ' ' + datetime
.datetime
.now().strftime('%Y-%m-%d %H:%M')
2441 if info_dict
.get('is_live') and formats
:
2442 formats
= [f
for f
in formats
if bool(f
.get('is_from_start')) == get_from_start
]
2443 if get_from_start
and not formats
:
2444 self
.raise_no_formats(info_dict
, msg
=(
2445 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2446 'If you want to download from the current time, use --no-live-from-start'))
2449 self
.raise_no_formats(info_dict
)
2451 def is_wellformed(f
):
2454 self
.report_warning(
2455 '"url" field is missing or empty - skipping format, '
2456 'there is an error in extractor')
2458 if isinstance(url
, bytes):
2459 sanitize_string_field(f
, 'url')
2462 # Filter out malformed formats for better extraction robustness
2463 formats
= list(filter(is_wellformed
, formats
))
2467 # We check that all the formats have the format and format_id fields
2468 for i
, format
in enumerate(formats
):
2469 sanitize_string_field(format
, 'format_id')
2470 sanitize_numeric_fields(format
)
2471 format
['url'] = sanitize_url(format
['url'])
2472 if not format
.get('format_id'):
2473 format
['format_id'] = str(i
)
2475 # Sanitize format_id from characters used in format selector expression
2476 format
['format_id'] = re
.sub(r
'[\s,/+\[\]()]', '_', format
['format_id'])
2477 format_id
= format
['format_id']
2478 if format_id
not in formats_dict
:
2479 formats_dict
[format_id
] = []
2480 formats_dict
[format_id
].append(format
)
2482 # Make sure all formats have unique format_id
2483 common_exts
= set(itertools
.chain(*self
._format
_selection
_exts
.values()))
2484 for format_id
, ambiguous_formats
in formats_dict
.items():
2485 ambigious_id
= len(ambiguous_formats
) > 1
2486 for i
, format
in enumerate(ambiguous_formats
):
2488 format
['format_id'] = '%s-%d' % (format_id
, i
)
2489 if format
.get('ext') is None:
2490 format
['ext'] = determine_ext(format
['url']).lower()
2491 # Ensure there is no conflict between id and ext in format selection
2492 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2493 if format
['format_id'] != format
['ext'] and format
['format_id'] in common_exts
:
2494 format
['format_id'] = 'f%s' % format
['format_id']
2496 for i
, format
in enumerate(formats
):
2497 if format
.get('format') is None:
2498 format
['format'] = '{id} - {res}{note}'.format(
2499 id=format
['format_id'],
2500 res
=self
.format_resolution(format
),
2501 note
=format_field(format
, 'format_note', ' (%s)'),
2503 if format
.get('protocol') is None:
2504 format
['protocol'] = determine_protocol(format
)
2505 if format
.get('resolution') is None:
2506 format
['resolution'] = self
.format_resolution(format
, default
=None)
2507 if format
.get('dynamic_range') is None and format
.get('vcodec') != 'none':
2508 format
['dynamic_range'] = 'SDR'
2509 if (info_dict
.get('duration') and format
.get('tbr')
2510 and not format
.get('filesize') and not format
.get('filesize_approx')):
2511 format
['filesize_approx'] = int(info_dict
['duration'] * format
['tbr'] * (1024 / 8))
2513 # Add HTTP headers, so that external programs can use them from the
2515 full_format_info
= info_dict
.copy()
2516 full_format_info
.update(format
)
2517 format
['http_headers'] = self
._calc
_headers
(full_format_info
)
2518 # Remove private housekeeping stuff
2519 if '__x_forwarded_for_ip' in info_dict
:
2520 del info_dict
['__x_forwarded_for_ip']
2522 if self
.params
.get('check_formats') is True:
2523 formats
= LazyList(self
._check
_formats
(formats
[::-1]), reverse
=True)
2525 if not formats
or formats
[0] is not info_dict
:
2526 # only set the 'formats' fields if the original info_dict list them
2527 # otherwise we end up with a circular reference, the first (and unique)
2528 # element in the 'formats' field in info_dict is info_dict itself,
2529 # which can't be exported to json
2530 info_dict
['formats'] = formats
2532 info_dict
, _
= self
.pre_process(info_dict
)
2534 if self
._match
_entry
(info_dict
, incomplete
=self
._format
_fields
) is not None:
2537 self
.post_extract(info_dict
)
2538 info_dict
, _
= self
.pre_process(info_dict
, 'after_filter')
2540 # The pre-processors may have modified the formats
2541 formats
= info_dict
.get('formats', [info_dict
])
2543 list_only
= self
.params
.get('simulate') is None and (
2544 self
.params
.get('list_thumbnails') or self
.params
.get('listformats') or self
.params
.get('listsubtitles'))
2545 interactive_format_selection
= not list_only
and self
.format_selector
== '-'
2546 if self
.params
.get('list_thumbnails'):
2547 self
.list_thumbnails(info_dict
)
2548 if self
.params
.get('listsubtitles'):
2549 if 'automatic_captions' in info_dict
:
2550 self
.list_subtitles(
2551 info_dict
['id'], automatic_captions
, 'automatic captions')
2552 self
.list_subtitles(info_dict
['id'], subtitles
, 'subtitles')
2553 if self
.params
.get('listformats') or interactive_format_selection
:
2554 self
.list_formats(info_dict
)
2556 # Without this printing, -F --print-json will not work
2557 self
.__forced
_printings
(info_dict
, self
.prepare_filename(info_dict
), incomplete
=True)
2560 format_selector
= self
.format_selector
2561 if format_selector
is None:
2562 req_format
= self
._default
_format
_spec
(info_dict
, download
=download
)
2563 self
.write_debug('Default format spec: %s' % req_format
)
2564 format_selector
= self
.build_format_selector(req_format
)
2567 if interactive_format_selection
:
2569 self
._format
_screen
('\nEnter format selector: ', self
.Styles
.EMPHASIS
))
2571 format_selector
= self
.build_format_selector(req_format
)
2572 except SyntaxError as err
:
2573 self
.report_error(err
, tb
=False, is_error
=False)
2576 formats_to_download
= list(format_selector({
2578 'has_merged_format': any('none' not in (f
.get('acodec'), f
.get('vcodec')) for f
in formats
),
2579 'incomplete_formats': (
2580 # All formats are video-only or
2581 all(f
.get('vcodec') != 'none' and f
.get('acodec') == 'none' for f
in formats
)
2582 # all formats are audio-only
2583 or all(f
.get('vcodec') == 'none' and f
.get('acodec') != 'none' for f
in formats
)),
2585 if interactive_format_selection
and not formats_to_download
:
2586 self
.report_error('Requested format is not available', tb
=False, is_error
=False)
2590 if not formats_to_download
:
2591 if not self
.params
.get('ignore_no_formats_error'):
2592 raise ExtractorError(
2593 'Requested format is not available. Use --list-formats for a list of available formats',
2594 expected
=True, video_id
=info_dict
['id'], ie
=info_dict
['extractor'])
2595 self
.report_warning('Requested format is not available')
2596 # Process what we can, even without any available formats.
2597 formats_to_download
= [{}]
2599 requested_ranges
= self
.params
.get('download_ranges')
2600 if requested_ranges
:
2601 requested_ranges
= tuple(requested_ranges(info_dict
, self
))
2603 best_format
, downloaded_formats
= formats_to_download
[-1], []
2606 def to_screen(*msg
):
2607 self
.to_screen(f
'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2609 to_screen(f
'Downloading {len(formats_to_download)} format(s):',
2610 (f
['format_id'] for f
in formats_to_download
))
2611 if requested_ranges
:
2612 to_screen(f
'Downloading {len(requested_ranges)} time ranges:',
2613 (f
'{int(c["start_time"])}-{int(c["end_time"])}' for c
in requested_ranges
))
2614 max_downloads_reached
= False
2616 for fmt
, chapter
in itertools
.product(formats_to_download
, requested_ranges
or [{}]):
2617 new_info
= self
._copy
_infodict
(info_dict
)
2618 new_info
.update(fmt
)
2619 offset
, duration
= info_dict
.get('section_start') or 0, info_dict
.get('duration') or float('inf')
2620 if chapter
or offset
:
2622 'section_start': offset
+ chapter
.get('start_time', 0),
2623 'section_end': offset
+ min(chapter
.get('end_time', duration
), duration
),
2624 'section_title': chapter
.get('title'),
2625 'section_number': chapter
.get('index'),
2627 downloaded_formats
.append(new_info
)
2629 self
.process_info(new_info
)
2630 except MaxDownloadsReached
:
2631 max_downloads_reached
= True
2632 self
._raise
_pending
_errors
(new_info
)
2633 # Remove copied info
2634 for key
, val
in tuple(new_info
.items()):
2635 if info_dict
.get(key
) == val
:
2637 if max_downloads_reached
:
2640 write_archive
= {f.get('__write_download_archive', False) for f in downloaded_formats}
2641 assert write_archive
.issubset({True, False, 'ignore'}
)
2642 if True in write_archive
and False not in write_archive
:
2643 self
.record_download_archive(info_dict
)
2645 info_dict
['requested_downloads'] = downloaded_formats
2646 info_dict
= self
.run_all_pps('after_video', info_dict
)
2647 if max_downloads_reached
:
2648 raise MaxDownloadsReached()
2650 # We update the info dict with the selected best quality format (backwards compatibility)
2651 info_dict
.update(best_format
)
2654 def process_subtitles(self
, video_id
, normal_subtitles
, automatic_captions
):
2655 """Select the requested subtitles and their format"""
2656 available_subs
, normal_sub_langs
= {}, []
2657 if normal_subtitles
and self
.params
.get('writesubtitles'):
2658 available_subs
.update(normal_subtitles
)
2659 normal_sub_langs
= tuple(normal_subtitles
.keys())
2660 if automatic_captions
and self
.params
.get('writeautomaticsub'):
2661 for lang
, cap_info
in automatic_captions
.items():
2662 if lang
not in available_subs
:
2663 available_subs
[lang
] = cap_info
2665 if (not self
.params
.get('writesubtitles') and not
2666 self
.params
.get('writeautomaticsub') or not
2670 all_sub_langs
= tuple(available_subs
.keys())
2671 if self
.params
.get('allsubtitles', False):
2672 requested_langs
= all_sub_langs
2673 elif self
.params
.get('subtitleslangs', False):
2674 # A list is used so that the order of languages will be the same as
2675 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2676 requested_langs
= []
2677 for lang_re
in self
.params
.get('subtitleslangs'):
2678 discard
= lang_re
[0] == '-'
2680 lang_re
= lang_re
[1:]
2681 if lang_re
== 'all':
2683 requested_langs
= []
2685 requested_langs
.extend(all_sub_langs
)
2687 current_langs
= filter(re
.compile(lang_re
+ '$').match
, all_sub_langs
)
2689 for lang
in current_langs
:
2690 while lang
in requested_langs
:
2691 requested_langs
.remove(lang
)
2693 requested_langs
.extend(current_langs
)
2694 requested_langs
= orderedSet(requested_langs
)
2695 elif normal_sub_langs
:
2696 requested_langs
= ['en'] if 'en' in normal_sub_langs
else normal_sub_langs
[:1]
2698 requested_langs
= ['en'] if 'en' in all_sub_langs
else all_sub_langs
[:1]
2700 self
.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs
))
2702 formats_query
= self
.params
.get('subtitlesformat', 'best')
2703 formats_preference
= formats_query
.split('/') if formats_query
else []
2705 for lang
in requested_langs
:
2706 formats
= available_subs
.get(lang
)
2708 self
.report_warning(f
'{lang} subtitles not available for {video_id}')
2710 for ext
in formats_preference
:
2714 matches
= list(filter(lambda f
: f
['ext'] == ext
, formats
))
2720 self
.report_warning(
2721 'No subtitle format found matching "%s" for language %s, '
2722 'using %s' % (formats_query
, lang
, f
['ext']))
2726 def _forceprint(self
, key
, info_dict
):
2727 if info_dict
is None:
2729 info_copy
= info_dict
.copy()
2730 info_copy
['formats_table'] = self
.render_formats_table(info_dict
)
2731 info_copy
['thumbnails_table'] = self
.render_thumbnails_table(info_dict
)
2732 info_copy
['subtitles_table'] = self
.render_subtitles_table(info_dict
.get('id'), info_dict
.get('subtitles'))
2733 info_copy
['automatic_captions_table'] = self
.render_subtitles_table(info_dict
.get('id'), info_dict
.get('automatic_captions'))
2735 def format_tmpl(tmpl
):
2736 mobj
= re
.match(r
'\w+(=?)$', tmpl
)
2737 if mobj
and mobj
.group(1):
2738 return f
'{tmpl[:-1]} = %({tmpl[:-1]})r'
2740 return f
'%({tmpl})s'
2743 for tmpl
in self
.params
['forceprint'].get(key
, []):
2744 self
.to_stdout(self
.evaluate_outtmpl(format_tmpl(tmpl
), info_copy
))
2746 for tmpl
, file_tmpl
in self
.params
['print_to_file'].get(key
, []):
2747 filename
= self
.prepare_filename(info_dict
, outtmpl
=file_tmpl
)
2748 tmpl
= format_tmpl(tmpl
)
2749 self
.to_screen(f
'[info] Writing {tmpl!r} to: {filename}')
2750 if self
._ensure
_dir
_exists
(filename
):
2751 with open(filename
, 'a', encoding
='utf-8') as f
:
2752 f
.write(self
.evaluate_outtmpl(tmpl
, info_copy
) + '\n')
2754 def __forced_printings(self
, info_dict
, filename
, incomplete
):
2755 def print_mandatory(field
, actual_field
=None):
2756 if actual_field
is None:
2757 actual_field
= field
2758 if (self
.params
.get('force%s' % field
, False)
2759 and (not incomplete
or info_dict
.get(actual_field
) is not None)):
2760 self
.to_stdout(info_dict
[actual_field
])
2762 def print_optional(field
):
2763 if (self
.params
.get('force%s' % field
, False)
2764 and info_dict
.get(field
) is not None):
2765 self
.to_stdout(info_dict
[field
])
2767 info_dict
= info_dict
.copy()
2768 if filename
is not None:
2769 info_dict
['filename'] = filename
2770 if info_dict
.get('requested_formats') is not None:
2771 # For RTMP URLs, also include the playpath
2772 info_dict
['urls'] = '\n'.join(f
['url'] + f
.get('play_path', '') for f
in info_dict
['requested_formats'])
2773 elif info_dict
.get('url'):
2774 info_dict
['urls'] = info_dict
['url'] + info_dict
.get('play_path', '')
2776 if (self
.params
.get('forcejson')
2777 or self
.params
['forceprint'].get('video')
2778 or self
.params
['print_to_file'].get('video')):
2779 self
.post_extract(info_dict
)
2780 self
._forceprint
('video', info_dict
)
2782 print_mandatory('title')
2783 print_mandatory('id')
2784 print_mandatory('url', 'urls')
2785 print_optional('thumbnail')
2786 print_optional('description')
2787 print_optional('filename')
2788 if self
.params
.get('forceduration') and info_dict
.get('duration') is not None:
2789 self
.to_stdout(formatSeconds(info_dict
['duration']))
2790 print_mandatory('format')
2792 if self
.params
.get('forcejson'):
2793 self
.to_stdout(json
.dumps(self
.sanitize_info(info_dict
)))
2795 def dl(self
, name
, info
, subtitle
=False, test
=False):
2796 if not info
.get('url'):
2797 self
.raise_no_formats(info
, True)
2800 verbose
= self
.params
.get('verbose')
2803 'quiet': self
.params
.get('quiet') or not verbose
,
2805 'noprogress': not verbose
,
2807 'skip_unavailable_fragments': False,
2808 'keep_fragments': False,
2810 '_no_ytdl_file': True,
2813 params
= self
.params
2814 fd
= get_suitable_downloader(info
, params
, to_stdout
=(name
== '-'))(self
, params
)
2816 for ph
in self
._progress
_hooks
:
2817 fd
.add_progress_hook(ph
)
2819 (f
['url'].split(',')[0] + ',<data>' if f
['url'].startswith('data:') else f
['url'])
2820 for f
in info
.get('requested_formats', []) or [info
])
2821 self
.write_debug(f
'Invoking {fd.FD_NAME} downloader on "{urls}"')
2823 # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
2824 # But it may contain objects that are not deep-copyable
2825 new_info
= self
._copy
_infodict
(info
)
2826 if new_info
.get('http_headers') is None:
2827 new_info
['http_headers'] = self
._calc
_headers
(new_info
)
2828 return fd
.download(name
, new_info
, subtitle
)
2830 def existing_file(self
, filepaths
, *, default_overwrite
=True):
2831 existing_files
= list(filter(os
.path
.exists
, orderedSet(filepaths
)))
2832 if existing_files
and not self
.params
.get('overwrites', default_overwrite
):
2833 return existing_files
[0]
2835 for file in existing_files
:
2836 self
.report_file_delete(file)
2840 def process_info(self
, info_dict
):
2841 """Process a single resolved IE result. (Modifies it in-place)"""
2843 assert info_dict
.get('_type', 'video') == 'video'
2844 original_infodict
= info_dict
2846 if 'format' not in info_dict
and 'ext' in info_dict
:
2847 info_dict
['format'] = info_dict
['ext']
2849 # This is mostly just for backward compatibility of process_info
2850 # As a side-effect, this allows for format-specific filters
2851 if self
._match
_entry
(info_dict
) is not None:
2852 info_dict
['__write_download_archive'] = 'ignore'
2855 # Does nothing under normal operation - for backward compatibility of process_info
2856 self
.post_extract(info_dict
)
2857 self
._num
_downloads
+= 1
2859 # info_dict['_filename'] needs to be set for backward compatibility
2860 info_dict
['_filename'] = full_filename
= self
.prepare_filename(info_dict
, warn
=True)
2861 temp_filename
= self
.prepare_filename(info_dict
, 'temp')
2865 self
.__forced
_printings
(info_dict
, full_filename
, incomplete
=('format' not in info_dict
))
2867 def check_max_downloads():
2868 if self
._num
_downloads
>= float(self
.params
.get('max_downloads') or 'inf'):
2869 raise MaxDownloadsReached()
2871 if self
.params
.get('simulate'):
2872 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
2873 check_max_downloads()
2876 if full_filename
is None:
2878 if not self
._ensure
_dir
_exists
(encodeFilename(full_filename
)):
2880 if not self
._ensure
_dir
_exists
(encodeFilename(temp_filename
)):
2883 if self
._write
_description
('video', info_dict
,
2884 self
.prepare_filename(info_dict
, 'description')) is None:
2887 sub_files
= self
._write
_subtitles
(info_dict
, temp_filename
)
2888 if sub_files
is None:
2890 files_to_move
.update(dict(sub_files
))
2892 thumb_files
= self
._write
_thumbnails
(
2893 'video', info_dict
, temp_filename
, self
.prepare_filename(info_dict
, 'thumbnail'))
2894 if thumb_files
is None:
2896 files_to_move
.update(dict(thumb_files
))
2898 infofn
= self
.prepare_filename(info_dict
, 'infojson')
2899 _infojson_written
= self
._write
_info
_json
('video', info_dict
, infofn
)
2900 if _infojson_written
:
2901 info_dict
['infojson_filename'] = infofn
2902 # For backward compatibility, even though it was a private field
2903 info_dict
['__infojson_filename'] = infofn
2904 elif _infojson_written
is None:
2907 # Note: Annotations are deprecated
2909 if self
.params
.get('writeannotations', False):
2910 annofn
= self
.prepare_filename(info_dict
, 'annotation')
2912 if not self
._ensure
_dir
_exists
(encodeFilename(annofn
)):
2914 if not self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(annofn
)):
2915 self
.to_screen('[info] Video annotations are already present')
2916 elif not info_dict
.get('annotations'):
2917 self
.report_warning('There are no annotations to write.')
2920 self
.to_screen('[info] Writing video annotations to: ' + annofn
)
2921 with open(encodeFilename(annofn
), 'w', encoding
='utf-8') as annofile
:
2922 annofile
.write(info_dict
['annotations'])
2923 except (KeyError, TypeError):
2924 self
.report_warning('There are no annotations to write.')
2926 self
.report_error('Cannot write annotations file: ' + annofn
)
2929 # Write internet shortcut files
2930 def _write_link_file(link_type
):
2931 url
= try_get(info_dict
['webpage_url'], iri_to_uri
)
2933 self
.report_warning(
2934 f
'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2936 linkfn
= replace_extension(self
.prepare_filename(info_dict
, 'link'), link_type
, info_dict
.get('ext'))
2937 if not self
._ensure
_dir
_exists
(encodeFilename(linkfn
)):
2939 if self
.params
.get('overwrites', True) and os
.path
.exists(encodeFilename(linkfn
)):
2940 self
.to_screen(f
'[info] Internet shortcut (.{link_type}) is already present')
2943 self
.to_screen(f
'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
2944 with open(encodeFilename(to_high_limit_path(linkfn
)), 'w', encoding
='utf-8',
2945 newline
='\r\n' if link_type
== 'url' else '\n') as linkfile
:
2946 template_vars
= {'url': url}
2947 if link_type
== 'desktop':
2948 template_vars
['filename'] = linkfn
[:-(len(link_type
) + 1)]
2949 linkfile
.write(LINK_TEMPLATES
[link_type
] % template_vars
)
2951 self
.report_error(f
'Cannot write internet shortcut {linkfn}')
2956 'url': self
.params
.get('writeurllink'),
2957 'webloc': self
.params
.get('writewebloclink'),
2958 'desktop': self
.params
.get('writedesktoplink'),
2960 if self
.params
.get('writelink'):
2961 link_type
= ('webloc' if sys
.platform
== 'darwin'
2962 else 'desktop' if sys
.platform
.startswith('linux')
2964 write_links
[link_type
] = True
2966 if any(should_write
and not _write_link_file(link_type
)
2967 for link_type
, should_write
in write_links
.items()):
2970 def replace_info_dict(new_info
):
2972 if new_info
== info_dict
:
2975 info_dict
.update(new_info
)
2977 new_info
, files_to_move
= self
.pre_process(info_dict
, 'before_dl', files_to_move
)
2978 replace_info_dict(new_info
)
2980 if self
.params
.get('skip_download'):
2981 info_dict
['filepath'] = temp_filename
2982 info_dict
['__finaldir'] = os
.path
.dirname(os
.path
.abspath(encodeFilename(full_filename
)))
2983 info_dict
['__files_to_move'] = files_to_move
2984 replace_info_dict(self
.run_pp(MoveFilesAfterDownloadPP(self
, False), info_dict
))
2985 info_dict
['__write_download_archive'] = self
.params
.get('force_write_download_archive')
2988 info_dict
.setdefault('__postprocessors', [])
2991 def existing_video_file(*filepaths
):
2992 ext
= info_dict
.get('ext')
2993 converted
= lambda file: replace_extension(file, self
.params
.get('final_ext') or ext
, ext
)
2994 file = self
.existing_file(itertools
.chain(*zip(map(converted
, filepaths
), filepaths
)),
2995 default_overwrite
=False)
2997 info_dict
['ext'] = os
.path
.splitext(file)[1][1:]
3000 fd
, success
= None, True
3001 if info_dict
.get('protocol') or info_dict
.get('url'):
3002 fd
= get_suitable_downloader(info_dict
, self
.params
, to_stdout
=temp_filename
== '-')
3003 if fd
is not FFmpegFD
and (
3004 info_dict
.get('section_start') or info_dict
.get('section_end')):
3005 msg
= ('This format cannot be partially downloaded' if FFmpegFD
.available()
3006 else 'You have requested downloading the video partially, but ffmpeg is not installed')
3007 self
.report_error(f
'{msg}. Aborting')
3010 if info_dict
.get('requested_formats') is not None:
3012 def compatible_formats(formats
):
3013 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3014 video_formats
= [format
for format
in formats
if format
.get('vcodec') != 'none']
3015 audio_formats
= [format
for format
in formats
if format
.get('acodec') != 'none']
3016 if len(video_formats
) > 2 or len(audio_formats
) > 2:
3020 exts
= {format.get('ext') for format in formats}
3022 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'}
,
3025 for ext_sets
in COMPATIBLE_EXTS
:
3026 if ext_sets
.issuperset(exts
):
3028 # TODO: Check acodec/vcodec
                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if not compatible_formats(requested_formats):
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'Requested formats are incompatible for merge and will be merged into mkv')
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):  # noqa: E721
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return f'{filename_wo_ext}.{ext}'
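                    # Illustrative note (not part of the original source): correct_ext() only
                    # normalises the extension when the current one is the old or new merge
                    # target. With old_ext='webm' and new_ext='mkv' (names from above):
                    #     correct_ext('video.webm')  -> 'video.mkv'
                    #     correct_ext('video.part')  -> 'video.part.mkv'
                    #     correct_ext('-')           -> '-'   (stdout is left untouched)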
                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False

                    merger = FFmpegMergerPP(self)
                    downloaded = []
                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif fd:
                        for f in requested_formats if fd != FFmpegFD else []:
                            f['filepath'] = fname = prepend_extension(
                                correct_ext(temp_filename, info_dict['ext']),
                                'f%s' % f['format_id'], info_dict['ext'])
                            downloaded.append(fname)
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                            if not self.params.get('ignoreerrors'):
                                self.report_error(f'{msg}. Aborting due to --abort-on-error')
                                return
                            self.report_warning(f'{msg}. The formats won\'t be merged')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success
                        if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                            info_dict['__postprocessors'].append(merger)
                            info_dict['__files_to_merge'] = downloaded
                            # Even if there were no downloads, it is being merged only now
                            info_dict['__real_download'] = True
                        else:
                            for file in downloaded:
                                files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_video_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except OSError as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
                return
        self._raise_pending_errors(info_dict)
        if success and full_filename != '-':

            def fixup():
                do_fixup = True
                fixup_policy = self.params.get('fixup')
                vid = info_dict['id']

                if fixup_policy in ('ignore', 'never'):
                    return
                elif fixup_policy == 'warn':
                    do_fixup = 'warn'
                elif fixup_policy != 'force':
                    assert fixup_policy in ('detect_or_warn', None)
                    if not info_dict.get('__real_download'):
                        do_fixup = False

                def ffmpeg_fixup(cndn, msg, cls):
                    if not (do_fixup and cndn):
                        return
                    elif do_fixup == 'warn':
                        self.report_warning(f'{vid}: {msg}')
                        return
                    pp = cls(self)
                    if pp.available:
                        info_dict['__postprocessors'].append(pp)
                    else:
                        self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
                stretched_ratio = info_dict.get('stretched_ratio')
                ffmpeg_fixup(
                    stretched_ratio not in (1, None),
                    f'Non-uniform pixel ratio {stretched_ratio}',
                    FFmpegFixupStretchedPP)

                ffmpeg_fixup(
                    (info_dict.get('requested_formats') is None
                     and info_dict.get('container') == 'm4a_dash'
                     and info_dict.get('ext') == 'm4a'),
                    'writing DASH m4a. Only some players support this container',
                    FFmpegFixupM4aPP)

                downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                downloader = downloader.FD_NAME if downloader else None

                if info_dict.get('requested_formats') is None:  # Not necessary if doing merger
                    ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
                                 or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
                                 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                 FFmpegFixupM3u8PP)
                    ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
                                 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)

            fixup()
            try:
                replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
            except PostProcessingError as err:
                self.report_error('Postprocessing: %s' % str(err))
                return
            try:
                for ph in self._post_hooks:
                    ph(info_dict['filepath'])
            except Exception as err:
                self.report_error('post hooks: %s' % str(err))
                return
            info_dict['__write_download_archive'] = True

        assert info_dict is original_infodict  # Make sure the info_dict was modified in-place
        if self.params.get('force_write_download_archive'):
            info_dict['__write_download_archive'] = True
        check_max_downloads()

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.params['outtmpl']['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
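    # Usage sketch (illustrative, not part of the module): download() is the
    # high-level entry point. The option names are real yt-dlp params, the URL
    # is a placeholder.
    #
    #     from yt_dlp import YoutubeDL
    #     with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
    #         retcode = ydl.download(['https://example.com/some-video'])
    #
    # Passing a single URL string also works because of the variadic() call above.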

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
        try:
            self.__download_wrapper(self.process_ie_result)(info, download=True)
        except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
            if not isinstance(e, EntryNotInPlaylist):
                self.to_stderr('\r')
            webpage_url = info.get('webpage_url')
            if webpage_url is not None:
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                return self.download([webpage_url])
            else:
                raise
        return self._download_retcode
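    # Illustrative sketch (not from the original source): this method backs the
    # --load-info-json option; given a previously written .info.json it re-runs
    # processing without re-extracting. The filename below is a placeholder.
    #
    #     with YoutubeDL() as ydl:
    #         ydl.download_with_info_file('video.info.json')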

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        info_dict.setdefault('_type', 'video')

        if remove_private_keys:
            reject = lambda k, v: v is None or k.startswith('__') or k in {
                'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
                'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
            }
        else:
            reject = lambda k, v: False

        def filter_fn(obj):
            if isinstance(obj, dict):
                return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
            elif isinstance(obj, (list, tuple, set, LazyList)):
                return list(map(filter_fn, obj))
            elif obj is None or isinstance(obj, (str, int, float, bool)):
                return obj
            else:
                return repr(obj)

        return filter_fn(info_dict)
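    # Illustrative example (assumed input values, not from the source): with
    # remove_private_keys=True, None values, '__'-prefixed keys and the internal
    # keys listed above are dropped, while 'epoch' and '_type' are added:
    #
    #     YoutubeDL.sanitize_info(
    #         {'id': 'abc', 'title': None, '__real_download': True, 'filepath': '/tmp/x'},
    #         remove_private_keys=True)
    #     # -> {'id': 'abc', 'epoch': <int>, '_type': 'video'}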

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
        for filename in set(filter(None, files_to_delete)):
            if msg:
                self.to_screen(msg % filename)
            try:
                os.remove(filename)
            except OSError:
                self.report_warning(f'Unable to delete file {filename}')
            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
                del info['__files_to_move'][filename]

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
            info_dict.update(post_extractor())

        actual_post_extract(info_dict or {})

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            self._delete_downloaded_files(
                *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
        return infodict

    def run_all_pps(self, key, info, *, additional_pps=None):
        self._forceprint(key, info)
        for pp in (additional_pps or []) + self._pps[key]:
            info = self.run_pp(pp, info)
        return info

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        try:
            info = self.run_all_pps(key, info)
        except PostProcessingError as err:
            msg = f'Preprocessing: {err}'
            info.setdefault('__pending_error', msg)
            self.report_error(msg, is_error=False)
        return info, info.pop('__files_to_move', None)

    def post_process(self, filename, info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}
        info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        return self.run_all_pps('after_move', info)
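    # Hedged sketch (not from the source): run_pp() expects every postprocessor's
    # run() to return (files_to_delete, infodict), which is the PostProcessor
    # contract used above. A minimal custom PP registered on an existing `ydl`
    # instance could look like (class name is hypothetical):
    #
    #     from yt_dlp.postprocessor.common import PostProcessor
    #
    #     class PrintTitlePP(PostProcessor):
    #         def run(self, info):
    #             self.to_screen(f'Finished: {info.get("title")}')
    #             return [], info          # nothing to delete, info unchanged
    #
    #     ydl.add_post_processor(PrintTitlePP(), when='post_process')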

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if video_id is None:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return f'{extractor.lower()} {video_id}'

    def in_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return False

        vid_id = self._make_archive_id(info_dict)
        if not vid_id:
            return False  # Incomplete video information

        return vid_id in self.archive

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id

        self.write_debug(f'Adding to archive: {vid_id}')
        with locked_file(fn, 'a', encoding='utf-8') as archive_file:
            archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)
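    # Illustrative note (not part of the original source): the file named by the
    # 'download_archive' option is plain text, one "<extractor> <id>" pair per
    # line as built by _make_archive_id(), e.g. (placeholder ids):
    #
    #     youtube abc123xyz
    #     vimeo 12345
    #
    # in_download_archive() then simply checks membership of that pair in self.archive.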

    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default
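    # Worked examples (illustrative, assumed inputs): precedence is
    # 'audio only' -> explicit 'resolution' -> width x height -> height -> width:
    #
    #     format_resolution({'vcodec': 'none', 'acodec': 'mp4a'})  -> 'audio only'
    #     format_resolution({'width': 1920, 'height': 1080})       -> '1920x1080'
    #     format_resolution({'height': 720})                       -> '720p'
    #     format_resolution({'width': 1280})                       -> '1280x?'
    #     format_resolution({})                                    -> 'unknown'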

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_out(header, self.Styles.HEADERS) for header in headers]
        return headers

    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def render_formats_table(self, info_dict):
        if not info_dict.get('formats') and not info_dict.get('url'):
            return None

        formats = info_dict.get('formats', [info_dict])
        if not self.params.get('listformats_table', True) is not False:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f)
                ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
            return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)

        delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
        table = [
            [
                self._format_out(format_field(f, 'format_id'), self.Styles.ID),
                format_field(f, 'ext'),
                format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                format_field(f, 'fps', '\t%d'),
                format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                delim,
                format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                format_field(f, 'tbr', '\t%dk'),
                shorten_protocol_name(f.get('protocol', '')),
                delim,
                format_field(f, 'vcodec', default='unknown').replace(
                    'none', 'images' if f.get('acodec') == 'none'
                            else self._format_out('audio only', self.Styles.SUPPRESS)),
                format_field(f, 'vbr', '\t%dk'),
                format_field(f, 'acodec', default='unknown').replace(
                    'none', '' if f.get('vcodec') == 'none'
                            else self._format_out('video only', self.Styles.SUPPRESS)),
                format_field(f, 'abr', '\t%dk'),
                format_field(f, 'asr', '\t%dHz'),
                join_nonempty(
                    self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                    format_field(f, 'language', '[%s]'),
                    join_nonempty(format_field(f, 'format_note'),
                                  format_field(f, 'container', ignore=(None, f.get('ext'))),
                                  delim=', '),
                    delim=' '),
            ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
        header_line = self._list_format_headers(
            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
            delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

        return render_table(
            header_line, table, hide_empty=True,
            delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))

    def render_thumbnails_table(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails') or [])
        if not thumbnails:
            return None
        return render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])

    def render_subtitles_table(self, video_id, subtitles):
        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        if not subtitles:
            return None
        return render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True)

    def __list_table(self, video_id, name, func, *args):
        table = func(*args)
        if not table:
            self.to_screen(f'{video_id} has no {name}')
            return
        self.to_screen(f'[info] Available {name} for {video_id}:')
        self.to_stdout(table)

    def list_formats(self, info_dict):
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)

    def list_thumbnails(self, info_dict):
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, str):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)
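    # Illustrative sketch (not from the source): urlopen() accepts either a URL
    # string or a prepared request and goes through the opener built by
    # _setup_opener() below (cookies, proxies, custom handlers). Placeholder URL:
    #
    #     body = ydl.urlopen('https://example.com/feed.json').read()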

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        # These imports can be slow. So import them only as needed
        from .extractor.extractors import _LAZY_LOADER
        from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors

        def get_encoding(stream):
            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
            if not supports_terminal_sequences(stream):
                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            self.get_encoding(),
            ', '.join(
                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
                if stream is not None and key != 'console'))

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
        source = detect_variant()
        write_debug(join_nonempty(
            'yt-dlp version', __version__,
            f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            delim=' '))
        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if plugin_extractors or plugin_postprocessors:
            write_debug('Plugins: %s' % [
                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
        if self.params['compat_opts']:
            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

        if source == 'source':
            try:
                stdout, _, _ = Popen.run(
                    ['git', 'rev-parse', '--short', 'HEAD'],
                    text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                if re.fullmatch('[0-9a-f]+', stdout.strip()):
                    write_debug(f'Git HEAD: {stdout.strip()}')
            except Exception:
                with contextlib.suppress(Exception):
                    sys.exc_clear()
3660 impl_name
= platform
.python_implementation()
3661 if impl_name
== 'PyPy' and hasattr(sys
, 'pypy_version_info'):
3662 return impl_name
+ ' version %d.%d.%d' % sys
.pypy_version_info
[:3]
3665 write_debug('Python version %s (%s %s) - %s' % (
3666 platform
.python_version(),
3667 python_implementation(),
3668 platform
.architecture()[0],
3671 exe_versions
, ffmpeg_features
= FFmpegPostProcessor
.get_versions_and_features(self
)
3672 ffmpeg_features
= {key for key, val in ffmpeg_features.items() if val}
3674 exe_versions
['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features
))
3676 exe_versions
['rtmpdump'] = rtmpdump_version()
3677 exe_versions
['phantomjs'] = PhantomJSwrapper
._version
()
3678 exe_str
= ', '.join(
3679 f
'{exe} {v}' for exe
, v
in sorted(exe_versions
.items()) if v
3681 write_debug('exe versions: %s' % exe_str
)
3683 from .compat
.compat_utils
import get_package_info
3684 from .dependencies
import available_dependencies
3686 write_debug('Optional libraries: %s' % (', '.join(sorted({
3687 join_nonempty(*get_package_info(m
)) for m
in available_dependencies
.values()
3690 self
._setup
_opener
()
3692 for handler
in self
._opener
.handlers
:
3693 if hasattr(handler
, 'proxies'):
3694 proxy_map
.update(handler
.proxies
)
3695 write_debug(f
'Proxy map: {proxy_map}')
3698 if False and self
.params
.get('call_home'):
3699 ipaddr
= self
.urlopen('https://yt-dl.org/ip').read().decode()
3700 write_debug('Public IP address: %s' % ipaddr
)
3701 latest_version
= self
.urlopen(
3702 'https://yt-dl.org/latest/version').read().decode()
3703 if version_tuple(latest_version
) > version_tuple(__version__
):
3704 self
.report_warning(
3705 'You are using an outdated version (newest version: %s)! '
3706 'See https://yt-dl.org/update if you need help updating.' %

    def _setup_opener(self):
        if hasattr(self, '_opener'):
            return
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = urllib.request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = urllib.request.DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = urllib.request.FileHandler()

        def file_open(*args, **kwargs):
            raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
        file_handler.file_open = file_open

        opener = urllib.request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
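    # Hedged example (not part of the original source): given the handlers above,
    # a single 'proxy' param routes every request, while an empty string disables
    # any environment proxies. The address below is a placeholder.
    #
    #     YoutubeDL({'proxy': 'socks5://127.0.0.1:1080'})
    #     YoutubeDL({'proxy': ''})    # direct connection, ignore env proxies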

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
            return 'exists'

        self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
        try:
            write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            return True
        except OSError:
            self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
            return None
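    # Illustrative note (not from the source): the return value distinguishes the
    # four outcomes named in the docstring - True (written), 'exists', False
    # (skipped because 'writeinfojson' is unset or there is no filename) and
    # None (error). Callers typically only need the error case, e.g.:
    #
    #     if ydl._write_info_json('video', info_dict, 'out.info.json') is None:
    #         ...  # treat as a hard error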

    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.report_warning(f'There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except OSError:
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret

        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret
        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except OSError:
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    if not self.params.get('ignoreerrors'):
                        self.report_error(msg)
                    raise DownloadError(msg)
                self.report_warning(msg)
        return ret
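    # Usage sketch (illustrative): subtitle writing is driven entirely by params;
    # the option names are real yt-dlp options, the language code is a placeholder:
    #
    #     YoutubeDL({
    #         'writesubtitles': True,       # manually provided subtitles
    #         'writeautomaticsub': True,    # auto-generated subtitles
    #         'subtitleslangs': ['en'],
    #     })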

    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for idx, t in list(enumerate(thumbnails))[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all: