import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import platform
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters

from .cache import Cache
from .compat import HAS_LEGACY as compat_has_legacy
from .compat import compat_os_name, compat_shlex_quote, compat_str
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
    EmbedThumbnailPP,
    FFmpegFixupDuplicateMoovPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    MoveFilesAfterDownloadPP,
    get_postprocessor,
)
from .update import detect_variant
from .utils import (
    DEFAULT_OUTTMPL,
    IDENTITY,
    LINK_TEMPLATES,
    NO_DEFAULT,
    NUMBER_RE,
    OUTTMPL_TYPES,
    POSTPROCESS_WHEN,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    ContentTooShortError,
    DateRange,
    DownloadCancelled,
    DownloadError,
    EntryNotInPlaylist,
    ExistingVideoReached,
    ExtractorError,
    GeoRestrictedError,
    HEADRequest,
    ISO3166Utils,
    LazyList,
    MaxDownloadsReached,
    Namespace,
    PagedList,
    PerRequestProxyHandler,
    PlaylistEntries,
    Popen,
    PostProcessingError,
    ReExtractInfo,
    RejectedVideoReached,
    SameFileError,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    age_restricted,
    args_to_str,
    date_from_str,
    determine_ext,
    determine_protocol,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    expand_path,
    filter_dict,
    float_or_none,
    format_bytes,
    format_decimal_suffix,
    format_field,
    formatSeconds,
    get_domain,
    int_or_none,
    iri_to_uri,
    join_nonempty,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    merge_headers,
    network_exceptions,
    number_of_digits,
    orderedSet,
    parse_filesize,
    platform_name,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    remove_terminal_sequences,
    render_table,
    replace_extension,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_get,
    url_basename,
    variadic,
    version_tuple,
    windows_enable_vt_mode,
    write_json_file,
    write_string,
)
from .version import RELEASE_GIT_HEAD, __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL:
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. As, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do),
    it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A dict with keys WHEN mapped to a list of templates to
                       print to stdout. The allowed keys are video or any of the
                       items in utils.POSTPROCESS_WHEN.
                       For compatibility, a single list is also accepted
    print_to_file:     A dict with keys WHEN (same as forceprint) mapped to
                       a list of tuples with (template, filename)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    prefer_free_formats: Whether to prefer video formats with free containers
                       over non-free ones of the same quality.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows-compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playlist_items:    Specific indices of playlist to download.
    playlistrandom:    Download playlist items in random order.
    lazy_playlist:     Process playlist entries as they are received.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name or text stream from where cookies should be read and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser, the profile
                       name/path from where cookies are loaded, and the name of the
                       keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
    legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
                       support RFC 5746 secure renegotiation
    nocheckcertificate: Do not verify SSL certificates
    client_certificate: Path to client certificate file in PEM format. May include the private key
    client_certificate_key: Path to private key file for client certificate
    client_certificate_password: Password for client certificate private key, if encrypted.
                       If not provided and the key is encrypted, yt-dlp will ask interactively
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    http_headers:      A dictionary of custom headers to be used for all requests
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Allowed values are
                               the entries of utils.POSTPROCESS_WHEN
                               Assumed to be 'post_process' if not given
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called for every video with the signature
                       (info_dict, *, incomplete: bool) -> Optional[str]
                       For backward compatibility with youtube-dl, the signature
                       (info_dict) -> Optional[str] is also allowed.
                       - If it returns a message, the video is ignored.
                       - If it returns None, the video is downloaded.
                       - If it returns utils.NO_DEFAULT, the user is interactively
                         asked whether to download the video.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
    retry_sleep_functions: Dictionary of functions that take the number of attempts
                       as argument and return the time to sleep in seconds.
                       Allowed keys are 'http', 'fragment', 'file_access'
    download_ranges:   A function that gets called for every video with the signature
                       (info_dict, *, ydl) -> Iterable[Section].
                       Only the returned sections will be downloaded. Each Section contains:
                       * start_time: Start time of the section in seconds
                       * end_time: End time of the section in seconds
                       * title: Section title (Optional)
                       * index: Section number (Optional)

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
    continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    mark_watched:      Mark videos watched (even with --simulate). Only for YouTube

    The following options are deprecated and may be removed in the future:

    playliststart:     - Use playlist_items
                       Playlist item to start at.
    playlistend:       - Use playlist_items
                       Playlist item to end at.
    playlistreverse:   - Use playlist_items
                       Download playlist items in reverse order.
    forceurl:          - Use forceprint
                       Force printing final URL.
    forcetitle:        - Use forceprint
                       Force printing title.
    forceid:           - Use forceprint
                       Force printing ID.
    forcethumbnail:    - Use forceprint
                       Force printing thumbnail URL.
    forcedescription:  - Use forceprint
                       Force printing description.
    forcefilename:     - Use forceprint
                       Force printing final filename.
    forceduration:     - Use forceprint
                       Force printing duration.
    allsubtitles:      - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    include_ads:       - Doesn't work
                       Download ads as well
    call_home:         - Not implemented
                       Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging.
    post_hooks:        - Register a custom postprocessor
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    prefer_ffmpeg:     - avconv support is deprecated
                       If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg.
    youtube_include_dash_manifest: - Use extractor_args
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: - Use extractor_args
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = {
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    }

    _format_fields = {
        # NB: Keep in sync with the docstring of extractor/common.py
        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
        'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
        'preference', 'language', 'language_preference', 'quality', 'source_preference',
        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
    }
    _format_selection_exts = {
        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
        'video': {'mp4', 'flv', 'webm', '3gp'},
        'storyboards': {'mhtml'},
    }

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self.params = params
        self._ies = {}
        self._ies_instances = {}
        self._pps = {k: [] for k in POSTPROCESS_WHEN}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._num_videos = 0
        self._playlist_level = 0
        self._playlist_urls = set()
        self.cache = Cache(self)

        windows_enable_vt_mode()
        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
        self._out_files = Namespace(
            out=stdout,
            error=sys.stderr,
            screen=sys.stderr if self.params.get('quiet') else stdout,
            console=None if compat_os_name == 'nt' else next(
                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
        )
        self._allow_colors = Namespace(**{
            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
            for type_, stream in self._out_files.items_ if type_ != 'console'
        })

        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 6), (3, 7)
        current_version = sys.version_info[:2]
        if current_version < MIN_RECOMMENDED:
            msg = 'Support for Python version %d.%d has been deprecated and will break in future versions of yt-dlp'
            if current_version < MIN_SUPPORTED:
                msg = 'Python version %d.%d is no longer supported'
            self.deprecation_warning(
                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
        if not compat_has_legacy:
            self.params['compat_opts'].add('no-compat-legacy')
        if 'list-formats' in self.params['compat_opts']:
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        self.params.setdefault('forceprint', {})
        self.params.setdefault('print_to_file', {})

        # Compatibility with older syntax
        if not isinstance(params['forceprint'], dict):
            self.params['forceprint'] = {'video': params['forceprint']}

        if self.params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = shutil.get_terminal_size().columns
                width_args = [] if width is None else ['-w', str(width)]
                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not self.params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self._parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            self.params.get('format') if self.params.get('format') in (None, '-')
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        # Set http_headers defaults according to std_headers
        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            self.add_post_processor(
                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                when=when)

        self._setup_opener()
        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except OSError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))
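
        # Illustrative note: each archive line pairs an extractor key with a
        # video ID, e.g. a line might look like (made-up ID):
        #   youtube BaW_jenozKc
        # See in_download_archive for how entries are matched.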

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        assert when in POSTPROCESS_WHEN, f'Invalid when={when}'
        self._pps[when].append(pp)
        pp.set_downloader(self)
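
    # Sketch of the equivalent 'postprocessors' param entry that __init__
    # turns into an add_post_processor call (the key name is an example; see
    # yt_dlp/postprocessor/__init__.py for the real list of keys):
    #
    #   ydl_opts = {'postprocessors': [
    #       {'key': 'FFmpegExtractAudio', 'when': 'post_process'},
    #   ]}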

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)
        for pps in self._pps.values():
            for pp in pps:
                pp.add_progress_hook(ph)
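
    # A minimal progress hook sketch, per the 'progress_hooks' docstring above
    # (check 'status' first and ignore unknown values):
    #
    #   def hook(d):
    #       if d['status'] == 'finished':
    #           print('Done downloading', d['filename'])
    #
    #   ydl = YoutubeDL({'progress_hooks': [hook]})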

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode()
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=None):
        """Print message to stdout"""
        if quiet is not None:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
        if skip_eol is not False:
            self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
        self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)

    def to_screen(self, message, skip_eol=False, quiet=None):
        """Print message to screen if not in quiet mode"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
            return
        if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
            return
        self._write_string(
            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
            self._out_files.screen)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)

    def _send_console_code(self, code):
        if compat_os_name == 'nt' or not self._out_files.console:
            return
        self._write_string(code, self._out_files.console)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        message = remove_terminal_sequences(message)
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        else:
            self._send_console_code(f'\033]0;{message}\007')

    def save_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[22;0t')  # Save the title on stack

    def restore_console_title(self):
        if not self.params.get('consoletitle') or self.params.get('simulate'):
            return
        self._send_console_code('\033[23;0t')  # Restore the title from stack

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None, is_error=True):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may raise an exception or
        not when errors are found, after printing the message.

        @param tb          If given, is additional traceback information
        @param is_error    Whether to raise an error according to ignoreerrors
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not is_error:
            return
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    Styles = Namespace(
        HEADERS='yellow',
        EMPHASIS='light blue',
        FILENAME='green',
        ID='green',
        DELIM='blue',
        ERROR='red',
        WARNING='yellow',
        SUPPRESS='light black',
    )

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        text = str(text)
        if test_encoding:
            original_text = text
            # handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_out(self, *args, **kwargs):
        return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)

    def _format_screen(self, *args, **kwargs):
        return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, *args, **kwargs):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

    def write_debug(self, message, only_once=False):
        '''Log a debug message, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = f'[debug] {message}'
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False, *, msg=None):
        has_drm = info.get('_has_drm')
        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
        if forced or not ignored:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or ignored or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
        self._parse_outtmpl()
        return self.params['outtmpl']

    def _parse_outtmpl(self):
        sanitize = IDENTITY
        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')

        outtmpl = self.params.setdefault('outtmpl', {})
        if not isinstance(outtmpl, dict):
            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')
        return sanitize_path(path, force=self.params.get('windowsfilenames'))
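
    # Illustrative sketch of the 'paths' dict documented above (the values
    # are made up). Relative entries are joined under 'home'; absolute ones
    # stand on their own because of os.path.join semantics:
    #
    #   ydl = YoutubeDL({'paths': {'home': '~/Videos', 'temp': '.tmp'}})
    #   ydl.get_output_path('temp', 'a.part')  # -> '<expanded ~/Videos>/.tmp/a.part'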

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$';
        # that is not what we want, since we need to keep '%%' intact for
        # the template dict substitution step. Work around it with a
        # boundary-like separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
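
    # Usage sketch: returns None when the template is well-formed, otherwise
    # the ValueError raised during the trial substitution, e.g.
    #
    #   err = YoutubeDL.validate_outtmpl('%(title)s.%(ext)s')
    #   assert err is None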

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        info_dict.pop('__postprocessors', None)
        info_dict.pop('__pending_error', None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
        @param sanitize    Whether to sanitize the output as a filename.
                           For backward compatibility, a function can also be passed
        """

        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
        info_dict['video_autonumber'] = self._num_videos
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
            (?P<negate>-)?
            (?P<fields>{FIELD_RE})
            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
            (?:>(?P<strf_format>.+?))?
            (?P<remaining>
                (?P<alternate>(?<!\\),[^|&)]+)?
                (?:&(?P<replacement>.*?))?
                (?:\|(?P<default>.*?))?
            )$''')
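
        # The grammar above roughly corresponds to template fields such as
        # (examples in the spirit of the README's OUTPUT TEMPLATE section):
        #   %(duration>%H-%M-%S)s      -- strftime-style formatting
        #   %(playlist_index+10)03d    -- simple arithmetic
        #   %(title,webpage_url)s      -- alternate fields
        #   %(view_count|0)s           -- default value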

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
            return sanitize_filename(str(value), restricted=restricted, is_id=(
                bool(re.search(r'(^|[_.])id(\.|$)', key))
                if 'filename-sanitization' in self.params['compat_opts']
                else NO_DEFAULT))

        sanitizer = sanitize if callable(sanitize) else filename_sanitizer
        sanitize = bool(sanitize)

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            return repr(obj)

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields') if mobj else ''
            value, replacement, default = None, None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                replacement = mobj['replacement']
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = f'0{field_size_compat_map[key]:d}d'

            value = default if value is None else value if replacement is None else replacement

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode() % str(value).encode()
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'D':  # decimal suffix
                num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
                value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                              factor=1024 if '#' in flags else 1000)
            elif fmt[-1] == 'S':  # filename sanitization
                value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitizer(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
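
    # Evaluation sketch (the field values are made up):
    #
    #   ydl = YoutubeDL()
    #   ydl.evaluate_outtmpl('%(title)s-%(id)s.%(ext)s',
    #                        {'title': 'a', 'id': 'b', 'ext': 'mp4'})
    #   # -> 'a-b.mp4'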

    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
        if outtmpl is None:
            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
        try:
            outtmpl = self._outtmpl_expandpath(outtmpl)
            filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
            if not filename:
                return None

            if tmpl_type in ('', 'temp'):
                final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                    filename = replace_extension(filename, ext, final_ext)
            elif tmpl_type:
                force_ext = OUTTMPL_TYPES[tmpl_type]
                if force_ext:
                    filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                no_ext, *ext = filename.rsplit('.', 2)
                filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
        """Generate the output filename"""
        if outtmpl:
            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
            dir_type = None
        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
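
    # Sketch: given an extracted info_dict, this resolves the 'default'
    # output template plus the 'home' path into the final filename:
    #
    #   info = ydl.extract_info(url, download=False)
    #   print(ydl.prepare_filename(info))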

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is NO_DEFAULT:
                    while True:
                        filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
                        reply = input(self._format_screen(
                            f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
                        if reply in {'y', ''}:
                            return None
                        elif reply == 'n':
                            return f'Skipping {video_title}'
                elif ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason
1361
1362 @staticmethod
1363 def add_extra_info(info_dict, extra_info):
1364 '''Set the keys from extra_info in info dict if they are missing'''
1365 for key, value in extra_info.items():
1366 info_dict.setdefault(key, value)
1367
1368 def extract_info(self, url, download=True, ie_key=None, extra_info=None,
1369 process=True, force_generic_extractor=False):
1370 """
1371 Extract and return the information dictionary of the URL.
1372
1373 Arguments:
1374 url -- URL to extract
1375
1376 Keyword arguments:
1377 download -- whether to download videos during extraction
1378 ie_key -- extractor key hint
1379 extra_info -- dictionary containing the extra values to add to each result
1380 process -- whether to resolve all unresolved references (URLs, playlist items),
1381 must be True for download to work.
1382 force_generic_extractor -- force using the generic extractor
1383 """
1384
1385 if extra_info is None:
1386 extra_info = {}
1387
1388 if not ie_key and force_generic_extractor:
1389 ie_key = 'Generic'
1390
1391 if ie_key:
1392 ies = {ie_key: self._get_info_extractor_class(ie_key)}
1393 else:
1394 ies = self._ies
1395
1396 for ie_key, ie in ies.items():
1397 if not ie.suitable(url):
1398 continue
1399
1400 if not ie.working():
1401 self.report_warning('The program functionality for this site has been marked as broken, '
1402 'and will probably not work.')
1403
1404 temp_id = ie.get_temp_id(url)
1405 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1406 self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
1407 if self.params.get('break_on_existing', False):
1408 raise ExistingVideoReached()
1409 break
1410 return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
1411 else:
1412 self.report_error('no suitable InfoExtractor for URL %s' % url)
1413
1414 def _handle_extraction_exceptions(func):
1415 @functools.wraps(func)
1416 def wrapper(self, *args, **kwargs):
1417 while True:
1418 try:
1419 return func(self, *args, **kwargs)
1420 except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
1421 raise
1422 except ReExtractInfo as e:
1423 if e.expected:
1424 self.to_screen(f'{e}; Re-extracting data')
1425 else:
1426 self.to_stderr('\r')
1427 self.report_warning(f'{e}; Re-extracting data')
1428 continue
1429 except GeoRestrictedError as e:
1430 msg = e.msg
1431 if e.countries:
1432 msg += '\nThis video is available in %s.' % ', '.join(
1433 map(ISO3166Utils.short2full, e.countries))
1434 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
1435 self.report_error(msg)
1436 except ExtractorError as e: # An error we somewhat expected
1437 self.report_error(str(e), e.format_traceback())
1438 except Exception as e:
1439 if self.params.get('ignoreerrors'):
1440 self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
1441 else:
1442 raise
1443 break
1444 return wrapper
1445
1446 def _wait_for_video(self, ie_result):
1447 if (not self.params.get('wait_for_video')
1448 or ie_result.get('_type', 'video') != 'video'
1449 or ie_result.get('formats') or ie_result.get('url')):
1450 return
1451
1452 format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
1453 last_msg = ''
1454
1455 def progress(msg):
1456 nonlocal last_msg
1457 self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
1458 last_msg = msg
1459
1460 min_wait, max_wait = self.params.get('wait_for_video')
1461 diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
1462 if diff is None and ie_result.get('live_status') == 'is_upcoming':
1463 diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
1464 self.report_warning('Release time of video is not known')
1465 elif (diff or 0) <= 0:
1466 self.report_warning('Video should already be available according to extracted info')
1467 diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
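# Illustrative: with --wait-for-video 300-600, a release due in 90s is
# clamped up to 300s (min_wait) and one due in 900s down to 600s (max_wait)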
1468 self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
1469
1470 wait_till = time.time() + diff
1471 try:
1472 while True:
1473 diff = wait_till - time.time()
1474 if diff <= 0:
1475 progress('')
1476 raise ReExtractInfo('[wait] Wait period ended', expected=True)
1477 progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
1478 time.sleep(1)
1479 except KeyboardInterrupt:
1480 progress('')
1481 raise ReExtractInfo('[wait] Interrupted by user', expected=True)
1482 except BaseException as e:
1483 if not isinstance(e, ReExtractInfo):
1484 self.to_screen('')
1485 raise
1486
1487 @_handle_extraction_exceptions
1488 def __extract_info(self, url, ie, download, extra_info, process):
1489 ie_result = ie.extract(url)
1490 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1491 return
1492 if isinstance(ie_result, list):
1493 # Backwards compatibility: old IE result format
1494 ie_result = {
1495 '_type': 'compat_list',
1496 'entries': ie_result,
1497 }
1498 if extra_info.get('original_url'):
1499 ie_result.setdefault('original_url', extra_info['original_url'])
1500 self.add_default_extra_info(ie_result, ie, url)
1501 if process:
1502 self._wait_for_video(ie_result)
1503 return self.process_ie_result(ie_result, download, extra_info)
1504 else:
1505 return ie_result
1506
1507 def add_default_extra_info(self, ie_result, ie, url):
1508 if url is not None:
1509 self.add_extra_info(ie_result, {
1510 'webpage_url': url,
1511 'original_url': url,
1512 })
1513 webpage_url = ie_result.get('webpage_url')
1514 if webpage_url:
1515 self.add_extra_info(ie_result, {
1516 'webpage_url_basename': url_basename(webpage_url),
1517 'webpage_url_domain': get_domain(webpage_url),
1518 })
1519 if ie is not None:
1520 self.add_extra_info(ie_result, {
1521 'extractor': ie.IE_NAME,
1522 'extractor_key': ie.ie_key(),
1523 })
1524
1525 def process_ie_result(self, ie_result, download=True, extra_info=None):
1526 """
1527 Take the result of the ie (may be modified) and resolve all unresolved
1528 references (URLs, playlist items).
1529
1530 It will also download the videos if 'download'.
1531 Returns the resolved ie_result.
1532 """
1533 if extra_info is None:
1534 extra_info = {}
1535 result_type = ie_result.get('_type', 'video')
1536
1537 if result_type in ('url', 'url_transparent'):
1538 ie_result['url'] = sanitize_url(ie_result['url'])
1539 if ie_result.get('original_url'):
1540 extra_info.setdefault('original_url', ie_result['original_url'])
1541
1542 extract_flat = self.params.get('extract_flat', False)
1543 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1544 or extract_flat is True):
1545 info_copy = ie_result.copy()
1546 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1547 if ie and not ie_result.get('id'):
1548 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1549 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1550 self.add_extra_info(info_copy, extra_info)
1551 info_copy, _ = self.pre_process(info_copy)
1552 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1553 self._raise_pending_errors(info_copy)
1554 if self.params.get('force_write_download_archive', False):
1555 self.record_download_archive(info_copy)
1556 return ie_result
1557
1558 if result_type == 'video':
1559 self.add_extra_info(ie_result, extra_info)
1560 ie_result = self.process_video_result(ie_result, download=download)
1561 self._raise_pending_errors(ie_result)
1562 additional_urls = (ie_result or {}).get('additional_urls')
1563 if additional_urls:
1564 # TODO: Improve MetadataParserPP to allow setting a list
1565 if isinstance(additional_urls, compat_str):
1566 additional_urls = [additional_urls]
1567 self.to_screen(
1568 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1569 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1570 ie_result['additional_entries'] = [
1571 self.extract_info(
1572 url, download, extra_info=extra_info,
1573 force_generic_extractor=self.params.get('force_generic_extractor'))
1574 for url in additional_urls
1575 ]
1576 return ie_result
1577 elif result_type == 'url':
1578 # We have to add extra_info to the results because it may be
1579 # contained in a playlist
1580 return self.extract_info(
1581 ie_result['url'], download,
1582 ie_key=ie_result.get('ie_key'),
1583 extra_info=extra_info)
1584 elif result_type == 'url_transparent':
1585 # Use the information from the embedding page
1586 info = self.extract_info(
1587 ie_result['url'], ie_key=ie_result.get('ie_key'),
1588 extra_info=extra_info, download=False, process=False)
1589
1590 # extract_info may return None when ignoreerrors is enabled and
1591 # extraction failed with an error, don't crash and return early
1592 # in this case
1593 if not info:
1594 return info
1595
1596 exempted_fields = {'_type', 'url', 'ie_key'}
1597 if not ie_result.get('section_end') and ie_result.get('section_start') is None:
1598 # For video clips, the id etc of the clip extractor should be used
1599 exempted_fields |= {'id', 'extractor', 'extractor_key'}
1600
1601 new_result = info.copy()
1602 new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))
1603
1604 # Extracted info may not be a video result (i.e.
1605 # info.get('_type', 'video') != 'video'), but rather a 'url' or
1606 # 'url_transparent' result. In such cases, the outer metadata (from
1607 # ie_result) should be propagated to the inner one (info). For this
1608 # to happen, the _type of info is overridden with 'url_transparent'.
1609 # This fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1610 if new_result.get('_type') == 'url':
1611 new_result['_type'] = 'url_transparent'
1612
1613 return self.process_ie_result(
1614 new_result, download=download, extra_info=extra_info)
1615 elif result_type in ('playlist', 'multi_video'):
1616 # Protect from infinite recursion due to recursively nested playlists
1617 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1618 webpage_url = ie_result['webpage_url']
1619 if webpage_url in self._playlist_urls:
1620 self.to_screen(
1621 '[download] Skipping already downloaded playlist: %s'
1622 % (ie_result.get('title') or ie_result.get('id')))
1623 return
1624
1625 self._playlist_level += 1
1626 self._playlist_urls.add(webpage_url)
1627 self._fill_common_fields(ie_result, False)
1628 self._sanitize_thumbnails(ie_result)
1629 try:
1630 return self.__process_playlist(ie_result, download)
1631 finally:
1632 self._playlist_level -= 1
1633 if not self._playlist_level:
1634 self._playlist_urls.clear()
1635 elif result_type == 'compat_list':
1636 self.report_warning(
1637 'Extractor %s returned a compat_list result. '
1638 'It needs to be updated.' % ie_result.get('extractor'))
1639
1640 def _fixup(r):
1641 self.add_extra_info(r, {
1642 'extractor': ie_result['extractor'],
1643 'webpage_url': ie_result['webpage_url'],
1644 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1645 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1646 'extractor_key': ie_result['extractor_key'],
1647 })
1648 return r
1649 ie_result['entries'] = [
1650 self.process_ie_result(_fixup(r), download, extra_info)
1651 for r in ie_result['entries']
1652 ]
1653 return ie_result
1654 else:
1655 raise Exception('Invalid result type: %s' % result_type)
1656
1657 def _ensure_dir_exists(self, path):
1658 return make_dir(path, self.report_error)
1659
1660 @staticmethod
1661 def _playlist_infodict(ie_result, **kwargs):
1662 return {
1663 **ie_result,
1664 'playlist': ie_result.get('title') or ie_result.get('id'),
1665 'playlist_id': ie_result.get('id'),
1666 'playlist_title': ie_result.get('title'),
1667 'playlist_uploader': ie_result.get('uploader'),
1668 'playlist_uploader_id': ie_result.get('uploader_id'),
1669 'playlist_index': 0,
1670 **kwargs,
1671 }
1672
1673 def __process_playlist(self, ie_result, download):
1674 """Process each entry in the playlist"""
1675 title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
1676 self.to_screen(f'[download] Downloading playlist: {title}')
1677
1678 all_entries = PlaylistEntries(self, ie_result)
1679 entries = orderedSet(all_entries.get_requested_items(), lazy=True)
1680
1681 lazy = self.params.get('lazy_playlist')
1682 if lazy:
1683 resolved_entries, n_entries = [], 'N/A'
1684 ie_result['requested_entries'], ie_result['entries'] = None, None
1685 else:
1686 entries = resolved_entries = list(entries)
1687 n_entries = len(resolved_entries)
1688 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1689 if not ie_result.get('playlist_count'):
1690 # Better to do this after potentially exhausting entries
1691 ie_result['playlist_count'] = all_entries.get_full_count()
1692
1693 _infojson_written = False
1694 write_playlist_files = self.params.get('allow_playlist_files', True)
1695 if write_playlist_files and self.params.get('list_thumbnails'):
1696 self.list_thumbnails(ie_result)
1697 if write_playlist_files and not self.params.get('simulate'):
1698 ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
1699 _infojson_written = self._write_info_json(
1700 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1701 if _infojson_written is None:
1702 return
1703 if self._write_description('playlist', ie_result,
1704 self.prepare_filename(ie_copy, 'pl_description')) is None:
1705 return
1706 # TODO: This should be passed to ThumbnailsConvertor if necessary
1707 self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1708
1709 if lazy:
1710 if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
1711 self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
1712 elif self.params.get('playlistreverse'):
1713 entries.reverse()
1714 elif self.params.get('playlistrandom'):
1715 random.shuffle(entries)
1716
1717 self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} videos'
1718 f'{format_field(ie_result, "playlist_count", " of %s")}')
1719
1720 failures = 0
1721 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1722 for i, (playlist_index, entry) in enumerate(entries):
1723 if lazy:
1724 resolved_entries.append((playlist_index, entry))
1725
1726 # TODO: Add auto-generated fields
1727 if not entry or self._match_entry(entry, incomplete=True) is not None:
1728 continue
1729
1730 self.to_screen('[download] Downloading video %s of %s' % (
1731 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
1732
1733 entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
1734 if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
1735 playlist_index = ie_result['requested_entries'][i]
1736
1737 entry_result = self.__process_iterable_entry(entry, download, {
1738 'n_entries': int_or_none(n_entries),
1739 '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
1740 'playlist_count': ie_result.get('playlist_count'),
1741 'playlist_index': playlist_index,
1742 'playlist_autonumber': i + 1,
1743 'playlist': title,
1744 'playlist_id': ie_result.get('id'),
1745 'playlist_title': ie_result.get('title'),
1746 'playlist_uploader': ie_result.get('uploader'),
1747 'playlist_uploader_id': ie_result.get('uploader_id'),
1748 'extractor': ie_result['extractor'],
1749 'webpage_url': ie_result['webpage_url'],
1750 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1751 'webpage_url_domain': get_domain(ie_result['webpage_url']),
1752 'extractor_key': ie_result['extractor_key'],
1753 })
1754 if not entry_result:
1755 failures += 1
1756 if failures >= max_failures:
1757 self.report_error(
1758 f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
1759 break
1760 resolved_entries[i] = (playlist_index, entry_result)
1761
1762 # Update with processed data
1763 ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
1764
1765 # Write the updated info to json
1766 if _infojson_written is True and self._write_info_json(
1767 'updated playlist', ie_result,
1768 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1769 return
1770
1771 ie_result = self.run_all_pps('playlist', ie_result)
1772 self.to_screen(f'[download] Finished downloading playlist: {title}')
1773 return ie_result
1774
1775 @_handle_extraction_exceptions
1776 def __process_iterable_entry(self, entry, download, extra_info):
1777 return self.process_ie_result(
1778 entry, download=download, extra_info=extra_info)
1779
1780 def _build_format_filter(self, filter_spec):
1781 " Returns a function to filter the formats according to the filter_spec "
1782
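# Illustrative filter specs (the part inside '[...]' of a format selector):
#   'filesize<100M'  - numeric comparison, handled by operator_rex below
#   'height<=?720'   - '?' after the operator also accepts formats where
#                      the field is unknown (none_inclusive)
#   'ext=mp4'        - string comparison, handled by str_operator_rex below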
1783 OPERATORS = {
1784 '<': operator.lt,
1785 '<=': operator.le,
1786 '>': operator.gt,
1787 '>=': operator.ge,
1788 '=': operator.eq,
1789 '!=': operator.ne,
1790 }
1791 operator_rex = re.compile(r'''(?x)\s*
1792 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1793 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1794 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1795 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1796 m = operator_rex.fullmatch(filter_spec)
1797 if m:
1798 try:
1799 comparison_value = int(m.group('value'))
1800 except ValueError:
1801 comparison_value = parse_filesize(m.group('value'))
1802 if comparison_value is None:
1803 comparison_value = parse_filesize(m.group('value') + 'B')
1804 if comparison_value is None:
1805 raise ValueError(
1806 'Invalid value %r in format specification %r' % (
1807 m.group('value'), filter_spec))
1808 op = OPERATORS[m.group('op')]
1809
1810 if not m:
1811 STR_OPERATORS = {
1812 '=': operator.eq,
1813 '^=': lambda attr, value: attr.startswith(value),
1814 '$=': lambda attr, value: attr.endswith(value),
1815 '*=': lambda attr, value: value in attr,
1816 '~=': lambda attr, value: value.search(attr) is not None
1817 }
1818 str_operator_rex = re.compile(r'''(?x)\s*
1819 (?P<key>[a-zA-Z0-9._-]+)\s*
1820 (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
1821 (?P<quote>["'])?
1822 (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
1823 (?(quote)(?P=quote))\s*
1824 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1825 m = str_operator_rex.fullmatch(filter_spec)
1826 if m:
1827 if m.group('op') == '~=':
1828 comparison_value = re.compile(m.group('value'))
1829 else:
1830 comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
1831 str_op = STR_OPERATORS[m.group('op')]
1832 if m.group('negation'):
1833 op = lambda attr, value: not str_op(attr, value)
1834 else:
1835 op = str_op
1836
1837 if not m:
1838 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1839
1840 def _filter(f):
1841 actual_value = f.get(m.group('key'))
1842 if actual_value is None:
1843 return m.group('none_inclusive')
1844 return op(actual_value, comparison_value)
1845 return _filter
1846
1847 def _check_formats(self, formats):
1848 for f in formats:
1849 self.to_screen('[info] Testing format %s' % f['format_id'])
1850 path = self.get_output_path('temp')
1851 if not self._ensure_dir_exists(f'{path}/'):
1852 continue
1853 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1854 temp_file.close()
1855 try:
1856 success, _ = self.dl(temp_file.name, f, test=True)
1857 except (DownloadError, OSError, ValueError) + network_exceptions:
1858 success = False
1859 finally:
1860 if os.path.exists(temp_file.name):
1861 try:
1862 os.remove(temp_file.name)
1863 except OSError:
1864 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1865 if success:
1866 yield f
1867 else:
1868 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1869
1870 def _default_format_spec(self, info_dict, download=True):
1871
1872 def can_merge():
1873 merger = FFmpegMergerPP(self)
1874 return merger.available and merger.can_merge()
1875
1876 prefer_best = (
1877 not self.params.get('simulate')
1878 and download
1879 and (
1880 not can_merge()
1881 or info_dict.get('is_live') and not self.params.get('live_from_start')
1882 or self.params['outtmpl']['default'] == '-'))
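# A single pre-merged file is preferred when merging is not possible:
# no working ffmpeg merger, a live stream (unless --live-from-start),
# or output going to stdout ('-o -')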
1883 compat = (
1884 prefer_best
1885 or self.params.get('allow_multiple_audio_streams', False)
1886 or 'format-spec' in self.params['compat_opts'])
1887
1888 return (
1889 'best/bestvideo+bestaudio' if prefer_best
1890 else 'bestvideo*+bestaudio/best' if not compat
1891 else 'bestvideo+bestaudio/best')
1892
1893 def build_format_selector(self, format_spec):
1894 def syntax_error(note, start):
1895 message = (
1896 'Invalid format specification: '
1897 '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
1898 return SyntaxError(message)
1899
1900 PICKFIRST = 'PICKFIRST'
1901 MERGE = 'MERGE'
1902 SINGLE = 'SINGLE'
1903 GROUP = 'GROUP'
1904 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
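# Illustrative sketch: a spec like 'bv*+ba/b' parses (roughly) into
#   PICKFIRST(MERGE(SINGLE('bv*'), SINGLE('ba')), SINGLE('b'))
# i.e. merge best video* with best audio, falling back to best overall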
1905
1906 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1907 'video': self.params.get('allow_multiple_video_streams', False)}
1908
1909 check_formats = self.params.get('check_formats') == 'selected'
1910
1911 def _parse_filter(tokens):
1912 filter_parts = []
1913 for type, string, start, _, _ in tokens:
1914 if type == tokenize.OP and string == ']':
1915 return ''.join(filter_parts)
1916 else:
1917 filter_parts.append(string)
1918
1919 def _remove_unused_ops(tokens):
1920 # Remove operators that we don't use and join them with the surrounding strings
1921 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1922 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1923 last_string, last_start, last_end, last_line = None, None, None, None
1924 for type, string, start, end, line in tokens:
1925 if type == tokenize.OP and string == '[':
1926 if last_string:
1927 yield tokenize.NAME, last_string, last_start, last_end, last_line
1928 last_string = None
1929 yield type, string, start, end, line
1930 # everything inside brackets will be handled by _parse_filter
1931 for type, string, start, end, line in tokens:
1932 yield type, string, start, end, line
1933 if type == tokenize.OP and string == ']':
1934 break
1935 elif type == tokenize.OP and string in ALLOWED_OPS:
1936 if last_string:
1937 yield tokenize.NAME, last_string, last_start, last_end, last_line
1938 last_string = None
1939 yield type, string, start, end, line
1940 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1941 if not last_string:
1942 last_string = string
1943 last_start = start
1944 last_end = end
1945 else:
1946 last_string += string
1947 if last_string:
1948 yield tokenize.NAME, last_string, last_start, last_end, last_line
1949
1950 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1951 selectors = []
1952 current_selector = None
1953 for type, string, start, _, _ in tokens:
1954 # tokenize.ENCODING always exists on Python 3
1955 if type == tokenize.ENCODING:
1956 continue
1957 elif type in [tokenize.NAME, tokenize.NUMBER]:
1958 current_selector = FormatSelector(SINGLE, string, [])
1959 elif type == tokenize.OP:
1960 if string == ')':
1961 if not inside_group:
1962 # ')' will be handled by the parentheses group
1963 tokens.restore_last_token()
1964 break
1965 elif inside_merge and string in ['/', ',']:
1966 tokens.restore_last_token()
1967 break
1968 elif inside_choice and string == ',':
1969 tokens.restore_last_token()
1970 break
1971 elif string == ',':
1972 if not current_selector:
1973 raise syntax_error('"," must follow a format selector', start)
1974 selectors.append(current_selector)
1975 current_selector = None
1976 elif string == '/':
1977 if not current_selector:
1978 raise syntax_error('"/" must follow a format selector', start)
1979 first_choice = current_selector
1980 second_choice = _parse_format_selection(tokens, inside_choice=True)
1981 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
1982 elif string == '[':
1983 if not current_selector:
1984 current_selector = FormatSelector(SINGLE, 'best', [])
1985 format_filter = _parse_filter(tokens)
1986 current_selector.filters.append(format_filter)
1987 elif string == '(':
1988 if current_selector:
1989 raise syntax_error('Unexpected "("', start)
1990 group = _parse_format_selection(tokens, inside_group=True)
1991 current_selector = FormatSelector(GROUP, group, [])
1992 elif string == '+':
1993 if not current_selector:
1994 raise syntax_error('Unexpected "+"', start)
1995 selector_1 = current_selector
1996 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1997 if not selector_2:
1998 raise syntax_error('Expected a selector', start)
1999 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
2000 else:
2001 raise syntax_error(f'Operator not recognized: "{string}"', start)
2002 elif type == tokenize.ENDMARKER:
2003 break
2004 if current_selector:
2005 selectors.append(current_selector)
2006 return selectors
2007
2008 def _merge(formats_pair):
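# Sketch (illustrative): merging a video-only and an audio-only format
# returns a dict with both inputs under 'requested_formats' and combined
# ext/format_id/tbr etc.; the downloader/FFmpegMergerPP later mux the
# downloaded streams into a single file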
2009 format_1, format_2 = formats_pair
2010
2011 formats_info = []
2012 formats_info.extend(format_1.get('requested_formats', (format_1,)))
2013 formats_info.extend(format_2.get('requested_formats', (format_2,)))
2014
2015 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
2016 get_no_more = {'video': False, 'audio': False}
2017 for (i, fmt_info) in enumerate(formats_info):
2018 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
2019 formats_info.pop(i)
2020 continue
2021 for aud_vid in ['audio', 'video']:
2022 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
2023 if get_no_more[aud_vid]:
2024 formats_info.pop(i)
2025 break
2026 get_no_more[aud_vid] = True
2027
2028 if len(formats_info) == 1:
2029 return formats_info[0]
2030
2031 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
2032 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
2033
2034 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
2035 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
2036
2037 output_ext = self.params.get('merge_output_format')
2038 if not output_ext:
2039 if the_only_video:
2040 output_ext = the_only_video['ext']
2041 elif the_only_audio and not video_fmts:
2042 output_ext = the_only_audio['ext']
2043 else:
2044 output_ext = 'mkv'
2045
2046 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2047
2048 new_dict = {
2049 'requested_formats': formats_info,
2050 'format': '+'.join(filtered('format')),
2051 'format_id': '+'.join(filtered('format_id')),
2052 'ext': output_ext,
2053 'protocol': '+'.join(map(determine_protocol, formats_info)),
2054 'language': '+'.join(orderedSet(filtered('language'))) or None,
2055 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2056 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2057 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2058 }
2059
2060 if the_only_video:
2061 new_dict.update({
2062 'width': the_only_video.get('width'),
2063 'height': the_only_video.get('height'),
2064 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2065 'fps': the_only_video.get('fps'),
2066 'dynamic_range': the_only_video.get('dynamic_range'),
2067 'vcodec': the_only_video.get('vcodec'),
2068 'vbr': the_only_video.get('vbr'),
2069 'stretched_ratio': the_only_video.get('stretched_ratio'),
2070 })
2071
2072 if the_only_audio:
2073 new_dict.update({
2074 'acodec': the_only_audio.get('acodec'),
2075 'abr': the_only_audio.get('abr'),
2076 'asr': the_only_audio.get('asr'),
2077 })
2078
2079 return new_dict
2080
2081 def _check_formats(formats):
2082 if not check_formats:
2083 yield from formats
2084 return
2085 yield from self._check_formats(formats)
2086
2087 def _build_selector_function(selector):
2088 if isinstance(selector, list): # ,
2089 fs = [_build_selector_function(s) for s in selector]
2090
2091 def selector_function(ctx):
2092 for f in fs:
2093 yield from f(ctx)
2094 return selector_function
2095
2096 elif selector.type == GROUP: # ()
2097 selector_function = _build_selector_function(selector.selector)
2098
2099 elif selector.type == PICKFIRST: # /
2100 fs = [_build_selector_function(s) for s in selector.selector]
2101
2102 def selector_function(ctx):
2103 for f in fs:
2104 picked_formats = list(f(ctx))
2105 if picked_formats:
2106 return picked_formats
2107 return []
2108
2109 elif selector.type == MERGE: # +
2110 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2111
2112 def selector_function(ctx):
2113 for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
2114 yield _merge(pair)
2115
2116 elif selector.type == SINGLE: # atom
2117 format_spec = selector.selector or 'best'
2118
2119 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2120 if format_spec == 'all':
2121 def selector_function(ctx):
2122 yield from _check_formats(ctx['formats'][::-1])
2123 elif format_spec == 'mergeall':
2124 def selector_function(ctx):
2125 formats = list(_check_formats(
2126 f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
2127 if not formats:
2128 return
2129 merged_format = formats[-1]
2130 for f in formats[-2::-1]:
2131 merged_format = _merge((merged_format, f))
2132 yield merged_format
2133
2134 else:
2135 format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
2136 mobj = re.match(
2137 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2138 format_spec)
2139 if mobj is not None:
2140 format_idx = int_or_none(mobj.group('n'), default=1)
2141 format_reverse = mobj.group('bw')[0] == 'b'
2142 format_type = (mobj.group('type') or [None])[0]
2143 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2144 format_modified = mobj.group('mod') is not None
2145
2146 format_fallback = not format_type and not format_modified # for b, w
2147 _filter_f = (
2148 (lambda f: f.get('%scodec' % format_type) != 'none')
2149 if format_type and format_modified # bv*, ba*, wv*, wa*
2150 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2151 if format_type # bv, ba, wv, wa
2152 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2153 if not format_modified # b, w
2154 else lambda f: True) # b*, w*
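# Illustrative: 'bv*.2' selects the second-best format that contains
# a video stream, while plain 'b' selects the best pre-merged
# (video+audio) format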
2155 filter_f = lambda f: _filter_f(f) and (
2156 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2157 else:
2158 if format_spec in self._format_selection_exts['audio']:
2159 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2160 elif format_spec in self._format_selection_exts['video']:
2161 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2162 separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
2163 elif format_spec in self._format_selection_exts['storyboards']:
2164 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2165 else:
2166 filter_f = lambda f: f.get('format_id') == format_spec # id
2167
2168 def selector_function(ctx):
2169 formats = list(ctx['formats'])
2170 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2171 if not matches:
2172 if format_fallback and ctx['incomplete_formats']:
2173 # For extractors with incomplete formats (audio-only (soundcloud)
2174 # or video-only (imgur)), best/worst will fall back to the
2175 # best/worst {video,audio}-only format
2176 matches = formats
2177 elif separate_fallback and not ctx['has_merged_format']:
2178 # for compatibility with youtube-dl when there is no pre-merged format
2179 matches = list(filter(separate_fallback, formats))
2180 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2181 try:
2182 yield matches[format_idx - 1]
2183 except LazyList.IndexError:
2184 return
2185
2186 filters = [self._build_format_filter(f) for f in selector.filters]
2187
2188 def final_selector(ctx):
2189 ctx_copy = dict(ctx)
2190 for _filter in filters:
2191 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2192 return selector_function(ctx_copy)
2193 return final_selector
2194
2195 stream = io.BytesIO(format_spec.encode())
2196 try:
2197 tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
2198 except tokenize.TokenError:
2199 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2200
2201 class TokenIterator:
2202 def __init__(self, tokens):
2203 self.tokens = tokens
2204 self.counter = 0
2205
2206 def __iter__(self):
2207 return self
2208
2209 def __next__(self):
2210 if self.counter >= len(self.tokens):
2211 raise StopIteration()
2212 value = self.tokens[self.counter]
2213 self.counter += 1
2214 return value
2215
2216 next = __next__
2217
2218 def restore_last_token(self):
2219 self.counter -= 1
2220
2221 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2222 return _build_selector_function(parsed_selector)
2223
2224 def _calc_headers(self, info_dict):
2225 res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
2226
2227 cookies = self._calc_cookies(info_dict['url'])
2228 if cookies:
2229 res['Cookie'] = cookies
2230
2231 if 'X-Forwarded-For' not in res:
2232 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2233 if x_forwarded_for_ip:
2234 res['X-Forwarded-For'] = x_forwarded_for_ip
2235
2236 return res
2237
2238 def _calc_cookies(self, url):
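# Build a throwaway request so the cookiejar can compute the exact
# 'Cookie' header it would send for this URL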
2239 pr = sanitized_Request(url)
2240 self.cookiejar.add_cookie_header(pr)
2241 return pr.get_header('Cookie')
2242
2243 def _sort_thumbnails(self, thumbnails):
2244 thumbnails.sort(key=lambda t: (
2245 t.get('preference') if t.get('preference') is not None else -1,
2246 t.get('width') if t.get('width') is not None else -1,
2247 t.get('height') if t.get('height') is not None else -1,
2248 t.get('id') if t.get('id') is not None else '',
2249 t.get('url')))
2250
2251 def _sanitize_thumbnails(self, info_dict):
2252 thumbnails = info_dict.get('thumbnails')
2253 if thumbnails is None:
2254 thumbnail = info_dict.get('thumbnail')
2255 if thumbnail:
2256 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2257 if not thumbnails:
2258 return
2259
2260 def check_thumbnails(thumbnails):
2261 for t in thumbnails:
2262 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2263 try:
2264 self.urlopen(HEADRequest(t['url']))
2265 except network_exceptions as err:
2266 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2267 continue
2268 yield t
2269
2270 self._sort_thumbnails(thumbnails)
2271 for i, t in enumerate(thumbnails):
2272 if t.get('id') is None:
2273 t['id'] = '%d' % i
2274 if t.get('width') and t.get('height'):
2275 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2276 t['url'] = sanitize_url(t['url'])
2277
2278 if self.params.get('check_formats') is True:
2279 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2280 else:
2281 info_dict['thumbnails'] = thumbnails
2282
2283 def _fill_common_fields(self, info_dict, is_video=True):
2284 # TODO: move sanitization here
2285 if is_video:
2286 # playlists are allowed to lack "title"
2287 title = info_dict.get('title', NO_DEFAULT)
2288 if title is NO_DEFAULT:
2289 raise ExtractorError('Missing "title" field in extractor result',
2290 video_id=info_dict['id'], ie=info_dict['extractor'])
2291 info_dict['fulltitle'] = title
2292 if not title:
2293 if title == '':
2294 self.write_debug('Extractor gave empty title. Creating a generic title')
2295 else:
2296 self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
2297 info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
2298
2299 if info_dict.get('duration') is not None:
2300 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2301
2302 for ts_key, date_key in (
2303 ('timestamp', 'upload_date'),
2304 ('release_timestamp', 'release_date'),
2305 ('modified_timestamp', 'modified_date'),
2306 ):
2307 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2308 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2309 # see http://bugs.python.org/issue1646728)
2310 with contextlib.suppress(ValueError, OverflowError, OSError):
2311 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2312 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2313
2314 live_keys = ('is_live', 'was_live')
2315 live_status = info_dict.get('live_status')
2316 if live_status is None:
2317 for key in live_keys:
2318 if info_dict.get(key) is False:
2319 continue
2320 if info_dict.get(key):
2321 live_status = key
2322 break
2323 if all(info_dict.get(key) is False for key in live_keys):
2324 live_status = 'not_live'
2325 if live_status:
2326 info_dict['live_status'] = live_status
2327 for key in live_keys:
2328 if info_dict.get(key) is None:
2329 info_dict[key] = (live_status == key)
2330
2331 # Auto generate title fields corresponding to the *_number fields when missing
2332 # in order to always have clean titles. This is very common for TV series.
2333 for field in ('chapter', 'season', 'episode'):
2334 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2335 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
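# Illustrative: {'episode_number': 3} with no 'episode' field yields
# info_dict['episode'] = 'Episode 3'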
2336
2337 def _raise_pending_errors(self, info):
2338 err = info.pop('__pending_error', None)
2339 if err:
2340 self.report_error(err, tb=False)
2341
2342 def process_video_result(self, info_dict, download=True):
2343 assert info_dict.get('_type', 'video') == 'video'
2344 self._num_videos += 1
2345
2346 if 'id' not in info_dict:
2347 raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
2348 elif not info_dict.get('id'):
2349 raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
2350
2351 def report_force_conversion(field, field_not, conversion):
2352 self.report_warning(
2353 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2354 % (field, field_not, conversion))
2355
2356 def sanitize_string_field(info, string_field):
2357 field = info.get(string_field)
2358 if field is None or isinstance(field, compat_str):
2359 return
2360 report_force_conversion(string_field, 'a string', 'string')
2361 info[string_field] = compat_str(field)
2362
2363 def sanitize_numeric_fields(info):
2364 for numeric_field in self._NUMERIC_FIELDS:
2365 field = info.get(numeric_field)
2366 if field is None or isinstance(field, (int, float)):
2367 continue
2368 report_force_conversion(numeric_field, 'numeric', 'int')
2369 info[numeric_field] = int_or_none(field)
2370
2371 sanitize_string_field(info_dict, 'id')
2372 sanitize_numeric_fields(info_dict)
2373 if info_dict.get('section_end') and info_dict.get('section_start') is not None:
2374 info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
2375 if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
2376 self.report_warning('"duration" field is negative, there is an error in extractor')
2377
2378 if 'playlist' not in info_dict:
2379 # It isn't part of a playlist
2380 info_dict['playlist'] = None
2381 info_dict['playlist_index'] = None
2382
2383 self._sanitize_thumbnails(info_dict)
2384
2385 thumbnail = info_dict.get('thumbnail')
2386 thumbnails = info_dict.get('thumbnails')
2387 if thumbnail:
2388 info_dict['thumbnail'] = sanitize_url(thumbnail)
2389 elif thumbnails:
2390 info_dict['thumbnail'] = thumbnails[-1]['url']
2391
2392 if info_dict.get('display_id') is None and 'id' in info_dict:
2393 info_dict['display_id'] = info_dict['id']
2394
2395 self._fill_common_fields(info_dict)
2396
2397 for cc_kind in ('subtitles', 'automatic_captions'):
2398 cc = info_dict.get(cc_kind)
2399 if cc:
2400 for _, subtitle in cc.items():
2401 for subtitle_format in subtitle:
2402 if subtitle_format.get('url'):
2403 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2404 if subtitle_format.get('ext') is None:
2405 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2406
2407 automatic_captions = info_dict.get('automatic_captions')
2408 subtitles = info_dict.get('subtitles')
2409
2410 info_dict['requested_subtitles'] = self.process_subtitles(
2411 info_dict['id'], subtitles, automatic_captions)
2412
2413 if info_dict.get('formats') is None:
2414 # There's only one format available
2415 formats = [info_dict]
2416 else:
2417 formats = info_dict['formats']
2418
2419 # or None ensures --clean-infojson removes it
2420 info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
2421 if not self.params.get('allow_unplayable_formats'):
2422 formats = [f for f in formats if not f.get('has_drm')]
2423 if info_dict['_has_drm'] and all(
2424 f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
2425 self.report_warning(
2426 'This video is DRM protected and only images are available for download. '
2427 'Use --list-formats to see them')
2428
2429 get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
2430 if not get_from_start:
2431 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
2432 if info_dict.get('is_live') and formats:
2433 formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
2434 if get_from_start and not formats:
2435 self.raise_no_formats(info_dict, msg=(
2436 '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
2437 'If you want to download from the current time, use --no-live-from-start'))
2438
2439 if not formats:
2440 self.raise_no_formats(info_dict)
2441
2442 def is_wellformed(f):
2443 url = f.get('url')
2444 if not url:
2445 self.report_warning(
2446 '"url" field is missing or empty - skipping format, '
2447 'there is an error in extractor')
2448 return False
2449 if isinstance(url, bytes):
2450 sanitize_string_field(f, 'url')
2451 return True
2452
2453 # Filter out malformed formats for better extraction robustness
2454 formats = list(filter(is_wellformed, formats))
2455
2456 formats_dict = {}
2457
2458 # We check that all the formats have the format and format_id fields
2459 for i, format in enumerate(formats):
2460 sanitize_string_field(format, 'format_id')
2461 sanitize_numeric_fields(format)
2462 format['url'] = sanitize_url(format['url'])
2463 if not format.get('format_id'):
2464 format['format_id'] = compat_str(i)
2465 else:
2466 # Sanitize format_id from characters used in format selector expression
2467 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2468 format_id = format['format_id']
2469 if format_id not in formats_dict:
2470 formats_dict[format_id] = []
2471 formats_dict[format_id].append(format)
2472
2473 # Make sure all formats have unique format_id
2474 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2475 for format_id, ambiguous_formats in formats_dict.items():
2476 ambiguous_id = len(ambiguous_formats) > 1
2477 for i, format in enumerate(ambiguous_formats):
2478 if ambiguous_id:
2479 format['format_id'] = '%s-%d' % (format_id, i)
2480 if format.get('ext') is None:
2481 format['ext'] = determine_ext(format['url']).lower()
2482 # Ensure there is no conflict between id and ext in format selection
2483 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2484 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2485 format['format_id'] = 'f%s' % format['format_id']
2486
2487 for i, format in enumerate(formats):
2488 if format.get('format') is None:
2489 format['format'] = '{id} - {res}{note}'.format(
2490 id=format['format_id'],
2491 res=self.format_resolution(format),
2492 note=format_field(format, 'format_note', ' (%s)'),
2493 )
2494 if format.get('protocol') is None:
2495 format['protocol'] = determine_protocol(format)
2496 if format.get('resolution') is None:
2497 format['resolution'] = self.format_resolution(format, default=None)
2498 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2499 format['dynamic_range'] = 'SDR'
2500 if (info_dict.get('duration') and format.get('tbr')
2501 and not format.get('filesize') and not format.get('filesize_approx')):
2502 format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
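# Illustrative: duration=60s at tbr=1000 (kbit/s) gives
# int(60 * 1000 * 128) = 7,680,000 bytes (~7.3 MiB)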
2503
2504 # Add HTTP headers, so that external programs can use them from the
2505 # json output
2506 full_format_info = info_dict.copy()
2507 full_format_info.update(format)
2508 format['http_headers'] = self._calc_headers(full_format_info)
2509 # Remove private housekeeping stuff
2510 if '__x_forwarded_for_ip' in info_dict:
2511 del info_dict['__x_forwarded_for_ip']
2512
2513 if self.params.get('check_formats') is True:
2514 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2515
2516 if not formats or formats[0] is not info_dict:
2517 # Only set the 'formats' field if the original info_dict lists them;
2518 # otherwise we end up with a circular reference: the first (and only)
2519 # element of the 'formats' field in info_dict would be info_dict
2520 # itself, which can't be exported to json
2521 info_dict['formats'] = formats
2522
2523 info_dict, _ = self.pre_process(info_dict)
2524
2525 if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
2526 return info_dict
2527
2528 self.post_extract(info_dict)
2529 info_dict, _ = self.pre_process(info_dict, 'after_filter')
2530
2531 # The pre-processors may have modified the formats
2532 formats = info_dict.get('formats', [info_dict])
2533
2534 list_only = self.params.get('simulate') is None and (
2535 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2536 interactive_format_selection = not list_only and self.format_selector == '-'
2537 if self.params.get('list_thumbnails'):
2538 self.list_thumbnails(info_dict)
2539 if self.params.get('listsubtitles'):
2540 if 'automatic_captions' in info_dict:
2541 self.list_subtitles(
2542 info_dict['id'], automatic_captions, 'automatic captions')
2543 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2544 if self.params.get('listformats') or interactive_format_selection:
2545 self.list_formats(info_dict)
2546 if list_only:
2547 # Without this printing, -F --print-json will not work
2548 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2549 return info_dict
2550
2551 format_selector = self.format_selector
2552 if format_selector is None:
2553 req_format = self._default_format_spec(info_dict, download=download)
2554 self.write_debug('Default format spec: %s' % req_format)
2555 format_selector = self.build_format_selector(req_format)
2556
2557 while True:
2558 if interactive_format_selection:
2559 req_format = input(
2560 self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
2561 try:
2562 format_selector = self.build_format_selector(req_format)
2563 except SyntaxError as err:
2564 self.report_error(err, tb=False, is_error=False)
2565 continue
2566
2567 formats_to_download = list(format_selector({
2568 'formats': formats,
2569 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
2570 'incomplete_formats': (
2571 # All formats are video-only or
2572 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2573 # all formats are audio-only
2574 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
2575 }))
2576 if interactive_format_selection and not formats_to_download:
2577 self.report_error('Requested format is not available', tb=False, is_error=False)
2578 continue
2579 break
2580
2581 if not formats_to_download:
2582 if not self.params.get('ignore_no_formats_error'):
2583 raise ExtractorError(
2584 'Requested format is not available. Use --list-formats for a list of available formats',
2585 expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
2586 self.report_warning('Requested format is not available')
2587 # Process what we can, even without any available formats.
2588 formats_to_download = [{}]
2589
2590 requested_ranges = self.params.get('download_ranges')
2591 if requested_ranges:
2592 requested_ranges = tuple(requested_ranges(info_dict, self))
2593
2594 best_format, downloaded_formats = formats_to_download[-1], []
2595 if download:
2596 if best_format:
2597 def to_screen(*msg):
2598 self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
2599
2600 to_screen(f'Downloading {len(formats_to_download)} format(s):',
2601 (f['format_id'] for f in formats_to_download))
2602 if requested_ranges:
2603 to_screen(f'Downloading {len(requested_ranges)} time ranges:',
2604 (f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
2605 max_downloads_reached = False
2606
2607 for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
2608 new_info = self._copy_infodict(info_dict)
2609 new_info.update(fmt)
2610 offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf')
2611 if chapter or offset:
2612 new_info.update({
2613 'section_start': offset + chapter.get('start_time', 0),
2614 'section_end': offset + min(chapter.get('end_time', duration), duration),
2615 'section_title': chapter.get('title'),
2616 'section_number': chapter.get('index'),
2617 })
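# Illustrative: a chapter {'start_time': 60, 'end_time': 90} on an info
# dict with section_start=30 yields section 90-120 (given duration >= 90)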
2618 downloaded_formats.append(new_info)
2619 try:
2620 self.process_info(new_info)
2621 except MaxDownloadsReached:
2622 max_downloads_reached = True
2623 self._raise_pending_errors(new_info)
2624 # Remove copied info
2625 for key, val in tuple(new_info.items()):
2626 if info_dict.get(key) == val:
2627 new_info.pop(key)
2628 if max_downloads_reached:
2629 break
2630
2631 write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
2632 assert write_archive.issubset({True, False, 'ignore'})
2633 if True in write_archive and False not in write_archive:
2634 self.record_download_archive(info_dict)
2635
2636 info_dict['requested_downloads'] = downloaded_formats
2637 info_dict = self.run_all_pps('after_video', info_dict)
2638 if max_downloads_reached:
2639 raise MaxDownloadsReached()
2640
2641 # We update the info dict with the selected best quality format (backwards compatibility)
2642 info_dict.update(best_format)
2643 return info_dict
2644
2645 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2646 """Select the requested subtitles and their format"""
2647 available_subs, normal_sub_langs = {}, []
2648 if normal_subtitles and self.params.get('writesubtitles'):
2649 available_subs.update(normal_subtitles)
2650 normal_sub_langs = tuple(normal_subtitles.keys())
2651 if automatic_captions and self.params.get('writeautomaticsub'):
2652 for lang, cap_info in automatic_captions.items():
2653 if lang not in available_subs:
2654 available_subs[lang] = cap_info
2655
2656 if (not available_subs
2657 or (not self.params.get('writesubtitles')
2658 and not self.params.get('writeautomaticsub'))):
2659 return None
2660
2661 all_sub_langs = tuple(available_subs.keys())
2662 if self.params.get('allsubtitles', False):
2663 requested_langs = all_sub_langs
2664 elif self.params.get('subtitleslangs', False):
2665 # A list is used so that the order of languages will be the same as
2666 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
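# Illustrative: subtitleslangs=['en.*', '-en-US'] first adds every
# language matching 'en.*' and then removes 'en-US' from the selection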
2667 requested_langs = []
2668 for lang_re in self.params.get('subtitleslangs'):
2669 discard = lang_re[0] == '-'
2670 if discard:
2671 lang_re = lang_re[1:]
2672 if lang_re == 'all':
2673 if discard:
2674 requested_langs = []
2675 else:
2676 requested_langs.extend(all_sub_langs)
2677 continue
2678 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
2679 if discard:
2680 for lang in current_langs:
2681 while lang in requested_langs:
2682 requested_langs.remove(lang)
2683 else:
2684 requested_langs.extend(current_langs)
2685 requested_langs = orderedSet(requested_langs)
2686 elif normal_sub_langs:
2687 requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
2688 else:
2689 requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
2690 if requested_langs:
2691 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2692
2693 formats_query = self.params.get('subtitlesformat', 'best')
2694 formats_preference = formats_query.split('/') if formats_query else []
2695 subs = {}
2696 for lang in requested_langs:
2697 formats = available_subs.get(lang)
2698 if formats is None:
2699 self.report_warning(f'{lang} subtitles not available for {video_id}')
2700 continue
2701 for ext in formats_preference:
2702 if ext == 'best':
2703 f = formats[-1]
2704 break
2705 matches = list(filter(lambda f: f['ext'] == ext, formats))
2706 if matches:
2707 f = matches[-1]
2708 break
2709 else:
2710 f = formats[-1]
2711 self.report_warning(
2712 'No subtitle format found matching "%s" for language %s, '
2713 'using %s' % (formats_query, lang, f['ext']))
2714 subs[lang] = f
2715 return subs
2716
2717 def _forceprint(self, key, info_dict):
2718 if info_dict is None:
2719 return
2720 info_copy = info_dict.copy()
2721 info_copy['formats_table'] = self.render_formats_table(info_dict)
2722 info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
2723 info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
2724 info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
2725
2726 def format_tmpl(tmpl):
2727 mobj = re.match(r'\w+(=?)$', tmpl)
2728 if mobj and mobj.group(1):
2729 return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
2730 elif mobj:
2731 return f'%({tmpl})s'
2732 return tmpl
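# Illustrative expansions: 'title' -> '%(title)s' and
# 'title=' -> 'title = %(title)r'; anything else is treated as a
# full output template and evaluated as-is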
2733
2734 for tmpl in self.params['forceprint'].get(key, []):
2735 self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
2736
2737 for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
2738 filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
2739 tmpl = format_tmpl(tmpl)
2740 self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
2741 if self._ensure_dir_exists(filename):
2742 with open(filename, 'a', encoding='utf-8') as f:
2743 f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
2744
2745 def __forced_printings(self, info_dict, filename, incomplete):
2746 def print_mandatory(field, actual_field=None):
2747 if actual_field is None:
2748 actual_field = field
2749 if (self.params.get('force%s' % field, False)
2750 and (not incomplete or info_dict.get(actual_field) is not None)):
2751 self.to_stdout(info_dict[actual_field])
2752
2753 def print_optional(field):
2754 if (self.params.get('force%s' % field, False)
2755 and info_dict.get(field) is not None):
2756 self.to_stdout(info_dict[field])
2757
2758 info_dict = info_dict.copy()
2759 if filename is not None:
2760 info_dict['filename'] = filename
2761 if info_dict.get('requested_formats') is not None:
2762 # For RTMP URLs, also include the playpath
2763 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2764 elif info_dict.get('url'):
2765 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2766
2767 if (self.params.get('forcejson')
2768 or self.params['forceprint'].get('video')
2769 or self.params['print_to_file'].get('video')):
2770 self.post_extract(info_dict)
2771 self._forceprint('video', info_dict)
2772
2773 print_mandatory('title')
2774 print_mandatory('id')
2775 print_mandatory('url', 'urls')
2776 print_optional('thumbnail')
2777 print_optional('description')
2778 print_optional('filename')
2779 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2780 self.to_stdout(formatSeconds(info_dict['duration']))
2781 print_mandatory('format')
2782
2783 if self.params.get('forcejson'):
2784 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2785
2786 def dl(self, name, info, subtitle=False, test=False):
2787 if not info.get('url'):
2788 self.raise_no_formats(info, True)
2789
2790 if test:
2791 verbose = self.params.get('verbose')
2792 params = {
2793 'test': True,
2794 'quiet': self.params.get('quiet') or not verbose,
2795 'verbose': verbose,
2796 'noprogress': not verbose,
2797 'nopart': True,
2798 'skip_unavailable_fragments': False,
2799 'keep_fragments': False,
2800 'overwrites': True,
2801 '_no_ytdl_file': True,
2802 }
2803 else:
2804 params = self.params
2805 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2806 if not test:
2807 for ph in self._progress_hooks:
2808 fd.add_progress_hook(ph)
2809 urls = '", "'.join(
2810 (f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
2811 for f in info.get('requested_formats', []) or [info])
2812 self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
2813
2814 # Note: Ideally, info should be deep-copied so that hooks cannot modify it.
2815 # But it may contain objects that are not deep-copyable
2816 new_info = self._copy_infodict(info)
2817 if new_info.get('http_headers') is None:
2818 new_info['http_headers'] = self._calc_headers(new_info)
2819 return fd.download(name, new_info, subtitle)
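
# Note: the return value here is whatever the selected FileDownloader's
# download() returns; callers in this file unpack it as, e.g.:
#
#   success, real_download = self.dl(temp_filename, info_dict)
#
# where real_download is False when an already-complete file was reused
# instead of being downloaded again.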
2820
2821 def existing_file(self, filepaths, *, default_overwrite=True):
2822 existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
2823 if existing_files and not self.params.get('overwrites', default_overwrite):
2824 return existing_files[0]
2825
2826 for file in existing_files:
2827 self.report_file_delete(file)
2828 os.remove(file)
2829 return None
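
# Illustrative semantics: with overwrites disabled, the first candidate that
# exists is returned and kept; with overwrites enabled, every existing
# candidate is deleted and None is returned so the caller downloads afresh.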
2830
2831 def process_info(self, info_dict):
2832 """Process a single resolved IE result. (Modifies it in-place)"""
2833
2834 assert info_dict.get('_type', 'video') == 'video'
2835 original_infodict = info_dict
2836
2837 if 'format' not in info_dict and 'ext' in info_dict:
2838 info_dict['format'] = info_dict['ext']
2839
2840 # This is mostly just for backward compatibility of process_info
2841 # As a side-effect, this allows for format-specific filters
2842 if self._match_entry(info_dict) is not None:
2843 info_dict['__write_download_archive'] = 'ignore'
2844 return
2845
2846 # Does nothing under normal operation - for backward compatibility of process_info
2847 self.post_extract(info_dict)
2848 self._num_downloads += 1
2849
2850 # info_dict['_filename'] needs to be set for backward compatibility
2851 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2852 temp_filename = self.prepare_filename(info_dict, 'temp')
2853 files_to_move = {}
2854
2855 # Forced printings
2856 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2857
2858 def check_max_downloads():
2859 if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
2860 raise MaxDownloadsReached()
2861
2862 if self.params.get('simulate'):
2863 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2864 check_max_downloads()
2865 return
2866
2867 if full_filename is None:
2868 return
2869 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2870 return
2871 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2872 return
2873
2874 if self._write_description('video', info_dict,
2875 self.prepare_filename(info_dict, 'description')) is None:
2876 return
2877
2878 sub_files = self._write_subtitles(info_dict, temp_filename)
2879 if sub_files is None:
2880 return
2881 files_to_move.update(dict(sub_files))
2882
2883 thumb_files = self._write_thumbnails(
2884 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2885 if thumb_files is None:
2886 return
2887 files_to_move.update(dict(thumb_files))
2888
2889 infofn = self.prepare_filename(info_dict, 'infojson')
2890 _infojson_written = self._write_info_json('video', info_dict, infofn)
2891 if _infojson_written:
2892 info_dict['infojson_filename'] = infofn
2893 # For backward compatibility, even though it was a private field
2894 info_dict['__infojson_filename'] = infofn
2895 elif _infojson_written is None:
2896 return
2897
2898 # Note: Annotations are deprecated
2899 annofn = None
2900 if self.params.get('writeannotations', False):
2901 annofn = self.prepare_filename(info_dict, 'annotation')
2902 if annofn:
2903 if not self._ensure_dir_exists(encodeFilename(annofn)):
2904 return
2905 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2906 self.to_screen('[info] Video annotations are already present')
2907 elif not info_dict.get('annotations'):
2908 self.report_warning('There are no annotations to write.')
2909 else:
2910 try:
2911 self.to_screen('[info] Writing video annotations to: ' + annofn)
2912 with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2913 annofile.write(info_dict['annotations'])
2914 except (KeyError, TypeError):
2915 self.report_warning('There are no annotations to write.')
2916 except OSError:
2917 self.report_error('Cannot write annotations file: ' + annofn)
2918 return
2919
2920 # Write internet shortcut files
2921 def _write_link_file(link_type):
2922 url = try_get(info_dict['webpage_url'], iri_to_uri)
2923 if not url:
2924 self.report_warning(
2925 f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
2926 return True
2927 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
2928 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2929 return False
2930 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2931 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
2932 return True
2933 try:
2934 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
2935 with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
2936 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
2937 template_vars = {'url': url}
2938 if link_type == 'desktop':
2939 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
2940 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
2941 except OSError:
2942 self.report_error(f'Cannot write internet shortcut {linkfn}')
2943 return False
2944 return True
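
# LINK_TEMPLATES (from .utils) supplies the boilerplate for each shortcut
# type. For instance, a Windows .url shortcut is an INI-style file roughly of
# the form
#
#   [InternetShortcut]
#   URL=https://example.com/watch?v=xxxx
#
# which is why 'url' shortcuts are written with '\r\n' newlines above.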
2945
2946 write_links = {
2947 'url': self.params.get('writeurllink'),
2948 'webloc': self.params.get('writewebloclink'),
2949 'desktop': self.params.get('writedesktoplink'),
2950 }
2951 if self.params.get('writelink'):
2952 link_type = ('webloc' if sys.platform == 'darwin'
2953 else 'desktop' if sys.platform.startswith('linux')
2954 else 'url')
2955 write_links[link_type] = True
2956
2957 if any(should_write and not _write_link_file(link_type)
2958 for link_type, should_write in write_links.items()):
2959 return
2960
2961 def replace_info_dict(new_info):
2962 nonlocal info_dict
2963 if new_info == info_dict:
2964 return
2965 info_dict.clear()
2966 info_dict.update(new_info)
2967
2968 new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2969 replace_info_dict(new_info)
2970
2971 if self.params.get('skip_download'):
2972 info_dict['filepath'] = temp_filename
2973 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2974 info_dict['__files_to_move'] = files_to_move
2975 replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
2976 info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
2977 else:
2978 # Download
2979 info_dict.setdefault('__postprocessors', [])
2980 try:
2981
2982 def existing_video_file(*filepaths):
2983 ext = info_dict.get('ext')
2984 converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
2985 file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
2986 default_overwrite=False)
2987 if file:
2988 info_dict['ext'] = os.path.splitext(file)[1][1:]
2989 return file
2990
2991 fd, success = None, True
2992 if info_dict.get('protocol') or info_dict.get('url'):
2993 fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
2994 if fd is not FFmpegFD and (
2995 info_dict.get('section_start') or info_dict.get('section_end')):
2996 msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
2997 else 'You have requested downloading the video partially, but ffmpeg is not installed')
2998 self.report_error(f'{msg}. Aborting')
2999 return
3000
3001 if info_dict.get('requested_formats') is not None:
3002
3003 def compatible_formats(formats):
3004 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
3005 video_formats = [format for format in formats if format.get('vcodec') != 'none']
3006 audio_formats = [format for format in formats if format.get('acodec') != 'none']
3007 if len(video_formats) > 2 or len(audio_formats) > 2:
3008 return False
3009
3010 # Check extension
3011 exts = {format.get('ext') for format in formats}
3012 COMPATIBLE_EXTS = (
3013 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
3014 {'webm'},
3015 )
3016 for ext_sets in COMPATIBLE_EXTS:
3017 if ext_sets.issuperset(exts):
3018 return True
3019 # TODO: Check acodec/vcodec
3020 return False
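
# e.g. (hypothetical requested formats): exts {'mp4', 'm4a'} fall within the
# first set above, so the streams can be merged as-is, while {'mp4', 'webm'}
# matches neither set and the code below falls back to remuxing into mkv.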
3021
3022 requested_formats = info_dict['requested_formats']
3023 old_ext = info_dict['ext']
3024 if self.params.get('merge_output_format') is None:
3025 if not compatible_formats(requested_formats):
3026 info_dict['ext'] = 'mkv'
3027 self.report_warning(
3028 'Requested formats are incompatible for merge and will be merged into mkv')
3029 if (info_dict['ext'] == 'webm'
3030 and info_dict.get('thumbnails')
3031 # check with type instead of pp_key, __name__, or isinstance
3032 # since we don't want any custom PPs to trigger this
3033 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
3034 info_dict['ext'] = 'mkv'
3035 self.report_warning(
3036 'webm doesn\'t support embedding a thumbnail, mkv will be used')
3037 new_ext = info_dict['ext']
3038
3039 def correct_ext(filename, ext=new_ext):
3040 if filename == '-':
3041 return filename
3042 filename_real_ext = os.path.splitext(filename)[1][1:]
3043 filename_wo_ext = (
3044 os.path.splitext(filename)[0]
3045 if filename_real_ext in (old_ext, new_ext)
3046 else filename)
3047 return f'{filename_wo_ext}.{ext}'
3048
3049 # Ensure filename always has a correct extension for successful merge
3050 full_filename = correct_ext(full_filename)
3051 temp_filename = correct_ext(temp_filename)
3052 dl_filename = existing_video_file(full_filename, temp_filename)
3053 info_dict['__real_download'] = False
3054
3055 merger = FFmpegMergerPP(self)
3056 downloaded = []
3057 if dl_filename is not None:
3058 self.report_file_already_downloaded(dl_filename)
3059 elif fd:
3060 for f in requested_formats if fd != FFmpegFD else []:
3061 f['filepath'] = fname = prepend_extension(
3062 correct_ext(temp_filename, info_dict['ext']),
3063 'f%s' % f['format_id'], info_dict['ext'])
3064 downloaded.append(fname)
3065 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
3066 success, real_download = self.dl(temp_filename, info_dict)
3067 info_dict['__real_download'] = real_download
3068 else:
3069 if self.params.get('allow_unplayable_formats'):
3070 self.report_warning(
3071 'You have requested merging of multiple formats '
3072 'while also allowing unplayable formats to be downloaded. '
3073 'The formats won\'t be merged to prevent data corruption.')
3074 elif not merger.available:
3075 msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
3076 if not self.params.get('ignoreerrors'):
3077 self.report_error(f'{msg}. Aborting due to --abort-on-error')
3078 return
3079 self.report_warning(f'{msg}. The formats won\'t be merged')
3080
3081 if temp_filename == '-':
3082 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
3083 else 'but the formats are incompatible for simultaneous download' if merger.available
3084 else 'but ffmpeg is not installed')
3085 self.report_warning(
3086 f'You have requested downloading multiple formats to stdout {reason}. '
3087 'The formats will be streamed one after the other')
3088 fname = temp_filename
3089 for f in requested_formats:
3090 new_info = dict(info_dict)
3091 del new_info['requested_formats']
3092 new_info.update(f)
3093 if temp_filename != '-':
3094 fname = prepend_extension(
3095 correct_ext(temp_filename, new_info['ext']),
3096 'f%s' % f['format_id'], new_info['ext'])
3097 if not self._ensure_dir_exists(fname):
3098 return
3099 f['filepath'] = fname
3100 downloaded.append(fname)
3101 partial_success, real_download = self.dl(fname, new_info)
3102 info_dict['__real_download'] = info_dict['__real_download'] or real_download
3103 success = success and partial_success
3104
3105 if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
3106 info_dict['__postprocessors'].append(merger)
3107 info_dict['__files_to_merge'] = downloaded
3108 # Even if there were no downloads, the merge itself is only happening now
3109 info_dict['__real_download'] = True
3110 else:
3111 for file in downloaded:
3112 files_to_move[file] = None
3113 else:
3114 # Just a single file
3115 dl_filename = existing_video_file(full_filename, temp_filename)
3116 if dl_filename is None or dl_filename == temp_filename:
3117 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
3118 # So we should try to resume the download
3119 success, real_download = self.dl(temp_filename, info_dict)
3120 info_dict['__real_download'] = real_download
3121 else:
3122 self.report_file_already_downloaded(dl_filename)
3123
3124 dl_filename = dl_filename or temp_filename
3125 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
3126
3127 except network_exceptions as err:
3128 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
3129 return
3130 except OSError as err:
3131 raise UnavailableVideoError(err)
3132 except ContentTooShortError as err:
3133 self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
3134 return
3135
3136 self._raise_pending_errors(info_dict)
3137 if success and full_filename != '-':
3138
3139 def fixup():
3140 do_fixup = True
3141 fixup_policy = self.params.get('fixup')
3142 vid = info_dict['id']
3143
3144 if fixup_policy in ('ignore', 'never'):
3145 return
3146 elif fixup_policy == 'warn':
3147 do_fixup = 'warn'
3148 elif fixup_policy != 'force':
3149 assert fixup_policy in ('detect_or_warn', None)
3150 if not info_dict.get('__real_download'):
3151 do_fixup = False
3152
3153 def ffmpeg_fixup(cndn, msg, cls):
3154 if not (do_fixup and cndn):
3155 return
3156 elif do_fixup == 'warn':
3157 self.report_warning(f'{vid}: {msg}')
3158 return
3159 pp = cls(self)
3160 if pp.available:
3161 info_dict['__postprocessors'].append(pp)
3162 else:
3163 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
3164
3165 stretched_ratio = info_dict.get('stretched_ratio')
3166 ffmpeg_fixup(
3167 stretched_ratio not in (1, None),
3168 f'Non-uniform pixel ratio {stretched_ratio}',
3169 FFmpegFixupStretchedPP)
3170
3171 ffmpeg_fixup(
3172 (info_dict.get('requested_formats') is None
3173 and info_dict.get('container') == 'm4a_dash'
3174 and info_dict.get('ext') == 'm4a'),
3175 'writing DASH m4a. Only some players support this container',
3176 FFmpegFixupM4aPP)
3177
3178 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
3179 downloader = downloader.FD_NAME if downloader else None
3180
3181 if info_dict.get('requested_formats') is None: # Not necessary if doing merger
3182 ffmpeg_fixup((downloader == 'hlsnative' and not self.params.get('hls_use_mpegts'))
3183 or (info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None),
3184 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
3185 FFmpegFixupM3u8PP)
3186 ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
3187 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
3188
3189 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3190 ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
3191
3192 fixup()
3193 try:
3194 replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
3195 except PostProcessingError as err:
3196 self.report_error('Postprocessing: %s' % str(err))
3197 return
3198 try:
3199 for ph in self._post_hooks:
3200 ph(info_dict['filepath'])
3201 except Exception as err:
3202 self.report_error('post hooks: %s' % str(err))
3203 return
3204 info_dict['__write_download_archive'] = True
3205
3206 assert info_dict is original_infodict # Make sure the info_dict was modified in-place
3207 if self.params.get('force_write_download_archive'):
3208 info_dict['__write_download_archive'] = True
3209 check_max_downloads()
3210
3211 def __download_wrapper(self, func):
3212 @functools.wraps(func)
3213 def wrapper(*args, **kwargs):
3214 try:
3215 res = func(*args, **kwargs)
3216 except UnavailableVideoError as e:
3217 self.report_error(e)
3218 except DownloadCancelled as e:
3219 self.to_screen(f'[info] {e}')
3220 if not self.params.get('break_per_url'):
3221 raise
3222 else:
3223 if self.params.get('dump_single_json', False):
3224 self.post_extract(res)
3225 self.to_stdout(json.dumps(self.sanitize_info(res)))
3226 return wrapper
3227
3228 def download(self, url_list):
3229 """Download a given list of URLs."""
3230 url_list = variadic(url_list) # Passing a single URL is a common mistake
3231 outtmpl = self.params['outtmpl']['default']
3232 if (len(url_list) > 1
3233 and outtmpl != '-'
3234 and '%' not in outtmpl
3235 and self.params.get('max_downloads') != 1):
3236 raise SameFileError(outtmpl)
3237
3238 for url in url_list:
3239 self.__download_wrapper(self.extract_info)(
3240 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3241
3242 return self._download_retcode
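
# A minimal embedding sketch (hypothetical URL; options as documented in the
# class docstring at the top of this file):
#
#   from yt_dlp import YoutubeDL
#
#   with YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
#       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])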
3243
3244 def download_with_info_file(self, info_filename):
3245 with contextlib.closing(fileinput.FileInput(
3246 [info_filename], mode='r',
3247 openhook=fileinput.hook_encoded('utf-8'))) as f:
3248 # FileInput doesn't have a read method, so we can't call json.load
3249 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3250 try:
3251 self.__download_wrapper(self.process_ie_result)(info, download=True)
3252 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3253 if not isinstance(e, EntryNotInPlaylist):
3254 self.to_stderr('\r')
3255 webpage_url = info.get('webpage_url')
3256 if webpage_url is not None:
3257 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3258 return self.download([webpage_url])
3259 else:
3260 raise
3261 return self._download_retcode
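
# Sketch: re-running a download from a previously written info json
# (hypothetical path; this is what --load-info-json uses on the CLI):
#
#   with YoutubeDL() as ydl:
#       ydl.download_with_info_file('example.info.json')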
3262
3263 @staticmethod
3264 def sanitize_info(info_dict, remove_private_keys=False):
3265 ''' Sanitize the infodict for converting to json '''
3266 if info_dict is None:
3267 return info_dict
3268 info_dict.setdefault('epoch', int(time.time()))
3269 info_dict.setdefault('_type', 'video')
3270
3271 if remove_private_keys:
3272 reject = lambda k, v: v is None or k.startswith('__') or k in {
3273 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
3274 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
3275 }
3276 else:
3277 reject = lambda k, v: False
3278
3279 def filter_fn(obj):
3280 if isinstance(obj, dict):
3281 return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
3282 elif isinstance(obj, (list, tuple, set, LazyList)):
3283 return list(map(filter_fn, obj))
3284 elif obj is None or isinstance(obj, (str, int, float, bool)):
3285 return obj
3286 else:
3287 return repr(obj)
3288
3289 return filter_fn(info_dict)
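
# Illustrative behaviour of the filtering above (made-up values):
#
#   info = {'id': 'x', '_filename': 'x.mp4', '__private': 1, 'obj': range(3)}
#   YoutubeDL.sanitize_info(info, remove_private_keys=True)
#   # -> {'id': 'x', 'obj': 'range(0, 3)', 'epoch': ..., '_type': 'video'}
#
# '__private' and '_filename' are rejected, while the non-JSON-serializable
# range object is stringified via repr().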
3290
3291 @staticmethod
3292 def filter_requested_info(info_dict, actually_filter=True):
3293 ''' Alias of sanitize_info for backward compatibility '''
3294 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3295
3296 def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
3297 for filename in set(filter(None, files_to_delete)):
3298 if msg:
3299 self.to_screen(msg % filename)
3300 try:
3301 os.remove(filename)
3302 except OSError:
3303 self.report_warning(f'Unable to delete file {filename}')
3304 if filename in info.get('__files_to_move', []): # NB: Delete even if None
3305 del info['__files_to_move'][filename]
3306
3307 @staticmethod
3308 def post_extract(info_dict):
3309 def actual_post_extract(info_dict):
3310 if info_dict.get('_type') in ('playlist', 'multi_video'):
3311 for video_dict in info_dict.get('entries', {}):
3312 actual_post_extract(video_dict or {})
3313 return
3314
3315 post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
3316 info_dict.update(post_extractor())
3317
3318 actual_post_extract(info_dict or {})
3319
3320 def run_pp(self, pp, infodict):
3321 files_to_delete = []
3322 if '__files_to_move' not in infodict:
3323 infodict['__files_to_move'] = {}
3324 try:
3325 files_to_delete, infodict = pp.run(infodict)
3326 except PostProcessingError as e:
3327 # ignoreerrors must be exactly True, and not merely 'only_download'
3328 if self.params.get('ignoreerrors') is True:
3329 self.report_error(e)
3330 return infodict
3331 raise
3332
3333 if not files_to_delete:
3334 return infodict
3335 if self.params.get('keepvideo', False):
3336 for f in files_to_delete:
3337 infodict['__files_to_move'].setdefault(f, '')
3338 else:
3339 self._delete_downloaded_files(
3340 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
3341 return infodict
3342
3343 def run_all_pps(self, key, info, *, additional_pps=None):
3344 self._forceprint(key, info)
3345 for pp in (additional_pps or []) + self._pps[key]:
3346 info = self.run_pp(pp, info)
3347 return info
3348
3349 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3350 info = dict(ie_info)
3351 info['__files_to_move'] = files_to_move or {}
3352 try:
3353 info = self.run_all_pps(key, info)
3354 except PostProcessingError as err:
3355 msg = f'Preprocessing: {err}'
3356 info.setdefault('__pending_error', msg)
3357 self.report_error(msg, is_error=False)
3358 return info, info.pop('__files_to_move', None)
3359
3360 def post_process(self, filename, info, files_to_move=None):
3361 """Run all the postprocessors on the given file."""
3362 info['filepath'] = filename
3363 info['__files_to_move'] = files_to_move or {}
3364 info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
3365 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3366 del info['__files_to_move']
3367 return self.run_all_pps('after_move', info)
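
# Taken together with pre_process above, the per-video pipeline is thus:
# 'pre_process'/'before_dl' PPs run before the download, 'post_process' PPs
# (plus any __postprocessors queued during download) run after it, then
# MoveFilesAfterDownloadPP relocates the files and the 'after_move' PPs run.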
3368
3369 def _make_archive_id(self, info_dict):
3370 video_id = info_dict.get('id')
3371 if not video_id:
3372 return
3373 # Future-proof against any change in case
3374 # and for backwards compatibility with prior versions
3375 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3376 if extractor is None:
3377 url = str_or_none(info_dict.get('url'))
3378 if not url:
3379 return
3380 # Try to find matching extractor for the URL and take its ie_key
3381 for ie_key, ie in self._ies.items():
3382 if ie.suitable(url):
3383 extractor = ie_key
3384 break
3385 else:
3386 return
3387 return f'{extractor.lower()} {video_id}'
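
# The resulting archive id looks like 'youtube dQw4w9WgXcQ' (hypothetical
# video): the lowercased extractor key, a space, then the video id -- one
# such line per entry in the --download-archive file appended to below.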
3388
3389 def in_download_archive(self, info_dict):
3390 fn = self.params.get('download_archive')
3391 if fn is None:
3392 return False
3393
3394 vid_id = self._make_archive_id(info_dict)
3395 if not vid_id:
3396 return False # Incomplete video information
3397
3398 return vid_id in self.archive
3399
3400 def record_download_archive(self, info_dict):
3401 fn = self.params.get('download_archive')
3402 if fn is None:
3403 return
3404 vid_id = self._make_archive_id(info_dict)
3405 assert vid_id
3406 self.write_debug(f'Adding to archive: {vid_id}')
3407 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3408 archive_file.write(vid_id + '\n')
3409 self.archive.add(vid_id)
3410
3411 @staticmethod
3412 def format_resolution(format, default='unknown'):
3413 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3414 return 'audio only'
3415 if format.get('resolution') is not None:
3416 return format['resolution']
3417 if format.get('width') and format.get('height'):
3418 return '%dx%d' % (format['width'], format['height'])
3419 elif format.get('height'):
3420 return '%sp' % format['height']
3421 elif format.get('width'):
3422 return '%dx?' % format['width']
3423 return default
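
# Illustrative outputs (hypothetical format dicts):
#
#   format_resolution({'vcodec': 'none', 'acodec': 'mp4a'})  -> 'audio only'
#   format_resolution({'width': 1920, 'height': 1080})       -> '1920x1080'
#   format_resolution({'height': 720})                       -> '720p'
#   format_resolution({'width': 640})                        -> '640x?'
#   format_resolution({})                                    -> 'unknown'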
3424
3425 def _list_format_headers(self, *headers):
3426 if self.params.get('listformats_table', True) is not False:
3427 return [self._format_out(header, self.Styles.HEADERS) for header in headers]
3428 return headers
3429
3430 def _format_note(self, fdict):
3431 res = ''
3432 if fdict.get('ext') in ['f4f', 'f4m']:
3433 res += '(unsupported)'
3434 if fdict.get('language'):
3435 if res:
3436 res += ' '
3437 res += '[%s]' % fdict['language']
3438 if fdict.get('format_note') is not None:
3439 if res:
3440 res += ' '
3441 res += fdict['format_note']
3442 if fdict.get('tbr') is not None:
3443 if res:
3444 res += ', '
3445 res += '%4dk' % fdict['tbr']
3446 if fdict.get('container') is not None:
3447 if res:
3448 res += ', '
3449 res += '%s container' % fdict['container']
3450 if (fdict.get('vcodec') is not None
3451 and fdict.get('vcodec') != 'none'):
3452 if res:
3453 res += ', '
3454 res += fdict['vcodec']
3455 if fdict.get('vbr') is not None:
3456 res += '@'
3457 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3458 res += 'video@'
3459 if fdict.get('vbr') is not None:
3460 res += '%4dk' % fdict['vbr']
3461 if fdict.get('fps') is not None:
3462 if res:
3463 res += ', '
3464 res += '%sfps' % fdict['fps']
3465 if fdict.get('acodec') is not None:
3466 if res:
3467 res += ', '
3468 if fdict['acodec'] == 'none':
3469 res += 'video only'
3470 else:
3471 res += '%-5s' % fdict['acodec']
3472 elif fdict.get('abr') is not None:
3473 if res:
3474 res += ', '
3475 res += 'audio'
3476 if fdict.get('abr') is not None:
3477 res += '@%3dk' % fdict['abr']
3478 if fdict.get('asr') is not None:
3479 res += ' (%5dHz)' % fdict['asr']
3480 if fdict.get('filesize') is not None:
3481 if res:
3482 res += ', '
3483 res += format_bytes(fdict['filesize'])
3484 elif fdict.get('filesize_approx') is not None:
3485 if res:
3486 res += ', '
3487 res += '~' + format_bytes(fdict['filesize_approx'])
3488 return res
3489
3490 def render_formats_table(self, info_dict):
3491 if not info_dict.get('formats') and not info_dict.get('url'):
3492 return None
3493
3494 formats = info_dict.get('formats', [info_dict])
3495 if self.params.get('listformats_table', True) is False:
3496 table = [
3497 [
3498 format_field(f, 'format_id'),
3499 format_field(f, 'ext'),
3500 self.format_resolution(f),
3501 self._format_note(f)
3502 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3503 return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
3504
3505 delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3506 table = [
3507 [
3508 self._format_out(format_field(f, 'format_id'), self.Styles.ID),
3509 format_field(f, 'ext'),
3510 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3511 format_field(f, 'fps', '\t%d'),
3512 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3513 delim,
3514 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3515 format_field(f, 'tbr', '\t%dk'),
3516 shorten_protocol_name(f.get('protocol', '')),
3517 delim,
3518 format_field(f, 'vcodec', default='unknown').replace(
3519 'none', 'images' if f.get('acodec') == 'none'
3520 else self._format_out('audio only', self.Styles.SUPPRESS)),
3521 format_field(f, 'vbr', '\t%dk'),
3522 format_field(f, 'acodec', default='unknown').replace(
3523 'none', '' if f.get('vcodec') == 'none'
3524 else self._format_out('video only', self.Styles.SUPPRESS)),
3525 format_field(f, 'abr', '\t%dk'),
3526 format_field(f, 'asr', '\t%dHz'),
3527 join_nonempty(
3528 self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3529 format_field(f, 'language', '[%s]'),
3530 join_nonempty(format_field(f, 'format_note'),
3531 format_field(f, 'container', ignore=(None, f.get('ext'))),
3532 delim=', '),
3533 delim=' '),
3534 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3535 header_line = self._list_format_headers(
3536 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3537 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3538
3539 return render_table(
3540 header_line, table, hide_empty=True,
3541 delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
3542
3543 def render_thumbnails_table(self, info_dict):
3544 thumbnails = list(info_dict.get('thumbnails') or [])
3545 if not thumbnails:
3546 return None
3547 return render_table(
3548 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3549 [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
3550
3551 def render_subtitles_table(self, video_id, subtitles):
3552 def _row(lang, formats):
3553 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3554 if len(set(names)) == 1:
3555 names = [] if names[0] == 'unknown' else names[:1]
3556 return [lang, ', '.join(names), ', '.join(exts)]
3557
3558 if not subtitles:
3559 return None
3560 return render_table(
3561 self._list_format_headers('Language', 'Name', 'Formats'),
3562 [_row(lang, formats) for lang, formats in subtitles.items()],
3563 hide_empty=True)
3564
3565 def __list_table(self, video_id, name, func, *args):
3566 table = func(*args)
3567 if not table:
3568 self.to_screen(f'{video_id} has no {name}')
3569 return
3570 self.to_screen(f'[info] Available {name} for {video_id}:')
3571 self.to_stdout(table)
3572
3573 def list_formats(self, info_dict):
3574 self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
3575
3576 def list_thumbnails(self, info_dict):
3577 self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
3578
3579 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3580 self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
3581
3582 def urlopen(self, req):
3583 """ Start an HTTP download """
3584 if isinstance(req, str):
3585 req = sanitized_Request(req)
3586 return self._opener.open(req, timeout=self._socket_timeout)
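
# Sketch (hypothetical URL): requests funnel through the opener built in
# _setup_opener below, so the proxy, cookie and TLS options in self.params
# all apply:
#
#   body = ydl.urlopen('https://example.com/api.json').read()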
3587
3588 def print_debug_header(self):
3589 if not self.params.get('verbose'):
3590 return
3591
3592 # These imports can be slow. So import them only as needed
3593 from .extractor.extractors import _LAZY_LOADER
3594 from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
3595
3596 def get_encoding(stream):
3597 ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
3598 if not supports_terminal_sequences(stream):
3599 from .utils import WINDOWS_VT_MODE # Must be imported locally
3600 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
3601 return ret
3602
3603 encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
3604 locale.getpreferredencoding(),
3605 sys.getfilesystemencoding(),
3606 self.get_encoding(),
3607 ', '.join(
3608 f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
3609 if stream is not None and key != 'console')
3610 )
3611
3612 logger = self.params.get('logger')
3613 if logger:
3614 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3615 write_debug(encoding_str)
3616 else:
3617 write_string(f'[debug] {encoding_str}\n', encoding=None)
3618 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3619
3620 source = detect_variant()
3621 write_debug(join_nonempty(
3622 'yt-dlp version', __version__,
3623 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3624 '' if source == 'unknown' else f'({source})',
3625 delim=' '))
3626 if not _LAZY_LOADER:
3627 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3628 write_debug('Lazy loading extractors is forcibly disabled')
3629 else:
3630 write_debug('Lazy loading extractors is disabled')
3631 if plugin_extractors or plugin_postprocessors:
3632 write_debug('Plugins: %s' % [
3633 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3634 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3635 if self.params['compat_opts']:
3636 write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
3637
3638 if source == 'source':
3639 try:
3640 stdout, _, _ = Popen.run(
3641 ['git', 'rev-parse', '--short', 'HEAD'],
3642 text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
3643 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3644 if re.fullmatch('[0-9a-f]+', stdout.strip()):
3645 write_debug(f'Git HEAD: {stdout.strip()}')
3646 except Exception:
3647 with contextlib.suppress(Exception):
3648 sys.exc_clear()
3649
3650 def python_implementation():
3651 impl_name = platform.python_implementation()
3652 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3653 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3654 return impl_name
3655
3656 write_debug('Python version %s (%s %s) - %s' % (
3657 platform.python_version(),
3658 python_implementation(),
3659 platform.architecture()[0],
3660 platform_name()))
3661
3662 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3663 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3664 if ffmpeg_features:
3665 exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
3666
3667 exe_versions['rtmpdump'] = rtmpdump_version()
3668 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3669 exe_str = ', '.join(
3670 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3671 ) or 'none'
3672 write_debug('exe versions: %s' % exe_str)
3673
3674 from .compat.compat_utils import get_package_info
3675 from .dependencies import available_dependencies
3676
3677 write_debug('Optional libraries: %s' % (', '.join(sorted({
3678 join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
3679 })) or 'none'))
3680
3681 self._setup_opener()
3682 proxy_map = {}
3683 for handler in self._opener.handlers:
3684 if hasattr(handler, 'proxies'):
3685 proxy_map.update(handler.proxies)
3686 write_debug(f'Proxy map: {proxy_map}')
3687
3688 # Not implemented
3689 if False and self.params.get('call_home'):
3690 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
3691 write_debug('Public IP address: %s' % ipaddr)
3692 latest_version = self.urlopen(
3693 'https://yt-dl.org/latest/version').read().decode()
3694 if version_tuple(latest_version) > version_tuple(__version__):
3695 self.report_warning(
3696 'You are using an outdated version (newest version: %s)! '
3697 'See https://yt-dl.org/update if you need help updating.' %
3698 latest_version)
3699
3700 def _setup_opener(self):
3701 if hasattr(self, '_opener'):
3702 return
3703 timeout_val = self.params.get('socket_timeout')
3704 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3705
3706 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3707 opts_cookiefile = self.params.get('cookiefile')
3708 opts_proxy = self.params.get('proxy')
3709
3710 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3711
3712 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3713 if opts_proxy is not None:
3714 if opts_proxy == '':
3715 proxies = {}
3716 else:
3717 proxies = {'http': opts_proxy, 'https': opts_proxy}
3718 else:
3719 proxies = urllib.request.getproxies()
3720 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3721 if 'http' in proxies and 'https' not in proxies:
3722 proxies['https'] = proxies['http']
3723 proxy_handler = PerRequestProxyHandler(proxies)
3724
3725 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3726 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3727 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3728 redirect_handler = YoutubeDLRedirectHandler()
3729 data_handler = urllib.request.DataHandler()
3730
3731 # When passing our own FileHandler instance, build_opener won't add the
3732 # default FileHandler and allows us to disable the file protocol, which
3733 # can be used for malicious purposes (see
3734 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3735 file_handler = urllib.request.FileHandler()
3736
3737 def file_open(*args, **kwargs):
3738 raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3739 file_handler.file_open = file_open
3740
3741 opener = urllib.request.build_opener(
3742 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3743
3744 # Delete the default user-agent header, which would otherwise apply in
3745 # cases where our custom HTTP handler doesn't come into play
3746 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3747 opener.addheaders = []
3748 self._opener = opener
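
# e.g. with --proxy http://127.0.0.1:3128, PerRequestProxyHandler above is
# built from {'http': 'http://127.0.0.1:3128', 'https': 'http://127.0.0.1:3128'};
# with no --proxy, the environment proxies are used, with an http-only
# environment proxy also applied to https as noted above.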
3749
3750 def encode(self, s):
3751 if isinstance(s, bytes):
3752 return s # Already encoded
3753
3754 try:
3755 return s.encode(self.get_encoding())
3756 except UnicodeEncodeError as err:
3757 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3758 raise
3759
3760 def get_encoding(self):
3761 encoding = self.params.get('encoding')
3762 if encoding is None:
3763 encoding = preferredencoding()
3764 return encoding
3765
3766 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3767 ''' Write infojson and return True = written, 'exists' = already exists, False = skipped, None = error '''
3768 if overwrite is None:
3769 overwrite = self.params.get('overwrites', True)
3770 if not self.params.get('writeinfojson'):
3771 return False
3772 elif not infofn:
3773 self.write_debug(f'Skipping writing {label} infojson')
3774 return False
3775 elif not self._ensure_dir_exists(infofn):
3776 return None
3777 elif not overwrite and os.path.exists(infofn):
3778 self.to_screen(f'[info] {label.title()} metadata is already present')
3779 return 'exists'
3780
3781 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3782 try:
3783 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3784 return True
3785 except OSError:
3786 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3787 return None
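
# Callers are expected to distinguish all four return values; compare
# process_info above:
#
#   _infojson_written = self._write_info_json('video', info_dict, infofn)
#   if _infojson_written:             # True or 'exists': the file is on disk
#       info_dict['infojson_filename'] = infofn
#   elif _infojson_written is None:   # error: abort processing this video
#       return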
3788
3789 def _write_description(self, label, ie_result, descfn):
3790 ''' Write description and return True = written, False = skipped, None = error '''
3791 if not self.params.get('writedescription'):
3792 return False
3793 elif not descfn:
3794 self.write_debug(f'Skipping writing {label} description')
3795 return False
3796 elif not self._ensure_dir_exists(descfn):
3797 return None
3798 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3799 self.to_screen(f'[info] {label.title()} description is already present')
3800 elif ie_result.get('description') is None:
3801 self.report_warning(f'There\'s no {label} description to write')
3802 return False
3803 else:
3804 try:
3805 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3806 with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3807 descfile.write(ie_result['description'])
3808 except OSError:
3809 self.report_error(f'Cannot write {label} description file {descfn}')
3810 return None
3811 return True
3812
3813 def _write_subtitles(self, info_dict, filename):
3814 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error '''
3815 ret = []
3816 subtitles = info_dict.get('requested_subtitles')
3817 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3818 # Subtitle download errors are already handled by the relevant IE,
3819 # so this silently continues when used with an IE that does not support them
3820 return ret
3821
3822 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3823 if not sub_filename_base:
3824 self.to_screen('[info] Skipping writing video subtitles')
3825 return ret
3826 for sub_lang, sub_info in subtitles.items():
3827 sub_format = sub_info['ext']
3828 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3829 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3830 existing_sub = self.existing_file((sub_filename_final, sub_filename))
3831 if existing_sub:
3832 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3833 sub_info['filepath'] = existing_sub
3834 ret.append((existing_sub, sub_filename_final))
3835 continue
3836
3837 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3838 if sub_info.get('data') is not None:
3839 try:
3840 # Use newline='' to prevent conversion of newline characters
3841 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3842 with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3843 subfile.write(sub_info['data'])
3844 sub_info['filepath'] = sub_filename
3845 ret.append((sub_filename, sub_filename_final))
3846 continue
3847 except OSError:
3848 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3849 return None
3850
3851 try:
3852 sub_copy = sub_info.copy()
3853 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3854 self.dl(sub_filename, sub_copy, subtitle=True)
3855 sub_info['filepath'] = sub_filename
3856 ret.append((sub_filename, sub_filename_final))
3857 except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3858 msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
3859 if self.params.get('ignoreerrors') is not True: # False or 'only_download'
3860 if not self.params.get('ignoreerrors'):
3861 self.report_error(msg)
3862 raise DownloadError(msg)
3863 self.report_warning(msg)
3864 return ret
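
# Each returned pair is (path written now, path the file should finally
# have); process_info above feeds these pairs into files_to_move so that
# MoveFilesAfterDownloadPP can relocate them after postprocessing.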
3865
3866 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3867 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3868 write_all = self.params.get('write_all_thumbnails', False)
3869 thumbnails, ret = [], []
3870 if write_all or self.params.get('writethumbnail', False):
3871 thumbnails = info_dict.get('thumbnails') or []
3872 multiple = write_all and len(thumbnails) > 1
3873
3874 if thumb_filename_base is None:
3875 thumb_filename_base = filename
3876 if thumbnails and not thumb_filename_base:
3877 self.write_debug(f'Skipping writing {label} thumbnail')
3878 return ret
3879
3880 for idx, t in list(enumerate(thumbnails))[::-1]:
3881 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3882 thumb_display_id = f'{label} thumbnail {t["id"]}'
3883 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3884 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3885
3886 existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
3887 if existing_thumb:
3888 self.to_screen('[info] %s is already present' % (
3889 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3890 t['filepath'] = existing_thumb
3891 ret.append((existing_thumb, thumb_filename_final))
3892 else:
3893 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3894 try:
3895 uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
3896 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3897 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3898 shutil.copyfileobj(uf, thumbf)
3899 ret.append((thumb_filename, thumb_filename_final))
3900 t['filepath'] = thumb_filename
3901 except network_exceptions as err:
3902 thumbnails.pop(idx)
3903 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3904 if ret and not write_all:
3905 break
3906 return ret