#!/usr/bin/env python3
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random
import unicodedata

from string import ascii_letters

from .compat import (
    compat_basestring,
    compat_get_terminal_size,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_pycrypto_AES,
    compat_shlex_quote,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
    windows_enable_vt_mode,
)
from .cookies import load_cookies
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    determine_ext,
    determine_protocol,
    DOT_DESKTOP_LINK_TEMPLATE,
    DOT_URL_LINK_TEMPLATE,
    DOT_WEBLOC_LINK_TEMPLATE,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    EntryNotInPlaylist,
    error_to_compat_str,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    formatSeconds,
    GeoRestrictedError,
    HEADRequest,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    LazyList,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    network_exceptions,
    orderedSet,
    OUTTMPL_TYPES,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    process_communicate_or_kill,
    register_socks_protocols,
    RejectedVideoReached,
    render_table,
    replace_extension,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    TERMINAL_SEQUENCES,
    ThrottledDownload,
    to_high_limit_path,
    traverse_obj,
    try_get,
    UnavailableVideoError,
    url_basename,
    variadic,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import (
    gen_extractor_classes,
    get_info_extractor,
    _LAZY_LOADER,
    _PLUGIN_CLASSES as plugin_extractors
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
    FFmpegFD,
    get_suitable_downloader,
    shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    get_postprocessor,
    EmbedThumbnailPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    MoveFilesAfterDownloadPP,
    _PLUGIN_CLASSES as plugin_postprocessors
)
from .update import detect_variant
from .version import __version__

if compat_os_name == 'nt':
    import ctypes

class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader doesn't know how to
    extract all the needed information by itself (that is the task of
    the InfoExtractors), so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A list of templates to force print
    forceurl:          Force printing final URL. (Deprecated)
    forcetitle:        Force printing title. (Deprecated)
    forceid:           Force printing ID. (Deprecated)
    forcethumbnail:    Force printing thumbnail URL. (Deprecated)
    forcedescription:  Force printing description. (Deprecated)
    forcefilename:     Force printing final filename. (Deprecated)
    forceduration:     Force printing duration. (Deprecated)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or
                       list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       How to sort the video formats. See "Sorting Formats"
                       for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none)
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Deprecated - Use subtitleslangs = ['all'].
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    cookiefile:        File name where cookies should be read from and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser and the profile
                       name/path from where cookies are loaded.
                       Eg: ('chrome', ) or ('vivaldi', 'default')
    nocheckcertificate: Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Can be one of
                               pre_process|before_dl|post_process|after_move.
                               Assumed to be 'post_process' if not given
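                       Eg. (an illustrative sketch; all keys other than 'key'
                       and 'when' are passed to the postprocessor's constructor):
                       {'key': 'EmbedThumbnail', 'already_have_thumbnail': False,
                        'when': 'post_process'}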
    post_hooks:        Deprecated - Register a custom postprocessor instead.
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
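
                       A minimal hook (an illustrative sketch, not part of the
                       API surface):

                           def hook(d):
                               if d['status'] == 'finished':
                                   print('Downloaded', d['filename'])

                       registered via progress_hooks=[hook]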
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted. "merge_output_format" is
                       replaced by this extension when given
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
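
                       Eg. (an illustrative sketch) to skip videos shorter
                       than 60 seconds:

                           def match_filter(info_dict, *, incomplete=False):
                               if (info_dict.get('duration') or 0) < 60:
                                   return 'Too short'   # message => skipped
                               return None              # None => downloaded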
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
                       or {'m3u8': 'ffmpeg'} instead.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'
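                       Eg. (a sketch; assumes the progress dictionary exposes
                       a '_percent_str' field):
                       {'download': '%(info.id)s %(progress._percent_str)s'}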

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, fragment_retries, continuedl,
    noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
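
    A minimal usage sketch (download() and the context-manager methods are
    defined on this class; the URL and options are illustrative):

        from yt_dlp import YoutubeDL

        with YoutubeDL({'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])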
474 """
475
476 _NUMERIC_FIELDS = set((
477 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
478 'timestamp', 'release_timestamp',
479 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
480 'average_rating', 'comment_count', 'age_limit',
481 'start_time', 'end_time',
482 'chapter_number', 'season_number', 'episode_number',
483 'track_number', 'disc_number', 'release_year',
484 ))
485
486 params = None
487 _ies = {}
488 _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
489 _printed_messages = set()
490 _first_webpage_request = True
491 _download_retcode = None
492 _num_downloads = None
493 _playlist_level = 0
494 _playlist_urls = set()
495 _screen_file = None
496
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self._ies = {}
        self._ies_instances = {}
        self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        windows_enable_vt_mode()
        # FIXME: This will break if we ever print color to stdout
        self.params['no_color'] = self.params.get('no_color') or not supports_terminal_sequences(self._err_file)

        if sys.version_info < (3, 6):
            self.report_warning(
                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._color_text("unplayable formats", "blue")} to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._color_text("DO NOT", "red")} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('warnings', []):
            self.report_warning(msg)

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            None if self.params.get('format') is None
            else self.build_format_selector(self.params['format']))

        self._setup_opener()

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            pp_class = get_postprocessor(pp_def.pop('key'))
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug('Loading archive file %r\n' % fn)
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))
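        # The archive is a plain-text file with one entry per line; each line
        # holds the extractor key and the video id, eg: "youtube dQw4w9WgXcQ"
        # (written/checked by record_download_archive and in_download_archive,
        # defined elsewhere in this class)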

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=False):
        """Print message to stdout"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not quiet or self.params.get('verbose'):
            self._write_string(
                '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
                self._err_file if quiet else self._screen_file)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode"""
        self.to_stdout(
            message, skip_eol, quiet=self.params.get('quiet', False))

    def _color_text(self, text, color):
        if self.params.get('no_color'):
            return text
        return f'{TERMINAL_SEQUENCES[color.upper()]}{text}{TERMINAL_SEQUENCES["RESET_STYLE"]}'

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._color_text("WARNING:", "yellow")} {message}', only_once)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._color_text("ERROR:", "red")} {message}', tb)

    def write_debug(self, message, only_once=False):
        '''Log a debug message, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = '[debug] %s' % message
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False):
        has_drm = info.get('__has_drm')
        msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
        expected = self.params.get('ignore_no_formats_error')
        if forced or not expected:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        outtmpl_dict.update({
            k: v for k, v in DEFAULT_OUTTMPL.items()
            if outtmpl_dict.get(k) is None})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')
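        # Eg (a sketch): in '~/%(title)s.%(ext)s' the '~' is expanded here,
        # while a literal '%%' or '$$' in the template survives for the
        # later template-dict substitution step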

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)
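        # Eg (a sketch): escape_outtmpl('%(title)s 100%') == '%(title)s 100%%';
        # key-less '%' sequences are doubled while real template fields are kept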

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBU]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
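        # Usage sketch: YoutubeDL.validate_outtmpl('%(title)s - %(id)s.%(ext)s')
        # returns None when the template is well-formed, else the ValueError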

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = dict(info_dict)  # Do not sanitize so as not to consume LazyList
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
            'playlist_autonumber': len(str(info_dict.get('n_entries') or '')),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBU]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
            (?P<negate>-)?
            (?P<fields>{field})
            (?P<maths>(?:{math_op}{math_field})*)
            (?:>(?P<strf_format>.+?))?
            (?P<alternate>(?<!\\),[^|)]+)?
            (?:\|(?P<default>.*?))?
            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
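        # Illustrative sketches of keys that INTERNAL_FORMAT_RE accepts:
        #   'duration'           -> plain field
        #   'playlist_index+1'   -> field with maths
        #   'epoch>%Y-%m-%d'     -> field with strftime format
        #   'title|Unknown'      -> field with a default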

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields').split('.')[-1] if mobj else ''
            value, default = None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = '0{:d}d'.format(field_size_compat_map[key])

            value = default if value is None else value

            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in (outer_mobj.group('conversion') or '') else ', '
                value, fmt = delim.join(variadic(value)), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value, fmt = compat_shlex_quote(str(value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                opts = outer_mobj.group('conversion') or ''
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in opts else '', 'D' if '#' in opts else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitize(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
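        # Eg (a sketch): evaluate_outtmpl('%(title)s-%(id)s.%(ext)s',
        # {'title': 'a', 'id': 'b', 'ext': 'mp4'}) returns 'a-b.mp4'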

    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
            filename = self.evaluate_outtmpl(outtmpl, info_dict, sanitize)

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if filename and force_ext is not None:
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""

        filename = self._prepare_filename(info_dict, dir_type or 'default')
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)
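        # Eg (a sketch, assuming the default outtmpl '%(title)s [%(id)s].%(ext)s'):
        # prepare_filename({'title': 'T', 'id': 'x', 'ext': 'mp4'}) -> 'T [x].mp4'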

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._get_info_extractor_class(ie_key)}
        else:
            ies = self._ies

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen("[%s] %s: has already been recorded in archive" % (
                    ie_key, temp_id))
                break
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def __handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except ThrottledDownload:
                self.to_stderr('\r')
                self.report_warning('The download speed is below throttle limit. Re-extracting data')
                return wrapper(self, *args, **kwargs)
            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached, LazyList.IndexError):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors'):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
                'webpage_url_basename': url_basename(url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            ie_result = self.process_video_result(ie_result, download=download)
            additional_urls = (ie_result or {}).get('additional_urls')
            if additional_urls:
                # TODO: Improve MetadataParserPP to allow setting a list
                if isinstance(additional_urls, compat_str):
                    additional_urls = [additional_urls]
                self.to_screen(
                    '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
                self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
                ie_result['additional_entries'] = [
                    self.extract_info(
                        url, download, extra_info,
                        force_generic_extractor=self.params.get('force_generic_extractor'))
                    for url in additional_urls
                ]
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)
1396 # Extracted info may not be a video result (i.e.
1397 # info.get('_type', 'video') != video) but rather an url or
1398 # url_transparent. In such cases outer metadata (from ie_result)
1399 # should be propagated to inner one (info). For this to happen
1400 # _type of info should be overridden with url_transparent. This
1401 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1402 if new_result.get('_type') == 'url':
1403 new_result['_type'] = 'url_transparent'
1404
1405 return self.process_ie_result(
1406 new_result, download=download, extra_info=extra_info)
1407 elif result_type in ('playlist', 'multi_video'):
1408 # Protect from infinite recursion due to recursively nested playlists
1409 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1410 webpage_url = ie_result['webpage_url']
1411 if webpage_url in self._playlist_urls:
1412 self.to_screen(
1413 '[download] Skipping already downloaded playlist: %s'
1414                     % (ie_result.get('title') or ie_result.get('id')))
1415 return
1416
1417 self._playlist_level += 1
1418 self._playlist_urls.add(webpage_url)
1419 self._sanitize_thumbnails(ie_result)
1420 try:
1421 return self.__process_playlist(ie_result, download)
1422 finally:
1423 self._playlist_level -= 1
1424 if not self._playlist_level:
1425 self._playlist_urls.clear()
1426 elif result_type == 'compat_list':
1427 self.report_warning(
1428 'Extractor %s returned a compat_list result. '
1429 'It needs to be updated.' % ie_result.get('extractor'))
1430
1431 def _fixup(r):
1432 self.add_extra_info(r, {
1433 'extractor': ie_result['extractor'],
1434 'webpage_url': ie_result['webpage_url'],
1435 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1436 'extractor_key': ie_result['extractor_key'],
1437 })
1438 return r
1439 ie_result['entries'] = [
1440 self.process_ie_result(_fixup(r), download, extra_info)
1441 for r in ie_result['entries']
1442 ]
1443 return ie_result
1444 else:
1445 raise Exception('Invalid result type: %s' % result_type)
1446
1447 def _ensure_dir_exists(self, path):
1448 return make_dir(path, self.report_error)
1449
1450 def __process_playlist(self, ie_result, download):
1451 # We process each entry in the playlist
1452 playlist = ie_result.get('title') or ie_result.get('id')
1453 self.to_screen('[download] Downloading playlist: %s' % playlist)
1454
1455 if 'entries' not in ie_result:
1456 raise EntryNotInPlaylist()
1457 incomplete_entries = bool(ie_result.get('requested_entries'))
1458 if incomplete_entries:
1459 def fill_missing_entries(entries, indexes):
1460                     ret = [None] * max(indexes)
1461 for i, entry in zip(indexes, entries):
1462 ret[i - 1] = entry
1463 return ret
1464 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
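            # e.g. (illustrative): entries [A, C] with requested_entries [1, 3]
            # are padded to [A, None, C], so that list positions line up with
            # 1-based playlist indices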
1465
1466 playlist_results = []
1467
1468 playliststart = self.params.get('playliststart', 1)
1469 playlistend = self.params.get('playlistend')
1470 # For backwards compatibility, interpret -1 as whole list
1471 if playlistend == -1:
1472 playlistend = None
1473
1474 playlistitems_str = self.params.get('playlist_items')
1475 playlistitems = None
1476 if playlistitems_str is not None:
1477 def iter_playlistitems(format):
1478 for string_segment in format.split(','):
1479 if '-' in string_segment:
1480 start, end = string_segment.split('-')
1481 for item in range(int(start), int(end) + 1):
1482 yield int(item)
1483 else:
1484 yield int(string_segment)
1485 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
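            # Expansion sketch (comments only, assuming the parser above):
            # ranges are inclusive and orderedSet drops later duplicates, e.g.
            #   '1-3,7' -> [1, 2, 3, 7]
            #   '5,2,5' -> [5, 2]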
1486
1487 ie_entries = ie_result['entries']
1488 msg = (
1489 'Downloading %d videos' if not isinstance(ie_entries, list)
1490 else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
1491
1492 if isinstance(ie_entries, list):
1493 def get_entry(i):
1494 return ie_entries[i - 1]
1495 else:
1496 if not isinstance(ie_entries, PagedList):
1497 ie_entries = LazyList(ie_entries)
1498
1499 def get_entry(i):
1500 return YoutubeDL.__handle_extraction_exceptions(
1501 lambda self, i: ie_entries[i - 1]
1502 )(self, i)
1503
1504 entries = []
1505 items = playlistitems if playlistitems is not None else itertools.count(playliststart)
1506 for i in items:
1507 if i == 0:
1508 continue
1509 if playlistitems is None and playlistend is not None and playlistend < i:
1510 break
1511 entry = None
1512 try:
1513 entry = get_entry(i)
1514 if entry is None:
1515 raise EntryNotInPlaylist()
1516 except (IndexError, EntryNotInPlaylist):
1517 if incomplete_entries:
1518 raise EntryNotInPlaylist()
1519 elif not playlistitems:
1520 break
1521 entries.append(entry)
1522 try:
1523 if entry is not None:
1524 self._match_entry(entry, incomplete=True, silent=True)
1525 except (ExistingVideoReached, RejectedVideoReached):
1526 break
1527 ie_result['entries'] = entries
1528
1529 # Save playlist_index before re-ordering
1530 entries = [
1531 ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
1532 for i, entry in enumerate(entries, 1)
1533 if entry is not None]
1534 n_entries = len(entries)
1535
1536 if not playlistitems and (playliststart or playlistend):
1537 playlistitems = list(range(playliststart, playliststart + n_entries))
1538 ie_result['requested_entries'] = playlistitems
1539
1540 if self.params.get('allow_playlist_files', True):
1541 ie_copy = {
1542 'playlist': playlist,
1543 'playlist_id': ie_result.get('id'),
1544 'playlist_title': ie_result.get('title'),
1545 'playlist_uploader': ie_result.get('uploader'),
1546 'playlist_uploader_id': ie_result.get('uploader_id'),
1547 'playlist_index': 0,
1548 }
1549 ie_copy.update(dict(ie_result))
1550
1551 if self._write_info_json('playlist', ie_result,
1552 self.prepare_filename(ie_copy, 'pl_infojson')) is None:
1553 return
1554 if self._write_description('playlist', ie_result,
1555 self.prepare_filename(ie_copy, 'pl_description')) is None:
1556 return
1557 # TODO: This should be passed to ThumbnailsConvertor if necessary
1558 self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1559
1560 if self.params.get('playlistreverse', False):
1561 entries = entries[::-1]
1562 if self.params.get('playlistrandom', False):
1563 random.shuffle(entries)
1564
1565 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1566
1567 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
1568 failures = 0
1569 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1570 for i, entry_tuple in enumerate(entries, 1):
1571 playlist_index, entry = entry_tuple
1572 if 'playlist-index' in self.params.get('compat_opts', []):
1573 playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
1574 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1575 # This __x_forwarded_for_ip thing is a bit ugly but requires
1576 # minimal changes
1577 if x_forwarded_for:
1578 entry['__x_forwarded_for_ip'] = x_forwarded_for
1579 extra = {
1580 'n_entries': n_entries,
1581 '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
1582 'playlist_index': playlist_index,
1583 'playlist_autonumber': i,
1584 'playlist': playlist,
1585 'playlist_id': ie_result.get('id'),
1586 'playlist_title': ie_result.get('title'),
1587 'playlist_uploader': ie_result.get('uploader'),
1588 'playlist_uploader_id': ie_result.get('uploader_id'),
1589 'extractor': ie_result['extractor'],
1590 'webpage_url': ie_result['webpage_url'],
1591 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1592 'extractor_key': ie_result['extractor_key'],
1593 }
1594
1595 if self._match_entry(entry, incomplete=True) is not None:
1596 continue
1597
1598 entry_result = self.__process_iterable_entry(entry, download, extra)
1599 if not entry_result:
1600 failures += 1
1601 if failures >= max_failures:
1602 self.report_error(
1603 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
1604 break
1605 # TODO: skip failed (empty) entries?
1606 playlist_results.append(entry_result)
1607 ie_result['entries'] = playlist_results
1608 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1609 return ie_result
1610
1611 @__handle_extraction_exceptions
1612 def __process_iterable_entry(self, entry, download, extra_info):
1613 return self.process_ie_result(
1614 entry, download=download, extra_info=extra_info)
1615
1616 def _build_format_filter(self, filter_spec):
1617 " Returns a function to filter the formats according to the filter_spec "
1618
1619 OPERATORS = {
1620 '<': operator.lt,
1621 '<=': operator.le,
1622 '>': operator.gt,
1623 '>=': operator.ge,
1624 '=': operator.eq,
1625 '!=': operator.ne,
1626 }
1627 operator_rex = re.compile(r'''(?x)\s*
1628 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1629 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1630 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1631 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1632 m = operator_rex.fullmatch(filter_spec)
1633 if m:
1634 try:
1635 comparison_value = int(m.group('value'))
1636 except ValueError:
1637 comparison_value = parse_filesize(m.group('value'))
1638 if comparison_value is None:
1639 comparison_value = parse_filesize(m.group('value') + 'B')
1640 if comparison_value is None:
1641 raise ValueError(
1642 'Invalid value %r in format specification %r' % (
1643 m.group('value'), filter_spec))
1644 op = OPERATORS[m.group('op')]
1645
1646 if not m:
1647 STR_OPERATORS = {
1648 '=': operator.eq,
1649 '^=': lambda attr, value: attr.startswith(value),
1650 '$=': lambda attr, value: attr.endswith(value),
1651 '*=': lambda attr, value: value in attr,
1652 }
1653 str_operator_rex = re.compile(r'''(?x)\s*
1654 (?P<key>[a-zA-Z0-9._-]+)\s*
1655 (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1656 (?P<value>[a-zA-Z0-9._-]+)\s*
1657 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1658 m = str_operator_rex.fullmatch(filter_spec)
1659 if m:
1660 comparison_value = m.group('value')
1661 str_op = STR_OPERATORS[m.group('op')]
1662 if m.group('negation'):
1663 op = lambda attr, value: not str_op(attr, value)
1664 else:
1665 op = str_op
1666
1667 if not m:
1668 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1669
1670 def _filter(f):
1671 actual_value = f.get(m.group('key'))
1672 if actual_value is None:
1673 return m.group('none_inclusive')
1674 return op(actual_value, comparison_value)
1675 return _filter
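        # Usage sketch (comments only, assuming the regexes above): numeric
        # filters compare format fields, a trailing '?' lets formats without
        # the field pass, and '!' negates string operators, e.g.
        #   _build_format_filter('height<=720')({'height': 480}) -> True
        #   _build_format_filter('filesize>10M')({})             -> None (falsy; excluded)
        #   _build_format_filter('ext!*=webm')({'ext': 'mp4'})   -> True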
1676
1677 def _default_format_spec(self, info_dict, download=True):
1678
1679 def can_merge():
1680 merger = FFmpegMergerPP(self)
1681 return merger.available and merger.can_merge()
1682
1683 prefer_best = (
1684 not self.params.get('simulate')
1685 and download
1686 and (
1687 not can_merge()
1688 or info_dict.get('is_live', False)
1689 or self.outtmpl_dict['default'] == '-'))
1690 compat = (
1691 prefer_best
1692 or self.params.get('allow_multiple_audio_streams', False)
1693 or 'format-spec' in self.params.get('compat_opts', []))
1694
1695 return (
1696 'best/bestvideo+bestaudio' if prefer_best
1697 else 'bestvideo*+bestaudio/best' if not compat
1698 else 'bestvideo+bestaudio/best')
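        # Decision sketch: 'best/bestvideo+bestaudio' when merging is not an
        # option (no working ffmpeg merger, a live stream, or output to '-'),
        # 'bestvideo+bestaudio/best' under compat options, and
        # 'bestvideo*+bestaudio/best' otherwise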
1699
1700 def build_format_selector(self, format_spec):
1701 def syntax_error(note, start):
1702 message = (
1703 'Invalid format specification: '
1704 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1705 return SyntaxError(message)
1706
1707 PICKFIRST = 'PICKFIRST'
1708 MERGE = 'MERGE'
1709 SINGLE = 'SINGLE'
1710 GROUP = 'GROUP'
1711 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1712
1713 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1714 'video': self.params.get('allow_multiple_video_streams', False)}
1715
1716 check_formats = self.params.get('check_formats')
1717
1718 def _parse_filter(tokens):
1719 filter_parts = []
1720 for type, string, start, _, _ in tokens:
1721 if type == tokenize.OP and string == ']':
1722 return ''.join(filter_parts)
1723 else:
1724 filter_parts.append(string)
1725
1726 def _remove_unused_ops(tokens):
1727 # Remove operators that we don't use and join them with the surrounding strings
1728 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1729 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1730 last_string, last_start, last_end, last_line = None, None, None, None
1731 for type, string, start, end, line in tokens:
1732 if type == tokenize.OP and string == '[':
1733 if last_string:
1734 yield tokenize.NAME, last_string, last_start, last_end, last_line
1735 last_string = None
1736 yield type, string, start, end, line
1737 # everything inside brackets will be handled by _parse_filter
1738 for type, string, start, end, line in tokens:
1739 yield type, string, start, end, line
1740 if type == tokenize.OP and string == ']':
1741 break
1742 elif type == tokenize.OP and string in ALLOWED_OPS:
1743 if last_string:
1744 yield tokenize.NAME, last_string, last_start, last_end, last_line
1745 last_string = None
1746 yield type, string, start, end, line
1747 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1748 if not last_string:
1749 last_string = string
1750 last_start = start
1751 last_end = end
1752 else:
1753 last_string += string
1754 if last_string:
1755 yield tokenize.NAME, last_string, last_start, last_end, last_line
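            # Coalescing sketch (comments only): NAME('mp4') OP('-')
            # NAME('baseline') from the tokenizer is re-emitted as a single
            # NAME('mp4-baseline'), since '-' is not in ALLOWED_OPS, while
            # '/', '+', ',' and parentheses pass through as real operators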
1756
1757 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1758 selectors = []
1759 current_selector = None
1760 for type, string, start, _, _ in tokens:
1761 # ENCODING is only defined in python 3.x
1762 if type == getattr(tokenize, 'ENCODING', None):
1763 continue
1764 elif type in [tokenize.NAME, tokenize.NUMBER]:
1765 current_selector = FormatSelector(SINGLE, string, [])
1766 elif type == tokenize.OP:
1767 if string == ')':
1768 if not inside_group:
1769 # ')' will be handled by the parentheses group
1770 tokens.restore_last_token()
1771 break
1772 elif inside_merge and string in ['/', ',']:
1773 tokens.restore_last_token()
1774 break
1775 elif inside_choice and string == ',':
1776 tokens.restore_last_token()
1777 break
1778 elif string == ',':
1779 if not current_selector:
1780 raise syntax_error('"," must follow a format selector', start)
1781 selectors.append(current_selector)
1782 current_selector = None
1783 elif string == '/':
1784 if not current_selector:
1785 raise syntax_error('"/" must follow a format selector', start)
1786 first_choice = current_selector
1787 second_choice = _parse_format_selection(tokens, inside_choice=True)
1788 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
1789 elif string == '[':
1790 if not current_selector:
1791 current_selector = FormatSelector(SINGLE, 'best', [])
1792 format_filter = _parse_filter(tokens)
1793 current_selector.filters.append(format_filter)
1794 elif string == '(':
1795 if current_selector:
1796 raise syntax_error('Unexpected "("', start)
1797 group = _parse_format_selection(tokens, inside_group=True)
1798 current_selector = FormatSelector(GROUP, group, [])
1799 elif string == '+':
1800 if not current_selector:
1801 raise syntax_error('Unexpected "+"', start)
1802 selector_1 = current_selector
1803 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1804 if not selector_2:
1805 raise syntax_error('Expected a selector', start)
1806 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
1807 else:
1808 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1809 elif type == tokenize.ENDMARKER:
1810 break
1811 if current_selector:
1812 selectors.append(current_selector)
1813 return selectors
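            # Parse sketch (schematic, comments only): 'bv*+ba/b' parses as
            #   PICKFIRST(MERGE(SINGLE 'bv*', SINGLE 'ba'), SINGLE 'b')
            # i.e. '+' binds tighter than '/'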
1814
1815 def _merge(formats_pair):
1816 format_1, format_2 = formats_pair
1817
1818 formats_info = []
1819 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1820 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1821
1822 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1823 get_no_more = {'video': False, 'audio': False}
1824 for (i, fmt_info) in enumerate(formats_info):
1825 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
1826 formats_info.pop(i)
1827 continue
1828 for aud_vid in ['audio', 'video']:
1829 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1830 if get_no_more[aud_vid]:
1831 formats_info.pop(i)
1832 break
1833 get_no_more[aud_vid] = True
1834
1835 if len(formats_info) == 1:
1836 return formats_info[0]
1837
1838 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1839 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1840
1841 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1842 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1843
1844 output_ext = self.params.get('merge_output_format')
1845 if not output_ext:
1846 if the_only_video:
1847 output_ext = the_only_video['ext']
1848 elif the_only_audio and not video_fmts:
1849 output_ext = the_only_audio['ext']
1850 else:
1851 output_ext = 'mkv'
1852
1853 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
1854
1855 new_dict = {
1856 'requested_formats': formats_info,
1857 'format': '+'.join(filtered('format')),
1858 'format_id': '+'.join(filtered('format_id')),
1859 'ext': output_ext,
1860 'protocol': '+'.join(map(determine_protocol, formats_info)),
1861 'language': '+'.join(orderedSet(filtered('language'))),
1862 'format_note': '+'.join(orderedSet(filtered('format_note'))),
1863 'filesize_approx': sum(filtered('filesize', 'filesize_approx')),
1864 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
1865 }
1866
1867 if the_only_video:
1868 new_dict.update({
1869 'width': the_only_video.get('width'),
1870 'height': the_only_video.get('height'),
1871 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1872 'fps': the_only_video.get('fps'),
1873 'vcodec': the_only_video.get('vcodec'),
1874 'vbr': the_only_video.get('vbr'),
1875 'stretched_ratio': the_only_video.get('stretched_ratio'),
1876 })
1877
1878 if the_only_audio:
1879 new_dict.update({
1880 'acodec': the_only_audio.get('acodec'),
1881 'abr': the_only_audio.get('abr'),
1882 'asr': the_only_audio.get('asr'),
1883 })
1884
1885 return new_dict
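            # Merge sketch (illustrative format ids/values): a video-only
            # format '137' (mp4) merged with an audio-only '140' (m4a) yields
            # roughly
            #   {'requested_formats': [f137, f140], 'format_id': '137+140',
            #    'ext': 'mp4', 'protocol': 'https+https', ...}
            # with width/height/vcodec taken from the only video stream and
            # acodec/abr/asr from the only audio stream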
1886
1887 def _check_formats(formats):
1888 if not check_formats:
1889 yield from formats
1890 return
1891 for f in formats:
1892 self.to_screen('[info] Testing format %s' % f['format_id'])
1893 temp_file = tempfile.NamedTemporaryFile(
1894 suffix='.tmp', delete=False,
1895 dir=self.get_output_path('temp') or None)
1896 temp_file.close()
1897 try:
1898 success, _ = self.dl(temp_file.name, f, test=True)
1899 except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
1900 success = False
1901 finally:
1902 if os.path.exists(temp_file.name):
1903 try:
1904 os.remove(temp_file.name)
1905 except OSError:
1906 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1907 if success:
1908 yield f
1909 else:
1910 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1911
1912 def _build_selector_function(selector):
1913 if isinstance(selector, list): # ,
1914 fs = [_build_selector_function(s) for s in selector]
1915
1916 def selector_function(ctx):
1917 for f in fs:
1918 yield from f(ctx)
1919 return selector_function
1920
1921 elif selector.type == GROUP: # ()
1922 selector_function = _build_selector_function(selector.selector)
1923
1924 elif selector.type == PICKFIRST: # /
1925 fs = [_build_selector_function(s) for s in selector.selector]
1926
1927 def selector_function(ctx):
1928 for f in fs:
1929 picked_formats = list(f(ctx))
1930 if picked_formats:
1931 return picked_formats
1932 return []
1933
1934 elif selector.type == MERGE: # +
1935 selector_1, selector_2 = map(_build_selector_function, selector.selector)
1936
1937 def selector_function(ctx):
1938 for pair in itertools.product(
1939 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
1940 yield _merge(pair)
1941
1942 elif selector.type == SINGLE: # atom
1943 format_spec = selector.selector or 'best'
1944
1945 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
1946 if format_spec == 'all':
1947 def selector_function(ctx):
1948 yield from _check_formats(ctx['formats'])
1949 elif format_spec == 'mergeall':
1950 def selector_function(ctx):
1951 formats = list(_check_formats(ctx['formats']))
1952 if not formats:
1953 return
1954 merged_format = formats[-1]
1955 for f in formats[-2::-1]:
1956 merged_format = _merge((merged_format, f))
1957 yield merged_format
1958
1959 else:
1960 format_fallback, format_reverse, format_idx = False, True, 1
1961 mobj = re.match(
1962 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1963 format_spec)
1964 if mobj is not None:
1965 format_idx = int_or_none(mobj.group('n'), default=1)
1966 format_reverse = mobj.group('bw')[0] == 'b'
1967 format_type = (mobj.group('type') or [None])[0]
1968 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1969 format_modified = mobj.group('mod') is not None
1970
1971 format_fallback = not format_type and not format_modified # for b, w
1972 _filter_f = (
1973 (lambda f: f.get('%scodec' % format_type) != 'none')
1974 if format_type and format_modified # bv*, ba*, wv*, wa*
1975 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1976 if format_type # bv, ba, wv, wa
1977 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1978 if not format_modified # b, w
1979 else lambda f: True) # b*, w*
1980 filter_f = lambda f: _filter_f(f) and (
1981 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
1982 else:
1983 if format_spec in ('m4a', 'mp3', 'ogg', 'aac'): # audio extension
1984 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
1985 elif format_spec in ('mp4', 'flv', 'webm', '3gp'): # video extension
1986 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
1987 elif format_spec in ('mhtml', ): # storyboards extension
1988 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
1989 else:
1990 filter_f = lambda f: f.get('format_id') == format_spec # id
1991
1992 def selector_function(ctx):
1993 formats = list(ctx['formats'])
1994 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
1995 if format_fallback and ctx['incomplete_formats'] and not matches:
1996 # for extractors with incomplete formats (audio only (soundcloud)
1997                             # or video only (imgur)) best/worst will fall back to
1998 # best/worst {video,audio}-only format
1999 matches = formats
2000 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2001 try:
2002 yield matches[format_idx - 1]
2003 except IndexError:
2004 return
2005
2006 filters = [self._build_format_filter(f) for f in selector.filters]
2007
2008 def final_selector(ctx):
2009 ctx_copy = copy.deepcopy(ctx)
2010 for _filter in filters:
2011 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2012 return selector_function(ctx_copy)
2013 return final_selector
2014
2015 stream = io.BytesIO(format_spec.encode('utf-8'))
2016 try:
2017 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
2018 except tokenize.TokenError:
2019 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2020
2021 class TokenIterator(object):
2022 def __init__(self, tokens):
2023 self.tokens = tokens
2024 self.counter = 0
2025
2026 def __iter__(self):
2027 return self
2028
2029 def __next__(self):
2030 if self.counter >= len(self.tokens):
2031 raise StopIteration()
2032 value = self.tokens[self.counter]
2033 self.counter += 1
2034 return value
2035
2036 next = __next__
2037
2038 def restore_last_token(self):
2039 self.counter -= 1
2040
2041 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2042 return _build_selector_function(parsed_selector)
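        # Usage sketch (comments only): the returned selector is called with a
        # context dict and yields the chosen format dicts, e.g.
        #   selector = ydl.build_format_selector('bestvideo+bestaudio/best')
        #   for f in selector({'formats': formats, 'incomplete_formats': False}):
        #       ...  # f may be a plain format or a merged one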
2043
2044 def _calc_headers(self, info_dict):
2045 res = std_headers.copy()
2046
2047 add_headers = info_dict.get('http_headers')
2048 if add_headers:
2049 res.update(add_headers)
2050
2051 cookies = self._calc_cookies(info_dict)
2052 if cookies:
2053 res['Cookie'] = cookies
2054
2055 if 'X-Forwarded-For' not in res:
2056 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2057 if x_forwarded_for_ip:
2058 res['X-Forwarded-For'] = x_forwarded_for_ip
2059
2060 return res
2061
2062 def _calc_cookies(self, info_dict):
2063 pr = sanitized_Request(info_dict['url'])
2064 self.cookiejar.add_cookie_header(pr)
2065 return pr.get_header('Cookie')
2066
2067 def _sanitize_thumbnails(self, info_dict):
2068 thumbnails = info_dict.get('thumbnails')
2069 if thumbnails is None:
2070 thumbnail = info_dict.get('thumbnail')
2071 if thumbnail:
2072 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2073 if thumbnails:
2074 thumbnails.sort(key=lambda t: (
2075 t.get('preference') if t.get('preference') is not None else -1,
2076 t.get('width') if t.get('width') is not None else -1,
2077 t.get('height') if t.get('height') is not None else -1,
2078 t.get('id') if t.get('id') is not None else '',
2079 t.get('url')))
2080
2081 def thumbnail_tester():
2082 if self.params.get('check_formats'):
2083 test_all = True
2084 to_screen = lambda msg: self.to_screen(f'[info] {msg}')
2085 else:
2086 test_all = False
2087 to_screen = self.write_debug
2088
2089 def test_thumbnail(t):
2090 if not test_all and not t.get('_test_url'):
2091 return True
2092 to_screen('Testing thumbnail %s' % t['id'])
2093 try:
2094 self.urlopen(HEADRequest(t['url']))
2095 except network_exceptions as err:
2096 to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
2097 t['id'], t['url'], error_to_compat_str(err)))
2098 return False
2099 return True
2100
2101 return test_thumbnail
2102
2103 for i, t in enumerate(thumbnails):
2104 if t.get('id') is None:
2105 t['id'] = '%d' % i
2106 if t.get('width') and t.get('height'):
2107 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2108 t['url'] = sanitize_url(t['url'])
2109
2110 if self.params.get('check_formats') is not False:
2111 info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
2112 else:
2113 info_dict['thumbnails'] = thumbnails
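        # Ordering note: the sort above is ascending by preference and size,
        # so thumbnails[-1] (used as the default 'thumbnail' in
        # process_video_result) is the most preferred/largest candidate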
2114
2115 def process_video_result(self, info_dict, download=True):
2116 assert info_dict.get('_type', 'video') == 'video'
2117
2118 if 'id' not in info_dict:
2119 raise ExtractorError('Missing "id" field in extractor result')
2120 if 'title' not in info_dict:
2121 raise ExtractorError('Missing "title" field in extractor result',
2122 video_id=info_dict['id'], ie=info_dict['extractor'])
2123
2124 def report_force_conversion(field, field_not, conversion):
2125 self.report_warning(
2126                 '"%s" field is not %s - forcing %s conversion; there is an error in the extractor'
2127 % (field, field_not, conversion))
2128
2129 def sanitize_string_field(info, string_field):
2130 field = info.get(string_field)
2131 if field is None or isinstance(field, compat_str):
2132 return
2133 report_force_conversion(string_field, 'a string', 'string')
2134 info[string_field] = compat_str(field)
2135
2136 def sanitize_numeric_fields(info):
2137 for numeric_field in self._NUMERIC_FIELDS:
2138 field = info.get(numeric_field)
2139 if field is None or isinstance(field, compat_numeric_types):
2140 continue
2141 report_force_conversion(numeric_field, 'numeric', 'int')
2142 info[numeric_field] = int_or_none(field)
2143
2144 sanitize_string_field(info_dict, 'id')
2145 sanitize_numeric_fields(info_dict)
2146
2147 if 'playlist' not in info_dict:
2148 # It isn't part of a playlist
2149 info_dict['playlist'] = None
2150 info_dict['playlist_index'] = None
2151
2152 self._sanitize_thumbnails(info_dict)
2153
2154 thumbnail = info_dict.get('thumbnail')
2155 thumbnails = info_dict.get('thumbnails')
2156 if thumbnail:
2157 info_dict['thumbnail'] = sanitize_url(thumbnail)
2158 elif thumbnails:
2159 info_dict['thumbnail'] = thumbnails[-1]['url']
2160
2161 if info_dict.get('display_id') is None and 'id' in info_dict:
2162 info_dict['display_id'] = info_dict['id']
2163
2164 for ts_key, date_key in (
2165 ('timestamp', 'upload_date'),
2166 ('release_timestamp', 'release_date'),
2167 ):
2168 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2169 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2170 # see http://bugs.python.org/issue1646728)
2171 try:
2172 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2173 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2174 except (ValueError, OverflowError, OSError):
2175 pass
2176
2177 live_keys = ('is_live', 'was_live')
2178 live_status = info_dict.get('live_status')
2179 if live_status is None:
2180 for key in live_keys:
2181 if info_dict.get(key) is False:
2182 continue
2183 if info_dict.get(key):
2184 live_status = key
2185 break
2186 if all(info_dict.get(key) is False for key in live_keys):
2187 live_status = 'not_live'
2188 if live_status:
2189 info_dict['live_status'] = live_status
2190 for key in live_keys:
2191 if info_dict.get(key) is None:
2192 info_dict[key] = (live_status == key)
2193
2194 # Auto generate title fields corresponding to the *_number fields when missing
2195 # in order to always have clean titles. This is very common for TV series.
2196 for field in ('chapter', 'season', 'episode'):
2197 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2198 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
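        # e.g. (illustrative): {'episode_number': 3} with no 'episode' set
        # becomes info_dict['episode'] = 'Episode 3'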
2199
2200 for cc_kind in ('subtitles', 'automatic_captions'):
2201 cc = info_dict.get(cc_kind)
2202 if cc:
2203 for _, subtitle in cc.items():
2204 for subtitle_format in subtitle:
2205 if subtitle_format.get('url'):
2206 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2207 if subtitle_format.get('ext') is None:
2208 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2209
2210 automatic_captions = info_dict.get('automatic_captions')
2211 subtitles = info_dict.get('subtitles')
2212
2213 info_dict['requested_subtitles'] = self.process_subtitles(
2214 info_dict['id'], subtitles, automatic_captions)
2215
2216 # We now pick which formats have to be downloaded
2217 if info_dict.get('formats') is None:
2218 # There's only one format available
2219 formats = [info_dict]
2220 else:
2221 formats = info_dict['formats']
2222
2223 info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
2224 if not self.params.get('allow_unplayable_formats'):
2225 formats = [f for f in formats if not f.get('has_drm')]
2226
2227 if not formats:
2228 self.raise_no_formats(info_dict)
2229
2230 def is_wellformed(f):
2231 url = f.get('url')
2232 if not url:
2233 self.report_warning(
2234                     '"url" field is missing or empty - skipping format; '
2235                     'there is an error in the extractor')
2236 return False
2237 if isinstance(url, bytes):
2238 sanitize_string_field(f, 'url')
2239 return True
2240
2241 # Filter out malformed formats for better extraction robustness
2242 formats = list(filter(is_wellformed, formats))
2243
2244 formats_dict = {}
2245
2246 # We check that all the formats have the format and format_id fields
2247 for i, format in enumerate(formats):
2248 sanitize_string_field(format, 'format_id')
2249 sanitize_numeric_fields(format)
2250 format['url'] = sanitize_url(format['url'])
2251 if not format.get('format_id'):
2252 format['format_id'] = compat_str(i)
2253 else:
2254 # Sanitize format_id from characters used in format selector expression
2255 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2256 format_id = format['format_id']
2257 if format_id not in formats_dict:
2258 formats_dict[format_id] = []
2259 formats_dict[format_id].append(format)
2260
2261 # Make sure all formats have unique format_id
2262 for format_id, ambiguous_formats in formats_dict.items():
2263 if len(ambiguous_formats) > 1:
2264 for i, format in enumerate(ambiguous_formats):
2265 format['format_id'] = '%s-%d' % (format_id, i)
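        # Dedup sketch (illustrative ids): two formats both reporting
        # format_id 'hls-1080' become 'hls-1080-0' and 'hls-1080-1', keeping
        # selection by format_id unambiguous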
2266
2267 for i, format in enumerate(formats):
2268 if format.get('format') is None:
2269 format['format'] = '{id} - {res}{note}'.format(
2270 id=format['format_id'],
2271 res=self.format_resolution(format),
2272 note=format_field(format, 'format_note', ' (%s)'),
2273 )
2274 # Automatically determine file extension if missing
2275 if format.get('ext') is None:
2276 format['ext'] = determine_ext(format['url']).lower()
2277 # Automatically determine protocol if missing (useful for format
2278 # selection purposes)
2279 if format.get('protocol') is None:
2280 format['protocol'] = determine_protocol(format)
2281 # Add HTTP headers, so that external programs can use them from the
2282 # json output
2283 full_format_info = info_dict.copy()
2284 full_format_info.update(format)
2285 format['http_headers'] = self._calc_headers(full_format_info)
2286 # Remove private housekeeping stuff
2287 if '__x_forwarded_for_ip' in info_dict:
2288 del info_dict['__x_forwarded_for_ip']
2289
2290 # TODO Central sorting goes here
2291
2292 if not formats or formats[0] is not info_dict:
2293             # only set the 'formats' field if the original info_dict lists them;
2294             # otherwise we end up with a circular reference: the first (and only)
2295             # element of the 'formats' field in info_dict would be info_dict itself,
2296             # which can't be exported to json
2297 info_dict['formats'] = formats
2298
2299 info_dict, _ = self.pre_process(info_dict)
2300
2301 if self.params.get('list_thumbnails'):
2302 self.list_thumbnails(info_dict)
2303 if self.params.get('listformats'):
2304 if not info_dict.get('formats') and not info_dict.get('url'):
2305 self.to_screen('%s has no formats' % info_dict['id'])
2306 else:
2307 self.list_formats(info_dict)
2308 if self.params.get('listsubtitles'):
2309 if 'automatic_captions' in info_dict:
2310 self.list_subtitles(
2311 info_dict['id'], automatic_captions, 'automatic captions')
2312 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2313 list_only = self.params.get('simulate') is None and (
2314 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2315 if list_only:
2316 # Without this printing, -F --print-json will not work
2317 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2318 return
2319
2320 format_selector = self.format_selector
2321 if format_selector is None:
2322 req_format = self._default_format_spec(info_dict, download=download)
2323 self.write_debug('Default format spec: %s' % req_format)
2324 format_selector = self.build_format_selector(req_format)
2325
2326         # While in format selection we may need to have access to the original
2327         # format set in order to calculate some metrics or do some processing.
2328 # For now we need to be able to guess whether original formats provided
2329 # by extractor are incomplete or not (i.e. whether extractor provides only
2330 # video-only or audio-only formats) for proper formats selection for
2331 # extractors with such incomplete formats (see
2332 # https://github.com/ytdl-org/youtube-dl/pull/5556).
2333         # Since formats may be filtered during format selection and may not match
2334         # the original formats, the results may be incorrect. Thus original formats
2335 # or pre-calculated metrics should be passed to format selection routines
2336 # as well.
2337 # We will pass a context object containing all necessary additional data
2338 # instead of just formats.
2339 # This fixes incorrect format selection issue (see
2340 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2341 incomplete_formats = (
2342 # All formats are video-only or
2343 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2344 # all formats are audio-only
2345 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
2346
2347 ctx = {
2348 'formats': formats,
2349 'incomplete_formats': incomplete_formats,
2350 }
2351
2352 formats_to_download = list(format_selector(ctx))
2353 if not formats_to_download:
2354 if not self.params.get('ignore_no_formats_error'):
2355 raise ExtractorError('Requested format is not available', expected=True,
2356 video_id=info_dict['id'], ie=info_dict['extractor'])
2357 else:
2358 self.report_warning('Requested format is not available')
2359 # Process what we can, even without any available formats.
2360 self.process_info(dict(info_dict))
2361 elif download:
2362 self.to_screen(
2363 '[info] %s: Downloading %d format(s): %s' % (
2364 info_dict['id'], len(formats_to_download),
2365 ", ".join([f['format_id'] for f in formats_to_download])))
2366 for fmt in formats_to_download:
2367 new_info = dict(info_dict)
2368 # Save a reference to the original info_dict so that it can be modified in process_info if needed
2369 new_info['__original_infodict'] = info_dict
2370 new_info.update(fmt)
2371 self.process_info(new_info)
2372 # We update the info dict with the best quality format (backwards compatibility)
2373 if formats_to_download:
2374 info_dict.update(formats_to_download[-1])
2375 return info_dict
2376
2377 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2378 """Select the requested subtitles and their format"""
2379 available_subs = {}
2380 if normal_subtitles and self.params.get('writesubtitles'):
2381 available_subs.update(normal_subtitles)
2382 if automatic_captions and self.params.get('writeautomaticsub'):
2383 for lang, cap_info in automatic_captions.items():
2384 if lang not in available_subs:
2385 available_subs[lang] = cap_info
2386
2387         if ((not self.params.get('writesubtitles')
2388                 and not self.params.get('writeautomaticsub'))
2389                 or not available_subs):
2390 return None
2391
2392 all_sub_langs = available_subs.keys()
2393 if self.params.get('allsubtitles', False):
2394 requested_langs = all_sub_langs
2395 elif self.params.get('subtitleslangs', False):
2396 # A list is used so that the order of languages will be the same as
2397 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
2398 requested_langs = []
2399 for lang_re in self.params.get('subtitleslangs'):
2400 if lang_re == 'all':
2401 requested_langs.extend(all_sub_langs)
2402 continue
2403 discard = lang_re[0] == '-'
2404 if discard:
2405 lang_re = lang_re[1:]
2406 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
2407 if discard:
2408 for lang in current_langs:
2409 while lang in requested_langs:
2410 requested_langs.remove(lang)
2411 else:
2412 requested_langs.extend(current_langs)
2413 requested_langs = orderedSet(requested_langs)
2414 elif 'en' in available_subs:
2415 requested_langs = ['en']
2416 else:
2417 requested_langs = [list(all_sub_langs)[0]]
2418 if requested_langs:
2419 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2420
2421 formats_query = self.params.get('subtitlesformat', 'best')
2422 formats_preference = formats_query.split('/') if formats_query else []
2423 subs = {}
2424 for lang in requested_langs:
2425 formats = available_subs.get(lang)
2426 if formats is None:
2427 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2428 continue
2429 for ext in formats_preference:
2430 if ext == 'best':
2431 f = formats[-1]
2432 break
2433 matches = list(filter(lambda f: f['ext'] == ext, formats))
2434 if matches:
2435 f = matches[-1]
2436 break
2437 else:
2438 f = formats[-1]
2439 self.report_warning(
2440 'No subtitle format found matching "%s" for language %s, '
2441 'using %s' % (formats_query, lang, f['ext']))
2442 subs[lang] = f
2443 return subs
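        # Usage sketch (illustrative video id and languages): with
        # writesubtitles enabled and subtitlesformat 'srt/best',
        #   process_subtitles('xyz', {'en': [{'ext': 'vtt'}, {'ext': 'srt'}]}, None)
        # returns {'en': {'ext': 'srt'}}; the last matching entry of the first
        # preferred ext wins, falling back to the last (best) format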
2444
2445 def __forced_printings(self, info_dict, filename, incomplete):
2446 def print_mandatory(field, actual_field=None):
2447 if actual_field is None:
2448 actual_field = field
2449 if (self.params.get('force%s' % field, False)
2450 and (not incomplete or info_dict.get(actual_field) is not None)):
2451 self.to_stdout(info_dict[actual_field])
2452
2453 def print_optional(field):
2454 if (self.params.get('force%s' % field, False)
2455 and info_dict.get(field) is not None):
2456 self.to_stdout(info_dict[field])
2457
2458 info_dict = info_dict.copy()
2459 if filename is not None:
2460 info_dict['filename'] = filename
2461 if info_dict.get('requested_formats') is not None:
2462 # For RTMP URLs, also include the playpath
2463 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2464 elif 'url' in info_dict:
2465 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2466
2467 if self.params.get('forceprint') or self.params.get('forcejson'):
2468 self.post_extract(info_dict)
2469 for tmpl in self.params.get('forceprint', []):
2470 mobj = re.match(r'\w+(=?)$', tmpl)
2471 if mobj and mobj.group(1):
2472 tmpl = f'{tmpl[:-1]} = %({tmpl[:-1]})s'
2473 elif mobj:
2474 tmpl = '%({})s'.format(tmpl)
2475 self.to_stdout(self.evaluate_outtmpl(tmpl, info_dict))
2476
2477 print_mandatory('title')
2478 print_mandatory('id')
2479 print_mandatory('url', 'urls')
2480 print_optional('thumbnail')
2481 print_optional('description')
2482 print_optional('filename')
2483 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2484 self.to_stdout(formatSeconds(info_dict['duration']))
2485 print_mandatory('format')
2486
2487 if self.params.get('forcejson'):
2488 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2489
2490 def dl(self, name, info, subtitle=False, test=False):
2491 if not info.get('url'):
2492 self.raise_no_formats(info, True)
2493
2494 if test:
2495 verbose = self.params.get('verbose')
2496 params = {
2497 'test': True,
2498 'quiet': self.params.get('quiet') or not verbose,
2499 'verbose': verbose,
2500 'noprogress': not verbose,
2501 'nopart': True,
2502 'skip_unavailable_fragments': False,
2503 'keep_fragments': False,
2504 'overwrites': True,
2505 '_no_ytdl_file': True,
2506 }
2507 else:
2508 params = self.params
2509 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2510 if not test:
2511 for ph in self._progress_hooks:
2512 fd.add_progress_hook(ph)
2513 urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
2514 self.write_debug('Invoking downloader on "%s"' % urls)
2515 new_info = dict(info)
2516 if new_info.get('http_headers') is None:
2517 new_info['http_headers'] = self._calc_headers(new_info)
2518 return fd.download(name, new_info, subtitle)
2519
2520 def process_info(self, info_dict):
2521 """Process a single resolved IE result."""
2522
2523 assert info_dict.get('_type', 'video') == 'video'
2524
2525 max_downloads = self.params.get('max_downloads')
2526 if max_downloads is not None:
2527 if self._num_downloads >= int(max_downloads):
2528 raise MaxDownloadsReached()
2529
2530 # TODO: backward compatibility, to be removed
2531 info_dict['fulltitle'] = info_dict['title']
2532
2533 if 'format' not in info_dict and 'ext' in info_dict:
2534 info_dict['format'] = info_dict['ext']
2535
2536 if self._match_entry(info_dict) is not None:
2537 return
2538
2539 self.post_extract(info_dict)
2540 self._num_downloads += 1
2541
2542 # info_dict['_filename'] needs to be set for backward compatibility
2543 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2544 temp_filename = self.prepare_filename(info_dict, 'temp')
2545 files_to_move = {}
2546
2547 # Forced printings
2548 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2549
2550 if self.params.get('simulate'):
2551 if self.params.get('force_write_download_archive', False):
2552 self.record_download_archive(info_dict)
2553 # Do nothing else if in simulate mode
2554 return
2555
2556 if full_filename is None:
2557 return
2558 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2559 return
2560 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2561 return
2562
2563 if self._write_description('video', info_dict,
2564 self.prepare_filename(info_dict, 'description')) is None:
2565 return
2566
2567 sub_files = self._write_subtitles(info_dict, temp_filename)
2568 if sub_files is None:
2569 return
2570 files_to_move.update(dict(sub_files))
2571
2572 thumb_files = self._write_thumbnails(
2573 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2574 if thumb_files is None:
2575 return
2576 files_to_move.update(dict(thumb_files))
2577
2578 infofn = self.prepare_filename(info_dict, 'infojson')
2579 _infojson_written = self._write_info_json('video', info_dict, infofn)
2580 if _infojson_written:
2581 info_dict['__infojson_filename'] = infofn
2582 elif _infojson_written is None:
2583 return
2584
2585 # Note: Annotations are deprecated
2586 annofn = None
2587 if self.params.get('writeannotations', False):
2588 annofn = self.prepare_filename(info_dict, 'annotation')
2589 if annofn:
2590 if not self._ensure_dir_exists(encodeFilename(annofn)):
2591 return
2592 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2593 self.to_screen('[info] Video annotations are already present')
2594 elif not info_dict.get('annotations'):
2595 self.report_warning('There are no annotations to write.')
2596 else:
2597 try:
2598 self.to_screen('[info] Writing video annotations to: ' + annofn)
2599 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2600 annofile.write(info_dict['annotations'])
2601 except (KeyError, TypeError):
2602 self.report_warning('There are no annotations to write.')
2603 except (OSError, IOError):
2604 self.report_error('Cannot write annotations file: ' + annofn)
2605 return
2606
2607 # Write internet shortcut files
2608 url_link = webloc_link = desktop_link = False
2609 if self.params.get('writelink', False):
2610 if sys.platform == "darwin": # macOS.
2611 webloc_link = True
2612 elif sys.platform.startswith("linux"):
2613 desktop_link = True
2614 else: # if sys.platform in ['win32', 'cygwin']:
2615 url_link = True
2616 if self.params.get('writeurllink', False):
2617 url_link = True
2618 if self.params.get('writewebloclink', False):
2619 webloc_link = True
2620 if self.params.get('writedesktoplink', False):
2621 desktop_link = True
2622
2623 if url_link or webloc_link or desktop_link:
2624 if 'webpage_url' not in info_dict:
2625 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2626 return
2627 ascii_url = iri_to_uri(info_dict['webpage_url'])
2628
2629 def _write_link_file(extension, template, newline, embed_filename):
2630 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
2631             if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2632 self.to_screen('[info] Internet shortcut is already present')
2633 else:
2634 try:
2635 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2636 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2637 template_vars = {'url': ascii_url}
2638 if embed_filename:
2639 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2640 linkfile.write(template % template_vars)
2641 except (OSError, IOError):
2642 self.report_error('Cannot write internet shortcut ' + linkfn)
2643 return False
2644 return True
2645
2646 if url_link:
2647 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2648 return
2649 if webloc_link:
2650 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2651 return
2652 if desktop_link:
2653 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2654 return
2655
2656 try:
2657 info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2658 except PostProcessingError as err:
2659 self.report_error('Preprocessing: %s' % str(err))
2660 return
2661
2662 must_record_download_archive = False
2663 if self.params.get('skip_download', False):
2664 info_dict['filepath'] = temp_filename
2665 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2666 info_dict['__files_to_move'] = files_to_move
2667 info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
2668 else:
2669 # Download
2670 info_dict.setdefault('__postprocessors', [])
2671 try:
2672
2673 def existing_file(*filepaths):
2674 ext = info_dict.get('ext')
2675 final_ext = self.params.get('final_ext', ext)
2676 existing_files = []
2677 for file in orderedSet(filepaths):
2678 if final_ext != ext:
2679 converted = replace_extension(file, final_ext, ext)
2680 if os.path.exists(encodeFilename(converted)):
2681 existing_files.append(converted)
2682 if os.path.exists(encodeFilename(file)):
2683 existing_files.append(file)
2684
2685 if not existing_files or self.params.get('overwrites', False):
2686 for file in orderedSet(existing_files):
2687 self.report_file_delete(file)
2688 os.remove(encodeFilename(file))
2689 return None
2690
2691 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2692 return existing_files[0]
2693
2694 success = True
2695 if info_dict.get('requested_formats') is not None:
2696
2697 def compatible_formats(formats):
2698 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2699 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2700 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2701 if len(video_formats) > 2 or len(audio_formats) > 2:
2702 return False
2703
2704 # Check extension
2705 exts = set(format.get('ext') for format in formats)
2706 COMPATIBLE_EXTS = (
2707 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2708 set(('webm',)),
2709 )
2710 for ext_sets in COMPATIBLE_EXTS:
2711 if ext_sets.issuperset(exts):
2712 return True
2713 # TODO: Check acodec/vcodec
2714 return False
2715
2716 requested_formats = info_dict['requested_formats']
2717 old_ext = info_dict['ext']
2718 if self.params.get('merge_output_format') is None:
2719 if not compatible_formats(requested_formats):
2720 info_dict['ext'] = 'mkv'
2721 self.report_warning(
2722 'Requested formats are incompatible for merge and will be merged into mkv')
2723 if (info_dict['ext'] == 'webm'
2724 and info_dict.get('thumbnails')
2725 # check with type instead of pp_key, __name__, or isinstance
2726                             # since we don't want any custom PPs to trigger this
2727 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):
2728 info_dict['ext'] = 'mkv'
2729 self.report_warning(
2730 'webm doesn\'t support embedding a thumbnail, mkv will be used')
2731 new_ext = info_dict['ext']
2732
2733 def correct_ext(filename, ext=new_ext):
2734 if filename == '-':
2735 return filename
2736 filename_real_ext = os.path.splitext(filename)[1][1:]
2737 filename_wo_ext = (
2738 os.path.splitext(filename)[0]
2739 if filename_real_ext in (old_ext, new_ext)
2740 else filename)
2741 return '%s.%s' % (filename_wo_ext, ext)
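                    # e.g. (illustrative): correct_ext('video.webm') with
                    # old_ext 'webm' and new_ext 'mkv' -> 'video.mkv', while a
                    # '-' (stdout) filename is passed through unchanged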
2742
2743 # Ensure filename always has a correct extension for successful merge
2744 full_filename = correct_ext(full_filename)
2745 temp_filename = correct_ext(temp_filename)
2746 dl_filename = existing_file(full_filename, temp_filename)
2747 info_dict['__real_download'] = False
2748
2749 if dl_filename is not None:
2750 self.report_file_already_downloaded(dl_filename)
2751 elif get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-'):
2752 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
2753 success, real_download = self.dl(temp_filename, info_dict)
2754 info_dict['__real_download'] = real_download
2755 else:
2756 downloaded = []
2757 merger = FFmpegMergerPP(self)
2758 if self.params.get('allow_unplayable_formats'):
2759 self.report_warning(
2760 'You have requested merging of multiple formats '
2761 'while also allowing unplayable formats to be downloaded. '
2762 'The formats won\'t be merged to prevent data corruption.')
2763 elif not merger.available:
2764 self.report_warning(
2765 'You have requested merging of multiple formats but ffmpeg is not installed. '
2766 'The formats won\'t be merged.')
2767
2768 if temp_filename == '-':
2769 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict)
2770 else 'but the formats are incompatible for simultaneous download' if merger.available
2771 else 'but ffmpeg is not installed')
2772 self.report_warning(
2773 f'You have requested downloading multiple formats to stdout {reason}. '
2774 'The formats will be streamed one after the other')
2775 fname = temp_filename
2776 for f in requested_formats:
2777 new_info = dict(info_dict)
2778 del new_info['requested_formats']
2779 new_info.update(f)
2780 if temp_filename != '-':
2781 fname = prepend_extension(
2782 correct_ext(temp_filename, new_info['ext']),
2783 'f%s' % f['format_id'], new_info['ext'])
2784 if not self._ensure_dir_exists(fname):
2785 return
2786 f['filepath'] = fname
2787 downloaded.append(fname)
2788 partial_success, real_download = self.dl(fname, new_info)
2789 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2790 success = success and partial_success
2791 if merger.available and not self.params.get('allow_unplayable_formats'):
2792 info_dict['__postprocessors'].append(merger)
2793 info_dict['__files_to_merge'] = downloaded
2794 # Even if there were no downloads, it is being merged only now
2795 info_dict['__real_download'] = True
2796 else:
2797 for file in downloaded:
2798 files_to_move[file] = None
2799 else:
2800 # Just a single file
2801 dl_filename = existing_file(full_filename, temp_filename)
2802 if dl_filename is None or dl_filename == temp_filename:
2803 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
2804 # So we should try to resume the download
2805 success, real_download = self.dl(temp_filename, info_dict)
2806 info_dict['__real_download'] = real_download
2807 else:
2808 self.report_file_already_downloaded(dl_filename)
2809
2810 dl_filename = dl_filename or temp_filename
2811 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2812
2813 except network_exceptions as err:
2814 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
2815 return
2816 except (OSError, IOError) as err:
2817 raise UnavailableVideoError(err)
2818 except (ContentTooShortError, ) as err:
2819 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2820 return
2821
2822 if success and full_filename != '-':
2823
2824 def fixup():
2825 do_fixup = True
2826 fixup_policy = self.params.get('fixup')
2827 vid = info_dict['id']
2828
2829 if fixup_policy in ('ignore', 'never'):
2830 return
2831 elif fixup_policy == 'warn':
2832 do_fixup = False
2833 elif fixup_policy != 'force':
2834 assert fixup_policy in ('detect_or_warn', None)
2835 if not info_dict.get('__real_download'):
2836 do_fixup = False
2837
2838 def ffmpeg_fixup(cndn, msg, cls):
2839 if not cndn:
2840 return
2841 if not do_fixup:
2842 self.report_warning(f'{vid}: {msg}')
2843 return
2844 pp = cls(self)
2845 if pp.available:
2846 info_dict['__postprocessors'].append(pp)
2847 else:
2848 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
2849
2850 stretched_ratio = info_dict.get('stretched_ratio')
2851 ffmpeg_fixup(
2852 stretched_ratio not in (1, None),
2853 f'Non-uniform pixel ratio {stretched_ratio}',
2854 FFmpegFixupStretchedPP)
2855
2856 ffmpeg_fixup(
2857 (info_dict.get('requested_formats') is None
2858 and info_dict.get('container') == 'm4a_dash'
2859 and info_dict.get('ext') == 'm4a'),
2860 'writing DASH m4a. Only some players support this container',
2861 FFmpegFixupM4aPP)
2862
2863 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
2864 downloader = downloader.__name__ if downloader else None
2865 ffmpeg_fixup(info_dict.get('requested_formats') is None and downloader == 'HlsFD',
2866 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
2867 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
2868 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
2869
2870 fixup()
2871 try:
2872 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
2873 except PostProcessingError as err:
2874 self.report_error('Postprocessing: %s' % str(err))
2875 return
2876 try:
2877 for ph in self._post_hooks:
2878 ph(info_dict['filepath'])
2879 except Exception as err:
2880 self.report_error('post hooks: %s' % str(err))
2881 return
2882 must_record_download_archive = True
2883
2884 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2885 self.record_download_archive(info_dict)
2886 max_downloads = self.params.get('max_downloads')
2887 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2888 raise MaxDownloadsReached()
2889
2890 def download(self, url_list):
2891 """Download a given list of URLs."""
2892 outtmpl = self.outtmpl_dict['default']
2893 if (len(url_list) > 1
2894 and outtmpl != '-'
2895 and '%' not in outtmpl
2896 and self.params.get('max_downloads') != 1):
2897 raise SameFileError(outtmpl)
2898
2899 for url in url_list:
2900 try:
2901 # It also downloads the videos
2902 res = self.extract_info(
2903 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
2904 except UnavailableVideoError:
2905 self.report_error('unable to download video')
2906 except MaxDownloadsReached:
2907 self.to_screen('[info] Maximum number of downloads reached')
2908 raise
2909 except ExistingVideoReached:
2910 self.to_screen('[info] Encountered a video that is already in the archive, stopping due to --break-on-existing')
2911 raise
2912 except RejectedVideoReached:
2913 self.to_screen('[info] Encountered a video that did not match filter, stopping due to --break-on-reject')
2914 raise
2915 else:
2916 if self.params.get('dump_single_json', False):
2917 self.post_extract(res)
2918 self.to_stdout(json.dumps(self.sanitize_info(res)))
2919
2920 return self._download_retcode
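# A minimal usage sketch (illustrative only; the options and URL below are
# placeholders, not part of this module):
#
#     with YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
#         retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])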
2921
2922 def download_with_info_file(self, info_filename):
2923 with contextlib.closing(fileinput.FileInput(
2924 [info_filename], mode='r',
2925 openhook=fileinput.hook_encoded('utf-8'))) as f:
2926 # FileInput doesn't have a read method, so we can't call json.load
2927 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
2928 try:
2929 self.process_ie_result(info, download=True)
2930 except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
2931 webpage_url = info.get('webpage_url')
2932 if webpage_url is not None:
2933 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
2934 return self.download([webpage_url])
2935 else:
2936 raise
2937 return self._download_retcode
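# Sketch of the --load-info-json flow this method backs (the filename is
# hypothetical):
#
#     ydl.download_with_info_file('video.info.json')
#
# On DownloadError/EntryNotInPlaylist/ThrottledDownload it retries from the
# stored 'webpage_url' when one is present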
2938
2939 @staticmethod
2940 def sanitize_info(info_dict, remove_private_keys=False):
2941 ''' Sanitize the infodict for converting to json '''
2942 if info_dict is None:
2943 return info_dict
2944 info_dict.setdefault('epoch', int(time.time()))
2945 remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
2946 keep_keys = {'_type'} # Always keep this to facilitate load-info-json
2947 if remove_private_keys:
2948 remove_keys |= {
2949 'requested_formats', 'requested_subtitles', 'requested_entries',
2950 'filepath', 'entries', 'original_url', 'playlist_autonumber',
2951 }
2952 empty_values = (None, {}, [], set(), tuple())
2953 reject = lambda k, v: k not in keep_keys and (
2954 k.startswith('_') or k in remove_keys or v in empty_values)
2955 else:
2956 reject = lambda k, v: k in remove_keys
2957 filter_fn = lambda obj: (
2958 list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
2959 else obj if not isinstance(obj, dict)
2960 else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
2961 return filter_fn(info_dict)
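# For example, a sketch of dumping a cleaned infodict to JSON (assuming
# `info` came from extract_info):
#
#     json.dumps(YoutubeDL.sanitize_info(info, remove_private_keys=True))
#
# With remove_private_keys=True, internal and '_'-prefixed keys are dropped,
# but '_type' survives so the output remains loadable via --load-info-json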
2962
2963 @staticmethod
2964 def filter_requested_info(info_dict, actually_filter=True):
2965 ''' Alias of sanitize_info for backward compatibility '''
2966 return YoutubeDL.sanitize_info(info_dict, actually_filter)
2967
2968 def run_pp(self, pp, infodict):
2969 files_to_delete = []
2970 if '__files_to_move' not in infodict:
2971 infodict['__files_to_move'] = {}
2972 try:
2973 files_to_delete, infodict = pp.run(infodict)
2974 except PostProcessingError as e:
2975 # ignoreerrors must be exactly True (not 'only_download') to swallow PP errors
2976 if self.params.get('ignoreerrors') is True:
2977 self.report_error(e)
2978 return infodict
2979 raise
2980
2981 if not files_to_delete:
2982 return infodict
2983 if self.params.get('keepvideo', False):
2984 for f in files_to_delete:
2985 infodict['__files_to_move'].setdefault(f, '')
2986 else:
2987 for old_filename in set(files_to_delete):
2988 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2989 try:
2990 os.remove(encodeFilename(old_filename))
2991 except (IOError, OSError):
2992 self.report_warning('Unable to remove downloaded original file')
2993 if old_filename in infodict['__files_to_move']:
2994 del infodict['__files_to_move'][old_filename]
2995 return infodict
2996
2997 @staticmethod
2998 def post_extract(info_dict):
2999 def actual_post_extract(info_dict):
3000 if info_dict.get('_type') in ('playlist', 'multi_video'):
3001 for video_dict in info_dict.get('entries') or []:
3002 actual_post_extract(video_dict or {})
3003 return
3004
3005 post_extractor = info_dict.get('__post_extractor') or (lambda: {})
3006 extra = post_extractor().items()
3007 info_dict.update(extra)
3008 info_dict.pop('__post_extractor', None)
3009
3010 original_infodict = info_dict.get('__original_infodict') or {}
3011 original_infodict.update(extra)
3012 original_infodict.pop('__post_extractor', None)
3013
3014 actual_post_extract(info_dict or {})
3015
3016 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3017 info = dict(ie_info)
3018 info['__files_to_move'] = files_to_move or {}
3019 for pp in self._pps[key]:
3020 info = self.run_pp(pp, info)
3021 return info, info.pop('__files_to_move', None)
3022
3023 def post_process(self, filename, ie_info, files_to_move=None):
3024 """Run all the postprocessors on the given file."""
3025 info = dict(ie_info)
3026 info['filepath'] = filename
3027 info['__files_to_move'] = files_to_move or {}
3028
3029 for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
3030 info = self.run_pp(pp, info)
3031 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3032 del info['__files_to_move']
3033 for pp in self._pps['after_move']:
3034 info = self.run_pp(pp, info)
3035 return info
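# Net effect: postprocessors run in the order per-video '__postprocessors'
# (attached during download), then the 'post_process' group, then
# MoveFilesAfterDownloadPP, and finally the 'after_move' group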
3036
3037 def _make_archive_id(self, info_dict):
3038 video_id = info_dict.get('id')
3039 if not video_id:
3040 return
3041 # Use a lowercased extractor key to stay robust against changes in case
3042 # and for backwards compatibility with archives from prior versions
3043 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3044 if extractor is None:
3045 url = str_or_none(info_dict.get('url'))
3046 if not url:
3047 return
3048 # Try to find matching extractor for the URL and take its ie_key
3049 for ie_key, ie in self._ies.items():
3050 if ie.suitable(url):
3051 extractor = ie_key
3052 break
3053 else:
3054 return # for-else: runs only when no extractor matched the URL
3055 return '%s %s' % (extractor.lower(), video_id)
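# i.e. an archive entry is '<lowercased extractor key> <video id>',
# e.g. 'youtube BaW_jenozKc' (hypothetical ID)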
3056
3057 def in_download_archive(self, info_dict):
3058 fn = self.params.get('download_archive')
3059 if fn is None:
3060 return False
3061
3062 vid_id = self._make_archive_id(info_dict)
3063 if not vid_id:
3064 return False # Incomplete video information
3065
3066 return vid_id in self.archive
3067
3068 def record_download_archive(self, info_dict):
3069 fn = self.params.get('download_archive')
3070 if fn is None:
3071 return
3072 vid_id = self._make_archive_id(info_dict)
3073 assert vid_id
3074 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3075 archive_file.write(vid_id + '\n')
3076 self.archive.add(vid_id)
3077
3078 @staticmethod
3079 def format_resolution(format, default='unknown'):
3080 is_images = format.get('vcodec') == 'none' and format.get('acodec') == 'none'
3081 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3082 return 'audio only'
3083 if format.get('resolution') is not None:
3084 return format['resolution']
3085 if format.get('width') and format.get('height'):
3086 res = '%dx%d' % (format['width'], format['height'])
3087 elif format.get('height'):
3088 res = '%sp' % format['height']
3089 elif format.get('width'):
3090 res = '%dx?' % format['width']
3091 elif is_images:
3092 return 'images'
3093 else:
3094 return default
3095 return f'{res} images' if is_images else res
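# Illustrative mappings (hypothetical format dicts):
#   {'width': 1920, 'height': 1080}           -> '1920x1080'
#   {'height': 720}                           -> '720p'
#   {'vcodec': 'none', 'acodec': 'mp4a.40.2'} -> 'audio only'
#   {'vcodec': 'none', 'acodec': 'none'}      -> 'images'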
3096
3097 def _format_note(self, fdict):
3098 res = ''
3099 if fdict.get('ext') in ['f4f', 'f4m']:
3100 res += '(unsupported) '
3101 if fdict.get('language'):
3102 if res:
3103 res += ' '
3104 res += '[%s] ' % fdict['language']
3105 if fdict.get('format_note') is not None:
3106 res += fdict['format_note'] + ' '
3107 if fdict.get('tbr') is not None:
3108 res += '%4dk ' % fdict['tbr']
3109 if fdict.get('container') is not None:
3110 if res:
3111 res += ', '
3112 res += '%s container' % fdict['container']
3113 if (fdict.get('vcodec') is not None
3114 and fdict.get('vcodec') != 'none'):
3115 if res:
3116 res += ', '
3117 res += fdict['vcodec']
3118 if fdict.get('vbr') is not None:
3119 res += '@'
3120 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3121 res += 'video@'
3122 if fdict.get('vbr') is not None:
3123 res += '%4dk' % fdict['vbr']
3124 if fdict.get('fps') is not None:
3125 if res:
3126 res += ', '
3127 res += '%sfps' % fdict['fps']
3128 if fdict.get('acodec') is not None:
3129 if res:
3130 res += ', '
3131 if fdict['acodec'] == 'none':
3132 res += 'video only'
3133 else:
3134 res += '%-5s' % fdict['acodec']
3135 elif fdict.get('abr') is not None:
3136 if res:
3137 res += ', '
3138 res += 'audio'
3139 if fdict.get('abr') is not None:
3140 res += '@%3dk' % fdict['abr']
3141 if fdict.get('asr') is not None:
3142 res += ' (%5dHz)' % fdict['asr']
3143 if fdict.get('filesize') is not None:
3144 if res:
3145 res += ', '
3146 res += format_bytes(fdict['filesize'])
3147 elif fdict.get('filesize_approx') is not None:
3148 if res:
3149 res += ', '
3150 res += '~' + format_bytes(fdict['filesize_approx'])
3151 return res
3152
3153 def list_formats(self, info_dict):
3154 formats = info_dict.get('formats', [info_dict])
3155 new_format = (
3156 'list-formats' not in self.params.get('compat_opts', [])
3157 and self.params.get('listformats_table', True) is not False)
3158 if new_format:
3159 table = [
3160 [
3161 format_field(f, 'format_id'),
3162 format_field(f, 'ext'),
3163 self.format_resolution(f),
3164 format_field(f, 'fps', '%d'),
3165 '|',
3166 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
3167 format_field(f, 'tbr', '%4dk'),
3168 shorten_protocol_name(f.get('protocol', '').replace('native', 'n')),
3169 '|',
3170 format_field(f, 'vcodec', default='unknown').replace('none', ''),
3171 format_field(f, 'vbr', '%4dk'),
3172 format_field(f, 'acodec', default='unknown').replace('none', ''),
3173 format_field(f, 'abr', '%3dk'),
3174 format_field(f, 'asr', '%5dHz'),
3175 ', '.join(filter(None, (
3176 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
3177 format_field(f, 'language', '[%s]'),
3178 format_field(f, 'format_note'),
3179 format_field(f, 'container', ignore=(None, f.get('ext'))),
3180 ))),
3181 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3182 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
3183 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
3184 else:
3185 table = [
3186 [
3187 format_field(f, 'format_id'),
3188 format_field(f, 'ext'),
3189 self.format_resolution(f),
3190 self._format_note(f)]
3191 for f in formats
3192 if f.get('preference') is None or f['preference'] >= -1000]
3193 header_line = ['format code', 'extension', 'resolution', 'note']
3194
3195 self.to_screen(
3196 '[info] Available formats for %s:' % info_dict['id'])
3197 self.to_stdout(render_table(
3198 header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
3199
3200 def list_thumbnails(self, info_dict):
3201 thumbnails = list(info_dict.get('thumbnails') or [])
3202 if not thumbnails:
3203 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
3204 return
3205
3206 self.to_screen(
3207 '[info] Thumbnails for %s:' % info_dict['id'])
3208 self.to_stdout(render_table(
3209 ['ID', 'width', 'height', 'URL'],
3210 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
3211
3212 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3213 if not subtitles:
3214 self.to_screen('%s has no %s' % (video_id, name))
3215 return
3216 self.to_screen(
3217 'Available %s for %s:' % (name, video_id))
3218
3219 def _row(lang, formats):
3220 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3221 if len(set(names)) == 1:
3222 names = [] if names[0] == 'unknown' else names[:1]
3223 return [lang, ', '.join(names), ', '.join(exts)]
3224
3225 self.to_stdout(render_table(
3226 ['Language', 'Name', 'Formats'],
3227 [_row(lang, formats) for lang, formats in subtitles.items()],
3228 hideEmpty=True))
3229
3230 def urlopen(self, req):
3231 """ Start an HTTP download """
3232 if isinstance(req, compat_basestring):
3233 req = sanitized_Request(req)
3234 return self._opener.open(req, timeout=self._socket_timeout)
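# Plain string URLs work too, e.g. (hypothetical URL):
#
#     ydl.urlopen('https://example.com/feed').read()
#
# since they are wrapped in sanitized_Request before reaching the opener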
3235
3236 def print_debug_header(self):
3237 if not self.params.get('verbose'):
3238 return
3239 get_encoding = lambda stream: getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
3240 encoding_str = (
3241 'Encodings: locale %s, fs %s, stdout %s, stderr %s, pref %s\n' % (
3242 locale.getpreferredencoding(),
3243 sys.getfilesystemencoding(),
3244 get_encoding(self._screen_file), get_encoding(self._err_file),
3245 self.get_encoding()))
3246
3247 logger = self.params.get('logger')
3248 if logger:
3249 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3250 write_debug(encoding_str)
3251 else:
3252 write_debug = lambda msg: self._write_string(f'[debug] {msg}')
3253 write_string(f'[debug] {encoding_str}', encoding=None)
3254
3255 source = detect_variant()
3256 write_debug('yt-dlp version %s%s\n' % (__version__, '' if source == 'unknown' else f' ({source})'))
3257 if _LAZY_LOADER:
3258 write_debug('Lazy loading extractors enabled\n')
3259 if plugin_extractors or plugin_postprocessors:
3260 write_debug('Plugins: %s\n' % [
3261 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3262 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3263 if self.params.get('compat_opts'):
3264 write_debug('Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
3265 try:
3266 sp = subprocess.Popen(
3267 ['git', 'rev-parse', '--short', 'HEAD'],
3268 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3269 cwd=os.path.dirname(os.path.abspath(__file__)))
3270 out, err = process_communicate_or_kill(sp)
3271 out = out.decode().strip()
3272 if re.fullmatch('[0-9a-f]+', out):
3273 write_debug('Git HEAD: %s\n' % out)
3274 except Exception:
3275 try:
3276 sys.exc_clear() # Python 2 only; raises AttributeError on Python 3 and is swallowed below
3277 except Exception:
3278 pass
3279
3280 def python_implementation():
3281 impl_name = platform.python_implementation()
3282 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3283 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3284 return impl_name
3285
3286 write_debug('Python version %s (%s %s) - %s\n' % (
3287 platform.python_version(),
3288 python_implementation(),
3289 platform.architecture()[0],
3290 platform_name()))
3291
3292 exe_versions = FFmpegPostProcessor.get_versions(self)
3293 exe_versions['rtmpdump'] = rtmpdump_version()
3294 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3295 exe_str = ', '.join(
3296 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3297 ) or 'none'
3298 write_debug('exe versions: %s\n' % exe_str)
3299
3300 from .downloader.websocket import has_websockets
3301 from .postprocessor.embedthumbnail import has_mutagen
3302 from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
3303
3304 lib_str = ', '.join(sorted(filter(None, (
3305 compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0],
3306 has_websockets and 'websockets',
3307 has_mutagen and 'mutagen',
3308 SQLITE_AVAILABLE and 'sqlite',
3309 KEYRING_AVAILABLE and 'keyring',
3310 )))) or 'none'
3311 write_debug('Optional libraries: %s\n' % lib_str)
3312 write_debug('ANSI escape support: stdout = %s, stderr = %s\n' % (
3313 supports_terminal_sequences(self._screen_file),
3314 supports_terminal_sequences(self._err_file)))
3315
3316 proxy_map = {}
3317 for handler in self._opener.handlers:
3318 if hasattr(handler, 'proxies'):
3319 proxy_map.update(handler.proxies)
3320 write_debug('Proxy map: ' + compat_str(proxy_map) + '\n')
3321
3322 if self.params.get('call_home', False):
3323 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
3324 write_debug('Public IP address: %s\n' % ipaddr)
3325 return # Skips the (youtube-dl era) yt-dl.org version check below
3326 latest_version = self.urlopen(
3327 'https://yt-dl.org/latest/version').read().decode('utf-8')
3328 if version_tuple(latest_version) > version_tuple(__version__):
3329 self.report_warning(
3330 'You are using an outdated version (newest version: %s)! '
3331 'See https://yt-dl.org/update if you need help updating.' %
3332 latest_version)
3333
3334 def _setup_opener(self):
3335 timeout_val = self.params.get('socket_timeout')
3336 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
3337
3338 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3339 opts_cookiefile = self.params.get('cookiefile')
3340 opts_proxy = self.params.get('proxy')
3341
3342 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3343
3344 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3345 if opts_proxy is not None:
3346 if opts_proxy == '':
3347 proxies = {}
3348 else:
3349 proxies = {'http': opts_proxy, 'https': opts_proxy}
3350 else:
3351 proxies = compat_urllib_request.getproxies()
3352 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3353 if 'http' in proxies and 'https' not in proxies:
3354 proxies['https'] = proxies['http']
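# e.g. a hypothetical --proxy socks5://127.0.0.1:1080 yields
# proxies = {'http': 'socks5://127.0.0.1:1080',
#            'https': 'socks5://127.0.0.1:1080'}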
3355 proxy_handler = PerRequestProxyHandler(proxies)
3356
3357 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3358 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3359 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3360 redirect_handler = YoutubeDLRedirectHandler()
3361 data_handler = compat_urllib_request_DataHandler()
3362
3363 # When passing our own FileHandler instance, build_opener won't add the
3364 # default FileHandler and allows us to disable the file protocol, which
3365 # can be used for malicious purposes (see
3366 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3367 file_handler = compat_urllib_request.FileHandler()
3368
3369 def file_open(*args, **kwargs):
3370 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3371 file_handler.file_open = file_open
3372
3373 opener = compat_urllib_request.build_opener(
3374 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3375
3376 # Delete the default user-agent header, which would otherwise apply in
3377 # cases where our custom HTTP handler doesn't come into play
3378 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3379 opener.addheaders = []
3380 self._opener = opener
3381
3382 def encode(self, s):
3383 if isinstance(s, bytes):
3384 return s # Already encoded
3385
3386 try:
3387 return s.encode(self.get_encoding())
3388 except UnicodeEncodeError as err:
3389 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3390 raise
3391
3392 def get_encoding(self):
3393 encoding = self.params.get('encoding')
3394 if encoding is None:
3395 encoding = preferredencoding()
3396 return encoding
3397
3398 def _write_info_json(self, label, ie_result, infofn):
3399 ''' Write infojson and return True = written, False = skipped, None = error '''
3400 if not self.params.get('writeinfojson'):
3401 return False
3402 elif not infofn:
3403 self.write_debug(f'Skipping writing {label} infojson')
3404 return False
3405 elif not self._ensure_dir_exists(infofn):
3406 return None
3407 elif not self.params.get('overwrites', True) and os.path.exists(infofn):
3408 self.to_screen(f'[info] {label.title()} metadata is already present')
3409 else:
3410 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3411 try:
3412 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3413 except (OSError, IOError):
3414 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3415 return None
3416 return True
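# So a caller can distinguish all three outcomes, e.g.:
#
#     if self._write_info_json('video', info_dict, infofn) is None:
#         return  # hard error: unwritable directory or JSON dump failure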
3417
3418 def _write_description(self, label, ie_result, descfn):
3419 ''' Write description and return True = written, False = skipped, None = error '''
3420 if not self.params.get('writedescription'):
3421 return False
3422 elif not descfn:
3423 self.write_debug(f'Skipping writing {label} description')
3424 return False
3425 elif not self._ensure_dir_exists(descfn):
3426 return None
3427 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3428 self.to_screen(f'[info] {label.title()} description is already present')
3429 elif ie_result.get('description') is None:
3430 self.report_warning(f'There\'s no {label} description to write')
3431 return False
3432 else:
3433 try:
3434 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3435 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3436 descfile.write(ie_result['description'])
3437 except (OSError, IOError):
3438 self.report_error(f'Cannot write {label} description file {descfn}')
3439 return None
3440 return True
3441
3442 def _write_subtitles(self, info_dict, filename):
3443 ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error '''
3444 ret = []
3445 subtitles = info_dict.get('requested_subtitles')
3446 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3447 # Subtitle download errors are already handled by the relevant IE,
3448 # so this silently continues for IEs that don't support subtitles
3449 return ret
3450
3451 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3452 if not sub_filename_base:
3453 self.to_screen('[info] Skipping writing video subtitles')
3454 return ret
3455 for sub_lang, sub_info in subtitles.items():
3456 sub_format = sub_info['ext']
3457 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3458 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3459 if not self.params.get('overwrites', True) and os.path.exists(sub_filename):
3460 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3461 sub_info['filepath'] = sub_filename
3462 ret.append((sub_filename, sub_filename_final))
3463 continue
3464
3465 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3466 if sub_info.get('data') is not None:
3467 try:
3468 # Use newline='' to prevent conversion of newline characters
3469 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3470 with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3471 subfile.write(sub_info['data'])
3472 sub_info['filepath'] = sub_filename
3473 ret.append((sub_filename, sub_filename_final))
3474 continue
3475 except (OSError, IOError):
3476 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3477 return None
3478
3479 try:
3480 sub_copy = sub_info.copy()
3481 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3482 self.dl(sub_filename, sub_copy, subtitle=True)
3483 sub_info['filepath'] = sub_filename
3484 ret.append((sub_filename, sub_filename_final))
3485 except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3486 self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}')
3487 continue
3488 return ret
3489
3490 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3491 ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
3492 write_all = self.params.get('write_all_thumbnails', False)
3493 thumbnails, ret = [], []
3494 if write_all or self.params.get('writethumbnail', False):
3495 thumbnails = info_dict.get('thumbnails') or []
3496 multiple = write_all and len(thumbnails) > 1
3497
3498 if thumb_filename_base is None:
3499 thumb_filename_base = filename
3500 if thumbnails and not thumb_filename_base:
3501 self.write_debug(f'Skipping writing {label} thumbnail')
3502 return ret
3503
3504 for t in thumbnails[::-1]:
3505 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3506 thumb_display_id = f'{label} thumbnail' + (f' {t["id"]}' if multiple else '')
3507 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3508 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3509
3510 if not self.params.get('overwrites', True) and os.path.exists(thumb_filename):
3511 ret.append((thumb_filename, thumb_filename_final))
3512 t['filepath'] = thumb_filename
3513 self.to_screen(f'[info] {thumb_display_id.title()} is already present')
3514 else:
3515 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3516 try:
3517 uf = self.urlopen(t['url'])
3518 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3519 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3520 shutil.copyfileobj(uf, thumbf)
3521 ret.append((thumb_filename, thumb_filename_final))
3522 t['filepath'] = thumb_filename
3523 except network_exceptions as err:
3524 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3525 if ret and not write_all:
3526 break
3527 return ret
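# Note the early break: unless write_all_thumbnails is set, only the first
# thumbnail that is successfully written (or already present) is kept; the
# list is iterated in reverse so higher-preference entries are tried first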