#!/usr/bin/env python3
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random
import unicodedata

from enum import Enum
from string import ascii_letters

from .compat import (
    compat_basestring,
    compat_get_terminal_size,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_pycrypto_AES,
    compat_shlex_quote,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
    windows_enable_vt_mode,
)
from .cookies import load_cookies
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    determine_ext,
    determine_protocol,
    DownloadCancelled,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    EntryNotInPlaylist,
    error_to_compat_str,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    formatSeconds,
    GeoRestrictedError,
    HEADRequest,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    join_nonempty,
    LazyList,
    LINK_TEMPLATES,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    network_exceptions,
    number_of_digits,
    orderedSet,
    OUTTMPL_TYPES,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    Popen,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    ReExtractInfo,
    register_socks_protocols,
    RejectedVideoReached,
    render_table,
    replace_extension,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    supports_terminal_sequences,
    timetuple_from_msec,
    to_high_limit_path,
    traverse_obj,
    try_get,
    UnavailableVideoError,
    url_basename,
    variadic,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
)
from .cache import Cache
from .minicurses import format_text
from .extractor import (
    gen_extractor_classes,
    get_info_extractor,
    _LAZY_LOADER,
    _PLUGIN_CLASSES as plugin_extractors
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
    FFmpegFD,
    get_suitable_downloader,
    shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    get_postprocessor,
    EmbedThumbnailPP,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    MoveFilesAfterDownloadPP,
    _PLUGIN_CLASSES as plugin_postprocessors
)
from .update import detect_variant
from .version import __version__, RELEASE_GIT_HEAD

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since the downloader does not know how to extract all the
    needed information from a video URL (that is the task of the
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A list of templates to force print
    forceurl:          Force printing final URL. (Deprecated)
    forcetitle:        Force printing title. (Deprecated)
    forceid:           Force printing ID. (Deprecated)
    forcethumbnail:    Force printing thumbnail URL. (Deprecated)
    forcedescription:  Force printing description. (Deprecated)
    forcefilename:     Force printing final filename. (Deprecated)
    forceduration:     Force printing duration. (Deprecated)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Deprecated - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue
    cookiefile:        File name where cookies should be read from and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser and the profile
                       name/path from where cookies are loaded.
                       Eg: ('chrome', ) or ('vivaldi', 'default')
    nocheckcertificate: Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Can be one of
                               pre_process|before_dl|post_process|after_move.
                               Assumed to be 'post_process' if not given
    post_hooks:        Deprecated - Register a custom postprocessor instead
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
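
                       A minimal hook, as an illustrative sketch:

                           def my_hook(d):
                               if d['status'] == 'finished':
                                   print('Done downloading', d.get('filename'))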
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
                       or {'m3u8': 'ffmpeg'} instead.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, fragment_retries, continuedl,
    noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
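
    A rough usage sketch (the option values here are illustrative, not defaults):

        ydl_opts = {
            'format': 'bestvideo+bestaudio/best',
            'outtmpl': '%(title)s [%(id)s].%(ext)s',
            'noplaylist': True,
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])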
485 """
486
487 _NUMERIC_FIELDS = set((
488 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
489 'timestamp', 'release_timestamp',
490 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
491 'average_rating', 'comment_count', 'age_limit',
492 'start_time', 'end_time',
493 'chapter_number', 'season_number', 'episode_number',
494 'track_number', 'disc_number', 'release_year',
495 ))
496
497 _format_selection_exts = {
498 'audio': {'m4a', 'mp3', 'ogg', 'aac'},
499 'video': {'mp4', 'flv', 'webm', '3gp'},
500 'storyboards': {'mhtml'},
501 }
502
503 params = None
504 _ies = {}
505 _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
506 _printed_messages = set()
507 _first_webpage_request = True
508 _download_retcode = None
509 _num_downloads = None
510 _playlist_level = 0
511 _playlist_urls = set()
512 _screen_file = None
513
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self._ies = {}
        self._ies_instances = {}
        self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        windows_enable_vt_mode()
        self._allow_colors = {
            'screen': not self.params.get('no_color') and supports_terminal_sequences(self._screen_file),
            'err': not self.params.get('no_color') and supports_terminal_sequences(self._err_file),
        }

        if sys.version_info < (3, 6):
            self.report_warning(
                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                ' If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        if 'list-formats' in self.params.get('compat_opts', []):
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            None if self.params.get('format') is None
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        self._setup_opener()

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            pp_class = get_postprocessor(pp_def.pop('key'))
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies_instances list; if there is no instance, it will create a new
        one and add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=False):
        """Print message to stdout"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not quiet or self.params.get('verbose'):
            self._write_string(
                '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
                self._err_file if quiet else self._screen_file)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode"""
        self.to_stdout(
            message, skip_eol, quiet=self.params.get('quiet', False))

    class Styles(Enum):
        HEADERS = 'yellow'
        EMPHASIS = 'light blue'
        ID = 'green'
        DELIM = 'blue'
        ERROR = 'red'
        WARNING = 'yellow'
        SUPPRESS = 'light black'

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        if test_encoding:
            original_text = text
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', 'ascii')
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        if isinstance(f, self.Styles):
            f = f.value
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_screen(self, *args, **kwargs):
        return self._format_text(
            self._screen_file, self._allow_colors['screen'], *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(
            self._err_file, self._allow_colors['err'], *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty, 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, tb=None):
        '''
        Does the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', tb)

    def write_debug(self, message, only_once=False):
        '''Log debug message to the logger, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = '[debug] %s' % message
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False):
        has_drm = info.get('__has_drm')
        msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
        expected = self.params.get('ignore_no_formats_error')
        if forced or not expected:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        # Remove spaces in the default template
        if self.params.get('restrictfilenames'):
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
        else:
            sanitize = lambda x: x
        outtmpl_dict.update({
            k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
            if outtmpl_dict.get(k) is None})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to work around encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))
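
    # Illustrative example of the resolution above (paths and names are hypothetical):
    # with params = {'paths': {'home': '~/Videos', 'thumbnail': 'thumbs'}},
    # get_output_path('thumbnail', 'clip.webp') would expand and join these into
    # something like '/home/user/Videos/thumbs/clip.webp', then sanitize the result.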

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'.
        # That is not what we want, since we need to keep '%%' intact for
        # the template dict substitution step. Work around this with a
        # boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)
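
    # Illustrative: a stray '%' that is not part of a template field gets doubled,
    # e.g. escape_outtmpl('100% %(title)s') should yield '100%% %(title)s',
    # so that the later printf-style substitution leaves it intact.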

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBU]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
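
    # Usage sketch: returns None for a valid template, or the error otherwise:
    #     YoutubeDL.validate_outtmpl('%(title)s-%(id)s.%(ext)s')  # -> None
    #     YoutubeDL.validate_outtmpl('%(title')  # -> a ValueError (unterminated field)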

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('_last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBU]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:\.\d+)?')
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
            (?P<negate>-)?
            (?P<fields>{field})
            (?P<maths>(?:{math_op}{math_field})*)
            (?:>(?P<strf_format>.+?))?
            (?P<alternate>(?<!\\),[^|)]+)?
            (?:\|(?P<default>.*?))?
            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
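
        # Illustrative examples of the field syntax this regex accepts:
        #   %(title)s                  -> plain field
        #   %(duration>%H-%M-%S)s      -> strftime-style formatting after '>'
        #   %(playlist_index+10)03d    -> arithmetic on numeric fields
        #   %(artist,creator|Unknown)s -> alternate fields with a '|' default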

        def _traverse_infodict(k):
            k = k.split('.')
            if k[0] == '':
                k.pop(0)
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

        def get_value(mdict):
            # Object traversal
            value = _traverse_infodict(mdict['fields'])
            # Negative
            if mdict['negate']:
                value = float_or_none(value)
                if value is not None:
                    value *= -1
            # Do maths
            offset_key = mdict['maths']
            if offset_key:
                value = float_or_none(value)
                operator = None
                while offset_key:
                    item = re.match(
                        MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                        offset_key).group(0)
                    offset_key = offset_key[len(item):]
                    if operator is None:
                        operator = MATH_FUNCTIONS[item]
                        continue
                    item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                    offset = float_or_none(item)
                    if offset is None:
                        offset = float_or_none(_traverse_infodict(item))
                    try:
                        value = operator(value, multiplier * offset)
                    except (TypeError, ZeroDivisionError):
                        return None
                    operator = None
            # Datetime formatting
            if mdict['strf_format']:
                value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

            return value

        na = self.params.get('outtmpl_na_placeholder', 'NA')

        def _dumpjson_default(obj):
            if isinstance(obj, (set, LazyList)):
                return list(obj)
            raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields').split('.')[-1] if mobj else ''
            value, default = None, na
            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map:
                fmt = '0{:d}d'.format(field_size_compat_map[key])

            value = default if value is None else value

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(variadic(value)), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitize(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
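
    # Usage sketch (info keys are illustrative):
    #     ydl.evaluate_outtmpl('%(title)s [%(id)s].%(ext)s',
    #                          {'title': 'Demo', 'id': 'abc123', 'ext': 'mp4'})
    #     # -> 'Demo [abc123].mp4'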

    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
            filename = self.evaluate_outtmpl(outtmpl, info_dict, sanitize)

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if filename and force_ext is not None:
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = join_nonempty(fn_groups[0][:trim_file_name], sub_ext, ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""

        filename = self._prepare_filename(info_dict, dir_type or 'default')
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._get_info_extractor_class(ie_key)}
        else:
            ies = self._ies

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)
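
    # Usage sketch: metadata-only extraction, with the download step skipped
    # (URL is illustrative):
    #     info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc',
    #                             download=False)
    #     print(info['title'])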

    def __handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except ReExtractInfo as e:
                if e.expected:
                    self.to_screen(f'{e}; Re-extracting data')
                else:
                    self.to_stderr('\r')
                    self.report_warning(f'{e}; Re-extracting data')
                return wrapper(self, *args, **kwargs)
            except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors'):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

    def _wait_for_video(self, ie_result):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = random.randrange(min_wait or 0, max_wait) if max_wait else min_wait
            self.report_warning('Release time of video is not known')
        elif (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise

    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if extra_info.get('original_url'):
            ie_result.setdefault('original_url', extra_info['original_url'])
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            self._wait_for_video(ie_result)
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
                'webpage_url_basename': url_basename(url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

1433 def process_ie_result(self, ie_result, download=True, extra_info=None):
1434 """
1435 Take the result of the ie (which may be modified) and resolve all unresolved
1436 references (URLs, playlist items).
1437
1438 It will also download the videos if 'download' is true.
1439 Returns the resolved ie_result.
1440 """
1441 if extra_info is None:
1442 extra_info = {}
1443 result_type = ie_result.get('_type', 'video')
1444
1445 if result_type in ('url', 'url_transparent'):
1446 ie_result['url'] = sanitize_url(ie_result['url'])
1447 if ie_result.get('original_url'):
1448 extra_info.setdefault('original_url', ie_result['original_url'])
1449
1450 extract_flat = self.params.get('extract_flat', False)
1451 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1452 or extract_flat is True):
1453 info_copy = ie_result.copy()
1454 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1455 if ie and not ie_result.get('id'):
1456 info_copy['id'] = ie.get_temp_id(ie_result['url'])
1457 self.add_default_extra_info(info_copy, ie, ie_result['url'])
1458 self.add_extra_info(info_copy, extra_info)
1459 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
1460 if self.params.get('force_write_download_archive', False):
1461 self.record_download_archive(info_copy)
1462 return ie_result
1463
1464 if result_type == 'video':
1465 self.add_extra_info(ie_result, extra_info)
1466 ie_result = self.process_video_result(ie_result, download=download)
1467 additional_urls = (ie_result or {}).get('additional_urls')
1468 if additional_urls:
1469 # TODO: Improve MetadataParserPP to allow setting a list
1470 if isinstance(additional_urls, compat_str):
1471 additional_urls = [additional_urls]
1472 self.to_screen(
1473 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1474 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1475 ie_result['additional_entries'] = [
1476 self.extract_info(
1477 url, download, extra_info,
1478 force_generic_extractor=self.params.get('force_generic_extractor'))
1479 for url in additional_urls
1480 ]
1481 return ie_result
1482 elif result_type == 'url':
1483 # We have to add extra_info to the results because it may be
1484 # contained in a playlist
1485 return self.extract_info(
1486 ie_result['url'], download,
1487 ie_key=ie_result.get('ie_key'),
1488 extra_info=extra_info)
1489 elif result_type == 'url_transparent':
1490 # Use the information from the embedding page
1491 info = self.extract_info(
1492 ie_result['url'], ie_key=ie_result.get('ie_key'),
1493 extra_info=extra_info, download=False, process=False)
1494
1495 # extract_info may return None when ignoreerrors is enabled and
1496 # extraction failed with an error, don't crash and return early
1497 # in this case
1498 if not info:
1499 return info
1500
1501 force_properties = {
1502 k: v for k, v in ie_result.items() if v is not None}
1503 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
1504 if f in force_properties:
1505 del force_properties[f]
1506 new_result = info.copy()
1507 new_result.update(force_properties)
1508
1509 # Extracted info may not be a video result (i.e.
1510 # info.get('_type', 'video') != 'video') but rather a URL or
1511 # url_transparent. In such cases outer metadata (from ie_result)
1512 # should be propagated to inner one (info). For this to happen
1513 # _type of info should be overridden with url_transparent. This
1514 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
1515 if new_result.get('_type') == 'url':
1516 new_result['_type'] = 'url_transparent'
1517
1518 return self.process_ie_result(
1519 new_result, download=download, extra_info=extra_info)
1520 elif result_type in ('playlist', 'multi_video'):
1521 # Protect from infinite recursion due to recursively nested playlists
1522 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1523 webpage_url = ie_result['webpage_url']
1524 if webpage_url in self._playlist_urls:
1525 self.to_screen(
1526 '[download] Skipping already downloaded playlist: %s'
1527 % (ie_result.get('title') or ie_result.get('id')))
1528 return
1529
1530 self._playlist_level += 1
1531 self._playlist_urls.add(webpage_url)
1532 self._sanitize_thumbnails(ie_result)
1533 try:
1534 return self.__process_playlist(ie_result, download)
1535 finally:
1536 self._playlist_level -= 1
1537 if not self._playlist_level:
1538 self._playlist_urls.clear()
1539 elif result_type == 'compat_list':
1540 self.report_warning(
1541 'Extractor %s returned a compat_list result. '
1542 'It needs to be updated.' % ie_result.get('extractor'))
1543
1544 def _fixup(r):
1545 self.add_extra_info(r, {
1546 'extractor': ie_result['extractor'],
1547 'webpage_url': ie_result['webpage_url'],
1548 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1549 'extractor_key': ie_result['extractor_key'],
1550 })
1551 return r
1552 ie_result['entries'] = [
1553 self.process_ie_result(_fixup(r), download, extra_info)
1554 for r in ie_result['entries']
1555 ]
1556 return ie_result
1557 else:
1558 raise Exception('Invalid result type: %s' % result_type)
1559
1560 def _ensure_dir_exists(self, path):
1561 return make_dir(path, self.report_error)
1562
1563 def __process_playlist(self, ie_result, download):
1564 # We process each entry in the playlist
1565 playlist = ie_result.get('title') or ie_result.get('id')
1566 self.to_screen('[download] Downloading playlist: %s' % playlist)
1567
1568 if 'entries' not in ie_result:
1569 raise EntryNotInPlaylist('There are no entries')
1570
1571 MissingEntry = object()
1572 incomplete_entries = bool(ie_result.get('requested_entries'))
1573 if incomplete_entries:
1574 def fill_missing_entries(entries, indices):
1575 ret = [MissingEntry] * max(indices)
1576 for i, entry in zip(indices, entries):
1577 ret[i - 1] = entry
1578 return ret
1579 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
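# e.g. requested_entries == [2, 4] with two extracted entries yields
# [MissingEntry, <entry for 2>, MissingEntry, <entry for 4>]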
1580
1581 playlist_results = []
1582
1583 playliststart = self.params.get('playliststart', 1)
1584 playlistend = self.params.get('playlistend')
1585 # For backwards compatibility, interpret -1 as whole list
1586 if playlistend == -1:
1587 playlistend = None
1588
1589 playlistitems_str = self.params.get('playlist_items')
1590 playlistitems = None
1591 if playlistitems_str is not None:
1592 def iter_playlistitems(spec):
1593 for string_segment in spec.split(','):
1594 if '-' in string_segment:
1595 start, end = string_segment.split('-')
1596 for item in range(int(start), int(end) + 1):
1597 yield item
1598 else:
1599 yield int(string_segment)
1600 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
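# e.g. a playlist_items spec of '1-3,7' yields [1, 2, 3, 7]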
1601
1602 ie_entries = ie_result['entries']
1603 msg = (
1604 'Downloading %d videos' if not isinstance(ie_entries, list)
1605 else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
1606
1607 if isinstance(ie_entries, list):
1608 def get_entry(i):
1609 return ie_entries[i - 1]
1610 else:
1611 if not isinstance(ie_entries, (PagedList, LazyList)):
1612 ie_entries = LazyList(ie_entries)
1613
1614 def get_entry(i):
1615 return YoutubeDL.__handle_extraction_exceptions(
1616 lambda self, i: ie_entries[i - 1]
1617 )(self, i)
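# Lazy entries are fetched through the exception handler above, so a
# single failing entry is reported without aborting the whole playlist.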
1618
1619 entries = []
1620 items = playlistitems if playlistitems is not None else itertools.count(playliststart)
1621 for i in items:
1622 if i == 0:
1623 continue
1624 if playlistitems is None and playlistend is not None and playlistend < i:
1625 break
1626 entry = None
1627 try:
1628 entry = get_entry(i)
1629 if entry is MissingEntry:
1630 raise EntryNotInPlaylist()
1631 except (IndexError, EntryNotInPlaylist):
1632 if incomplete_entries:
1633 raise EntryNotInPlaylist(f'Entry {i} cannot be found')
1634 elif not playlistitems:
1635 break
1636 entries.append(entry)
1637 try:
1638 if entry is not None:
1639 self._match_entry(entry, incomplete=True, silent=True)
1640 except (ExistingVideoReached, RejectedVideoReached):
1641 break
1642 ie_result['entries'] = entries
1643
1644 # Save playlist_index before re-ordering
1645 entries = [
1646 ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
1647 for i, entry in enumerate(entries, 1)
1648 if entry is not None]
1649 n_entries = len(entries)
1650
1651 if not playlistitems and (playliststart != 1 or playlistend):
1652 playlistitems = list(range(playliststart, playliststart + n_entries))
1653 ie_result['requested_entries'] = playlistitems
1654
1655 _infojson_written = False
1656 if not self.params.get('simulate') and self.params.get('allow_playlist_files', True):
1657 ie_copy = {
1658 'playlist': playlist,
1659 'playlist_id': ie_result.get('id'),
1660 'playlist_title': ie_result.get('title'),
1661 'playlist_uploader': ie_result.get('uploader'),
1662 'playlist_uploader_id': ie_result.get('uploader_id'),
1663 'playlist_index': 0,
1664 'n_entries': n_entries,
1665 }
1666 ie_copy.update(dict(ie_result))
1667
1668 _infojson_written = self._write_info_json(
1669 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
1670 if _infojson_written is None:
1671 return
1672 if self._write_description('playlist', ie_result,
1673 self.prepare_filename(ie_copy, 'pl_description')) is None:
1674 return
1675 # TODO: This should be passed to ThumbnailsConvertor if necessary
1676 self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1677
1678 if self.params.get('playlistreverse', False):
1679 entries = entries[::-1]
1680 if self.params.get('playlistrandom', False):
1681 random.shuffle(entries)
1682
1683 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1684
1685 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
1686 failures = 0
1687 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
1688 for i, entry_tuple in enumerate(entries, 1):
1689 playlist_index, entry = entry_tuple
1690 if 'playlist-index' in self.params.get('compat_opts', []):
1691 playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
1692 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1693 # This __x_forwarded_for_ip thing is a bit ugly but requires
1694 # minimal changes
1695 if x_forwarded_for:
1696 entry['__x_forwarded_for_ip'] = x_forwarded_for
1697 extra = {
1698 'n_entries': n_entries,
1699 '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
1700 'playlist_index': playlist_index,
1701 'playlist_autonumber': i,
1702 'playlist': playlist,
1703 'playlist_id': ie_result.get('id'),
1704 'playlist_title': ie_result.get('title'),
1705 'playlist_uploader': ie_result.get('uploader'),
1706 'playlist_uploader_id': ie_result.get('uploader_id'),
1707 'extractor': ie_result['extractor'],
1708 'webpage_url': ie_result['webpage_url'],
1709 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1710 'extractor_key': ie_result['extractor_key'],
1711 }
1712
1713 if self._match_entry(entry, incomplete=True) is not None:
1714 continue
1715
1716 entry_result = self.__process_iterable_entry(entry, download, extra)
1717 if not entry_result:
1718 failures += 1
1719 if failures >= max_failures:
1720 self.report_error(
1721 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
1722 break
1723 playlist_results.append(entry_result)
1724 ie_result['entries'] = playlist_results
1725
1726 # Write the updated info to json
1727 if _infojson_written and self._write_info_json(
1728 'updated playlist', ie_result,
1729 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
1730 return
1731 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1732 return ie_result
1733
1734 @__handle_extraction_exceptions
1735 def __process_iterable_entry(self, entry, download, extra_info):
1736 return self.process_ie_result(
1737 entry, download=download, extra_info=extra_info)
1738
1739 def _build_format_filter(self, filter_spec):
1740 " Returns a function to filter the formats according to the filter_spec "
1741
1742 OPERATORS = {
1743 '<': operator.lt,
1744 '<=': operator.le,
1745 '>': operator.gt,
1746 '>=': operator.ge,
1747 '=': operator.eq,
1748 '!=': operator.ne,
1749 }
1750 operator_rex = re.compile(r'''(?x)\s*
1751 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1752 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1753 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
1754 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1755 m = operator_rex.fullmatch(filter_spec)
1756 if m:
1757 try:
1758 comparison_value = int(m.group('value'))
1759 except ValueError:
1760 comparison_value = parse_filesize(m.group('value'))
1761 if comparison_value is None:
1762 comparison_value = parse_filesize(m.group('value') + 'B')
1763 if comparison_value is None:
1764 raise ValueError(
1765 'Invalid value %r in format specification %r' % (
1766 m.group('value'), filter_spec))
1767 op = OPERATORS[m.group('op')]
1768
1769 if not m:
1770 STR_OPERATORS = {
1771 '=': operator.eq,
1772 '^=': lambda attr, value: attr.startswith(value),
1773 '$=': lambda attr, value: attr.endswith(value),
1774 '*=': lambda attr, value: value in attr,
1775 }
1776 str_operator_rex = re.compile(r'''(?x)\s*
1777 (?P<key>[a-zA-Z0-9._-]+)\s*
1778 (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1779 (?P<value>[a-zA-Z0-9._-]+)\s*
1780 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1781 m = str_operator_rex.fullmatch(filter_spec)
1782 if m:
1783 comparison_value = m.group('value')
1784 str_op = STR_OPERATORS[m.group('op')]
1785 if m.group('negation'):
1786 op = lambda attr, value: not str_op(attr, value)
1787 else:
1788 op = str_op
1789
1790 if not m:
1791 raise SyntaxError('Invalid filter specification %r' % filter_spec)
1792
1793 def _filter(f):
1794 actual_value = f.get(m.group('key'))
1795 if actual_value is None:
1796 return m.group('none_inclusive')
1797 return op(actual_value, comparison_value)
1798 return _filter
1799
1800 def _check_formats(self, formats):
1801 for f in formats:
1802 self.to_screen('[info] Testing format %s' % f['format_id'])
1803 path = self.get_output_path('temp')
1804 if not self._ensure_dir_exists(f'{path}/'):
1805 continue
1806 temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
1807 temp_file.close()
1808 try:
1809 success, _ = self.dl(temp_file.name, f, test=True)
1810 except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
1811 success = False
1812 finally:
1813 if os.path.exists(temp_file.name):
1814 try:
1815 os.remove(temp_file.name)
1816 except OSError:
1817 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
1818 if success:
1819 yield f
1820 else:
1821 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1822
1823 def _default_format_spec(self, info_dict, download=True):
1824
1825 def can_merge():
1826 merger = FFmpegMergerPP(self)
1827 return merger.available and merger.can_merge()
1828
1829 prefer_best = (
1830 not self.params.get('simulate')
1831 and download
1832 and (
1833 not can_merge()
1834 or info_dict.get('is_live', False)
1835 or self.outtmpl_dict['default'] == '-'))
1836 compat = (
1837 prefer_best
1838 or self.params.get('allow_multiple_audio_streams', False)
1839 or 'format-spec' in self.params.get('compat_opts', []))
1840
1841 return (
1842 'best/bestvideo+bestaudio' if prefer_best
1843 else 'bestvideo*+bestaudio/best' if not compat
1844 else 'bestvideo+bestaudio/best')
1845
1846 def build_format_selector(self, format_spec):
1847 def syntax_error(note, start):
1848 message = (
1849 'Invalid format specification: '
1850 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1851 return SyntaxError(message)
1852
1853 PICKFIRST = 'PICKFIRST'
1854 MERGE = 'MERGE'
1855 SINGLE = 'SINGLE'
1856 GROUP = 'GROUP'
1857 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
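# Rough illustration (structure simplified): 'bestvideo+bestaudio/best'
# parses into a PICKFIRST selector whose first choice is a MERGE of the
# 'bestvideo' and 'bestaudio' atoms and whose fallback is the 'best' atom.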
1858
1859 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1860 'video': self.params.get('allow_multiple_video_streams', False)}
1861
1862 check_formats = self.params.get('check_formats') == 'selected'
1863
1864 def _parse_filter(tokens):
1865 filter_parts = []
1866 for type, string, start, _, _ in tokens:
1867 if type == tokenize.OP and string == ']':
1868 return ''.join(filter_parts)
1869 else:
1870 filter_parts.append(string)
1871
1872 def _remove_unused_ops(tokens):
1873 # Remove operators that we don't use and join them with the surrounding strings
1874 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1875 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1876 last_string, last_start, last_end, last_line = None, None, None, None
1877 for type, string, start, end, line in tokens:
1878 if type == tokenize.OP and string == '[':
1879 if last_string:
1880 yield tokenize.NAME, last_string, last_start, last_end, last_line
1881 last_string = None
1882 yield type, string, start, end, line
1883 # everything inside brackets will be handled by _parse_filter
1884 for type, string, start, end, line in tokens:
1885 yield type, string, start, end, line
1886 if type == tokenize.OP and string == ']':
1887 break
1888 elif type == tokenize.OP and string in ALLOWED_OPS:
1889 if last_string:
1890 yield tokenize.NAME, last_string, last_start, last_end, last_line
1891 last_string = None
1892 yield type, string, start, end, line
1893 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1894 if not last_string:
1895 last_string = string
1896 last_start = start
1897 last_end = end
1898 else:
1899 last_string += string
1900 if last_string:
1901 yield tokenize.NAME, last_string, last_start, last_end, last_line
1902
1903 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1904 selectors = []
1905 current_selector = None
1906 for type, string, start, _, _ in tokens:
1907 # ENCODING is only defined in python 3.x
1908 if type == getattr(tokenize, 'ENCODING', None):
1909 continue
1910 elif type in [tokenize.NAME, tokenize.NUMBER]:
1911 current_selector = FormatSelector(SINGLE, string, [])
1912 elif type == tokenize.OP:
1913 if string == ')':
1914 if not inside_group:
1915 # ')' will be handled by the parentheses group
1916 tokens.restore_last_token()
1917 break
1918 elif inside_merge and string in ['/', ',']:
1919 tokens.restore_last_token()
1920 break
1921 elif inside_choice and string == ',':
1922 tokens.restore_last_token()
1923 break
1924 elif string == ',':
1925 if not current_selector:
1926 raise syntax_error('"," must follow a format selector', start)
1927 selectors.append(current_selector)
1928 current_selector = None
1929 elif string == '/':
1930 if not current_selector:
1931 raise syntax_error('"/" must follow a format selector', start)
1932 first_choice = current_selector
1933 second_choice = _parse_format_selection(tokens, inside_choice=True)
1934 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
1935 elif string == '[':
1936 if not current_selector:
1937 current_selector = FormatSelector(SINGLE, 'best', [])
1938 format_filter = _parse_filter(tokens)
1939 current_selector.filters.append(format_filter)
1940 elif string == '(':
1941 if current_selector:
1942 raise syntax_error('Unexpected "("', start)
1943 group = _parse_format_selection(tokens, inside_group=True)
1944 current_selector = FormatSelector(GROUP, group, [])
1945 elif string == '+':
1946 if not current_selector:
1947 raise syntax_error('Unexpected "+"', start)
1948 selector_1 = current_selector
1949 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1950 if not selector_2:
1951 raise syntax_error('Expected a selector', start)
1952 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
1953 else:
1954 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1955 elif type == tokenize.ENDMARKER:
1956 break
1957 if current_selector:
1958 selectors.append(current_selector)
1959 return selectors
1960
1961 def _merge(formats_pair):
1962 format_1, format_2 = formats_pair
1963
1964 formats_info = []
1965 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1966 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1967
1968 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1969 get_no_more = {'video': False, 'audio': False}
1970 for (i, fmt_info) in enumerate(formats_info):
1971 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
1972 formats_info.pop(i)
1973 continue
1974 for aud_vid in ['audio', 'video']:
1975 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1976 if get_no_more[aud_vid]:
1977 formats_info.pop(i)
1978 break
1979 get_no_more[aud_vid] = True
1980
1981 if len(formats_info) == 1:
1982 return formats_info[0]
1983
1984 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1985 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1986
1987 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1988 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1989
1990 output_ext = self.params.get('merge_output_format')
1991 if not output_ext:
1992 if the_only_video:
1993 output_ext = the_only_video['ext']
1994 elif the_only_audio and not video_fmts:
1995 output_ext = the_only_audio['ext']
1996 else:
1997 output_ext = 'mkv'
1998
1999 filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
2000
2001 new_dict = {
2002 'requested_formats': formats_info,
2003 'format': '+'.join(filtered('format')),
2004 'format_id': '+'.join(filtered('format_id')),
2005 'ext': output_ext,
2006 'protocol': '+'.join(map(determine_protocol, formats_info)),
2007 'language': '+'.join(orderedSet(filtered('language'))) or None,
2008 'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
2009 'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
2010 'tbr': sum(filtered('tbr', 'vbr', 'abr')),
2011 }
2012
2013 if the_only_video:
2014 new_dict.update({
2015 'width': the_only_video.get('width'),
2016 'height': the_only_video.get('height'),
2017 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
2018 'fps': the_only_video.get('fps'),
2019 'dynamic_range': the_only_video.get('dynamic_range'),
2020 'vcodec': the_only_video.get('vcodec'),
2021 'vbr': the_only_video.get('vbr'),
2022 'stretched_ratio': the_only_video.get('stretched_ratio'),
2023 })
2024
2025 if the_only_audio:
2026 new_dict.update({
2027 'acodec': the_only_audio.get('acodec'),
2028 'abr': the_only_audio.get('abr'),
2029 'asr': the_only_audio.get('asr'),
2030 })
2031
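# Illustrative outcome (hypothetical formats): merging a video-only
# {'format_id': '137', 'ext': 'mp4', 'vbr': 4400} with an audio-only
# {'format_id': '140', 'ext': 'm4a', 'abr': 128} yields format_id
# '137+140', ext 'mp4' (the only video's extension) and tbr 4528.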
2032 return new_dict
2033
2034 def _check_formats(formats):
2035 if not check_formats:
2036 yield from formats
2037 return
2038 yield from self._check_formats(formats)
2039
2040 def _build_selector_function(selector):
2041 if isinstance(selector, list): # ,
2042 fs = [_build_selector_function(s) for s in selector]
2043
2044 def selector_function(ctx):
2045 for f in fs:
2046 yield from f(ctx)
2047 return selector_function
2048
2049 elif selector.type == GROUP: # ()
2050 selector_function = _build_selector_function(selector.selector)
2051
2052 elif selector.type == PICKFIRST: # /
2053 fs = [_build_selector_function(s) for s in selector.selector]
2054
2055 def selector_function(ctx):
2056 for f in fs:
2057 picked_formats = list(f(ctx))
2058 if picked_formats:
2059 return picked_formats
2060 return []
2061
2062 elif selector.type == MERGE: # +
2063 selector_1, selector_2 = map(_build_selector_function, selector.selector)
2064
2065 def selector_function(ctx):
2066 for pair in itertools.product(
2067 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
2068 yield _merge(pair)
2069
2070 elif selector.type == SINGLE: # atom
2071 format_spec = selector.selector or 'best'
2072
2073 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
2074 if format_spec == 'all':
2075 def selector_function(ctx):
2076 yield from _check_formats(ctx['formats'][::-1])
2077 elif format_spec == 'mergeall':
2078 def selector_function(ctx):
2079 formats = list(_check_formats(ctx['formats']))
2080 if not formats:
2081 return
2082 merged_format = formats[-1]
2083 for f in formats[-2::-1]:
2084 merged_format = _merge((merged_format, f))
2085 yield merged_format
2086
2087 else:
2088 format_fallback, format_reverse, format_idx = False, True, 1
2089 mobj = re.match(
2090 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
2091 format_spec)
2092 if mobj is not None:
2093 format_idx = int_or_none(mobj.group('n'), default=1)
2094 format_reverse = mobj.group('bw')[0] == 'b'
2095 format_type = (mobj.group('type') or [None])[0]
2096 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
2097 format_modified = mobj.group('mod') is not None
2098
2099 format_fallback = not format_type and not format_modified # for b, w
2100 _filter_f = (
2101 (lambda f: f.get('%scodec' % format_type) != 'none')
2102 if format_type and format_modified # bv*, ba*, wv*, wa*
2103 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
2104 if format_type # bv, ba, wv, wa
2105 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
2106 if not format_modified # b, w
2107 else lambda f: True) # b*, w*
2108 filter_f = lambda f: _filter_f(f) and (
2109 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
2110 else:
2111 if format_spec in self._format_selection_exts['audio']:
2112 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
2113 elif format_spec in self._format_selection_exts['video']:
2114 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
2115 elif format_spec in self._format_selection_exts['storyboards']:
2116 filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
2117 else:
2118 filter_f = lambda f: f.get('format_id') == format_spec # id
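# Rough examples of the atoms handled above:
#   'b' / 'best' -> best single format with both audio and video
#   'bv*.2'      -> second best format containing a video stream
#   'ba'         -> best audio-only format
#   'mp4'        -> best mp4 format with both audio and video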
2119
2120 def selector_function(ctx):
2121 formats = list(ctx['formats'])
2122 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
2123 if format_fallback and ctx['incomplete_formats'] and not matches:
2124 # for extractors with incomplete formats (audio only (soundcloud)
2125 # or video only (imgur)), best/worst will fall back to the
2126 # best/worst {video,audio}-only format
2127 matches = formats
2128 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
2129 try:
2130 yield matches[format_idx - 1]
2131 except IndexError:
2132 return
2133
2134 filters = [self._build_format_filter(f) for f in selector.filters]
2135
2136 def final_selector(ctx):
2137 ctx_copy = copy.deepcopy(ctx)
2138 for _filter in filters:
2139 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
2140 return selector_function(ctx_copy)
2141 return final_selector
2142
2143 stream = io.BytesIO(format_spec.encode('utf-8'))
2144 try:
2145 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
2146 except tokenize.TokenError:
2147 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
2148
2149 class TokenIterator(object):
2150 def __init__(self, tokens):
2151 self.tokens = tokens
2152 self.counter = 0
2153
2154 def __iter__(self):
2155 return self
2156
2157 def __next__(self):
2158 if self.counter >= len(self.tokens):
2159 raise StopIteration()
2160 value = self.tokens[self.counter]
2161 self.counter += 1
2162 return value
2163
2164 next = __next__
2165
2166 def restore_last_token(self):
2167 self.counter -= 1
2168
2169 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
2170 return _build_selector_function(parsed_selector)
2171
2172 def _calc_headers(self, info_dict):
2173 res = std_headers.copy()
2174
2175 add_headers = info_dict.get('http_headers')
2176 if add_headers:
2177 res.update(add_headers)
2178
2179 cookies = self._calc_cookies(info_dict)
2180 if cookies:
2181 res['Cookie'] = cookies
2182
2183 if 'X-Forwarded-For' not in res:
2184 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2185 if x_forwarded_for_ip:
2186 res['X-Forwarded-For'] = x_forwarded_for_ip
2187
2188 return res
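# Precedence sketch: extractor-supplied http_headers override std_headers;
# a cookiejar match adds 'Cookie'; 'X-Forwarded-For' is only filled in
# when the extractor did not already set it.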
2189
2190 def _calc_cookies(self, info_dict):
2191 pr = sanitized_Request(info_dict['url'])
2192 self.cookiejar.add_cookie_header(pr)
2193 return pr.get_header('Cookie')
2194
2195 def _sort_thumbnails(self, thumbnails):
2196 thumbnails.sort(key=lambda t: (
2197 t.get('preference') if t.get('preference') is not None else -1,
2198 t.get('width') if t.get('width') is not None else -1,
2199 t.get('height') if t.get('height') is not None else -1,
2200 t.get('id') if t.get('id') is not None else '',
2201 t.get('url')))
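# Sorts worst-to-best, so thumbnails[-1] is the preferred one
# (process_video_result uses the last entry as the default thumbnail).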
2202
2203 def _sanitize_thumbnails(self, info_dict):
2204 thumbnails = info_dict.get('thumbnails')
2205 if thumbnails is None:
2206 thumbnail = info_dict.get('thumbnail')
2207 if thumbnail:
2208 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2209 if not thumbnails:
2210 return
2211
2212 def check_thumbnails(thumbnails):
2213 for t in thumbnails:
2214 self.to_screen(f'[info] Testing thumbnail {t["id"]}')
2215 try:
2216 self.urlopen(HEADRequest(t['url']))
2217 except network_exceptions as err:
2218 self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
2219 continue
2220 yield t
2221
2222 self._sort_thumbnails(thumbnails)
2223 for i, t in enumerate(thumbnails):
2224 if t.get('id') is None:
2225 t['id'] = '%d' % i
2226 if t.get('width') and t.get('height'):
2227 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2228 t['url'] = sanitize_url(t['url'])
2229
2230 if self.params.get('check_formats') is True:
2231 info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
2232 else:
2233 info_dict['thumbnails'] = thumbnails
2234
2235 def process_video_result(self, info_dict, download=True):
2236 assert info_dict.get('_type', 'video') == 'video'
2237
2238 if 'id' not in info_dict:
2239 raise ExtractorError('Missing "id" field in extractor result')
2240 if 'title' not in info_dict:
2241 raise ExtractorError('Missing "title" field in extractor result',
2242 video_id=info_dict['id'], ie=info_dict['extractor'])
2243
2244 def report_force_conversion(field, field_not, conversion):
2245 self.report_warning(
2246 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2247 % (field, field_not, conversion))
2248
2249 def sanitize_string_field(info, string_field):
2250 field = info.get(string_field)
2251 if field is None or isinstance(field, compat_str):
2252 return
2253 report_force_conversion(string_field, 'a string', 'string')
2254 info[string_field] = compat_str(field)
2255
2256 def sanitize_numeric_fields(info):
2257 for numeric_field in self._NUMERIC_FIELDS:
2258 field = info.get(numeric_field)
2259 if field is None or isinstance(field, compat_numeric_types):
2260 continue
2261 report_force_conversion(numeric_field, 'numeric', 'int')
2262 info[numeric_field] = int_or_none(field)
2263
2264 sanitize_string_field(info_dict, 'id')
2265 sanitize_numeric_fields(info_dict)
2266
2267 if 'playlist' not in info_dict:
2268 # It isn't part of a playlist
2269 info_dict['playlist'] = None
2270 info_dict['playlist_index'] = None
2271
2272 self._sanitize_thumbnails(info_dict)
2273
2274 thumbnail = info_dict.get('thumbnail')
2275 thumbnails = info_dict.get('thumbnails')
2276 if thumbnail:
2277 info_dict['thumbnail'] = sanitize_url(thumbnail)
2278 elif thumbnails:
2279 info_dict['thumbnail'] = thumbnails[-1]['url']
2280
2281 if info_dict.get('display_id') is None and 'id' in info_dict:
2282 info_dict['display_id'] = info_dict['id']
2283
2284 if info_dict.get('duration') is not None:
2285 info_dict['duration_string'] = formatSeconds(info_dict['duration'])
2286
2287 for ts_key, date_key in (
2288 ('timestamp', 'upload_date'),
2289 ('release_timestamp', 'release_date'),
2290 ):
2291 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2292 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2293 # see http://bugs.python.org/issue1646728)
2294 try:
2295 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2296 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2297 except (ValueError, OverflowError, OSError):
2298 pass
2299
2300 live_keys = ('is_live', 'was_live')
2301 live_status = info_dict.get('live_status')
2302 if live_status is None:
2303 for key in live_keys:
2304 if info_dict.get(key) is False:
2305 continue
2306 if info_dict.get(key):
2307 live_status = key
2308 break
2309 if all(info_dict.get(key) is False for key in live_keys):
2310 live_status = 'not_live'
2311 if live_status:
2312 info_dict['live_status'] = live_status
2313 for key in live_keys:
2314 if info_dict.get(key) is None:
2315 info_dict[key] = (live_status == key)
2316
2317 # Auto-generate title fields corresponding to the *_number fields when missing
2318 # in order to always have clean titles. This is very common for TV series.
2319 for field in ('chapter', 'season', 'episode'):
2320 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2321 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2322
2323 for cc_kind in ('subtitles', 'automatic_captions'):
2324 cc = info_dict.get(cc_kind)
2325 if cc:
2326 for _, subtitle in cc.items():
2327 for subtitle_format in subtitle:
2328 if subtitle_format.get('url'):
2329 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2330 if subtitle_format.get('ext') is None:
2331 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2332
2333 automatic_captions = info_dict.get('automatic_captions')
2334 subtitles = info_dict.get('subtitles')
2335
2336 info_dict['requested_subtitles'] = self.process_subtitles(
2337 info_dict['id'], subtitles, automatic_captions)
2338
2339 if info_dict.get('formats') is None:
2340 # There's only one format available
2341 formats = [info_dict]
2342 else:
2343 formats = info_dict['formats']
2344
2345 info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
2346 if not self.params.get('allow_unplayable_formats'):
2347 formats = [f for f in formats if not f.get('has_drm')]
2348
2349 if not formats:
2350 self.raise_no_formats(info_dict)
2351
2352 def is_wellformed(f):
2353 url = f.get('url')
2354 if not url:
2355 self.report_warning(
2356 '"url" field is missing or empty - skipping format; '
2357 'there is an error in the extractor')
2358 return False
2359 if isinstance(url, bytes):
2360 sanitize_string_field(f, 'url')
2361 return True
2362
2363 # Filter out malformed formats for better extraction robustness
2364 formats = list(filter(is_wellformed, formats))
2365
2366 formats_dict = {}
2367
2368 # We check that all the formats have the format and format_id fields
2369 for i, format in enumerate(formats):
2370 sanitize_string_field(format, 'format_id')
2371 sanitize_numeric_fields(format)
2372 format['url'] = sanitize_url(format['url'])
2373 if not format.get('format_id'):
2374 format['format_id'] = compat_str(i)
2375 else:
2376 # Sanitize format_id from characters used in format selector expression
2377 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
2378 format_id = format['format_id']
2379 if format_id not in formats_dict:
2380 formats_dict[format_id] = []
2381 formats_dict[format_id].append(format)
2382
2383 # Make sure all formats have unique format_id
2384 common_exts = set(itertools.chain(*self._format_selection_exts.values()))
2385 for format_id, ambiguous_formats in formats_dict.items():
2386 ambiguous_id = len(ambiguous_formats) > 1
2387 for i, format in enumerate(ambiguous_formats):
2388 if ambiguous_id:
2389 format['format_id'] = '%s-%d' % (format_id, i)
2390 if format.get('ext') is None:
2391 format['ext'] = determine_ext(format['url']).lower()
2392 # Ensure there is no conflict between id and ext in format selection
2393 # See https://github.com/yt-dlp/yt-dlp/issues/1282
2394 if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
2395 format['format_id'] = 'f%s' % format['format_id']
2396
2397 for i, format in enumerate(formats):
2398 if format.get('format') is None:
2399 format['format'] = '{id} - {res}{note}'.format(
2400 id=format['format_id'],
2401 res=self.format_resolution(format),
2402 note=format_field(format, 'format_note', ' (%s)'),
2403 )
2404 if format.get('protocol') is None:
2405 format['protocol'] = determine_protocol(format)
2406 if format.get('resolution') is None:
2407 format['resolution'] = self.format_resolution(format, default=None)
2408 if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
2409 format['dynamic_range'] = 'SDR'
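# Rough size estimate from duration and total bitrate: tbr is in KBit/s,
# so the computation below is duration * tbr * 1024 / 8 bytes
# (e.g. 60 s at 1000 KBit/s gives ~7.68 MB).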
2410 if (info_dict.get('duration') and format.get('tbr')
2411 and not format.get('filesize') and not format.get('filesize_approx')):
2412 format['filesize_approx'] = info_dict['duration'] * format['tbr'] * (1024 / 8)
2413
2414 # Add HTTP headers, so that external programs can use them from the
2415 # json output
2416 full_format_info = info_dict.copy()
2417 full_format_info.update(format)
2418 format['http_headers'] = self._calc_headers(full_format_info)
2419 # Remove private housekeeping stuff
2420 if '__x_forwarded_for_ip' in info_dict:
2421 del info_dict['__x_forwarded_for_ip']
2422
2423 # TODO Central sorting goes here
2424
2425 if self.params.get('check_formats') is True:
2426 formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
2427
2428 if not formats or formats[0] is not info_dict:
2429 # Only set the 'formats' field if the original info_dict lists them;
2430 # otherwise we would end up with a circular reference: the first (and only)
2431 # element of the 'formats' field in info_dict would be info_dict itself,
2432 # which can't be exported to json
2433 info_dict['formats'] = formats
2434
2435 info_dict, _ = self.pre_process(info_dict)
2436
2437 # The pre-processors may have modified the formats
2438 formats = info_dict.get('formats', [info_dict])
2439
2440 if self.params.get('list_thumbnails'):
2441 self.list_thumbnails(info_dict)
2442 if self.params.get('listformats'):
2443 if not info_dict.get('formats') and not info_dict.get('url'):
2444 self.to_screen('%s has no formats' % info_dict['id'])
2445 else:
2446 self.list_formats(info_dict)
2447 if self.params.get('listsubtitles'):
2448 if 'automatic_captions' in info_dict:
2449 self.list_subtitles(
2450 info_dict['id'], automatic_captions, 'automatic captions')
2451 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2452 list_only = self.params.get('simulate') is None and (
2453 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
2454 if list_only:
2455 # Without this printing, -F --print-json will not work
2456 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
2457 return
2458
2459 format_selector = self.format_selector
2460 if format_selector is None:
2461 req_format = self._default_format_spec(info_dict, download=download)
2462 self.write_debug('Default format spec: %s' % req_format)
2463 format_selector = self.build_format_selector(req_format)
2464
2465 # While in format selection we may need access to the original
2466 # format set in order to calculate some metrics or do some processing.
2467 # For now we need to be able to guess whether the original formats provided
2468 # by the extractor are incomplete (i.e. whether the extractor provides only
2469 # video-only or audio-only formats) for proper format selection for
2470 # extractors with such incomplete formats (see
2471 # https://github.com/ytdl-org/youtube-dl/pull/5556).
2472 # Since formats may be filtered during format selection and may no longer match
2473 # the original formats, the results may be incorrect. Thus the original formats
2474 # or pre-calculated metrics should be passed to format selection routines
2475 # as well.
2476 # We will pass a context object containing all necessary additional data
2477 # instead of just formats.
2478 # This fixes incorrect format selection issue (see
2479 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2480 incomplete_formats = (
2481 # All formats are video-only or
2482 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
2483 # all formats are audio-only
2484 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
2485
2486 ctx = {
2487 'formats': formats,
2488 'incomplete_formats': incomplete_formats,
2489 }
2490
2491 formats_to_download = list(format_selector(ctx))
2492 if not formats_to_download:
2493 if not self.params.get('ignore_no_formats_error'):
2494 raise ExtractorError('Requested format is not available', expected=True,
2495 video_id=info_dict['id'], ie=info_dict['extractor'])
2496 else:
2497 self.report_warning('Requested format is not available')
2498 # Process what we can, even without any available formats.
2499 self.process_info(dict(info_dict))
2500 elif download:
2501 self.to_screen(
2502 '[info] %s: Downloading %d format(s): %s' % (
2503 info_dict['id'], len(formats_to_download),
2504 ", ".join([f['format_id'] for f in formats_to_download])))
2505 for fmt in formats_to_download:
2506 new_info = dict(info_dict)
2507 # Save a reference to the original info_dict so that it can be modified in process_info if needed
2508 new_info['__original_infodict'] = info_dict
2509 new_info.update(fmt)
2510 self.process_info(new_info)
2511 # We update the info dict with the selected best quality format (backwards compatibility)
2512 if formats_to_download:
2513 info_dict.update(formats_to_download[-1])
2514 return info_dict
2515
2516 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2517 """Select the requested subtitles and their format"""
2518 available_subs = {}
2519 if normal_subtitles and self.params.get('writesubtitles'):
2520 available_subs.update(normal_subtitles)
2521 if automatic_captions and self.params.get('writeautomaticsub'):
2522 for lang, cap_info in automatic_captions.items():
2523 if lang not in available_subs:
2524 available_subs[lang] = cap_info
2525
2526 if ((not self.params.get('writesubtitles')
2527 and not self.params.get('writeautomaticsub'))
2528 or not available_subs):
2529 return None
2530
2531 all_sub_langs = available_subs.keys()
2532 if self.params.get('allsubtitles', False):
2533 requested_langs = all_sub_langs
2534 elif self.params.get('subtitleslangs', False):
2535 # A list is used so that the order of languages will be the same as
2536 # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
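# Example: ['en.*', '-en-US'] first adds every English variant and then
# discards en-US, leaving e.g. ['en', 'en-GB'].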
2537 requested_langs = []
2538 for lang_re in self.params.get('subtitleslangs'):
2539 if lang_re == 'all':
2540 requested_langs.extend(all_sub_langs)
2541 continue
2542 discard = lang_re[0] == '-'
2543 if discard:
2544 lang_re = lang_re[1:]
2545 current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
2546 if discard:
2547 for lang in current_langs:
2548 while lang in requested_langs:
2549 requested_langs.remove(lang)
2550 else:
2551 requested_langs.extend(current_langs)
2552 requested_langs = orderedSet(requested_langs)
2553 elif 'en' in available_subs:
2554 requested_langs = ['en']
2555 else:
2556 requested_langs = [list(all_sub_langs)[0]]
2557 if requested_langs:
2558 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
2559
2560 formats_query = self.params.get('subtitlesformat', 'best')
2561 formats_preference = formats_query.split('/') if formats_query else []
2562 subs = {}
2563 for lang in requested_langs:
2564 formats = available_subs.get(lang)
2565 if formats is None:
2566 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2567 continue
2568 for ext in formats_preference:
2569 if ext == 'best':
2570 f = formats[-1]
2571 break
2572 matches = list(filter(lambda f: f['ext'] == ext, formats))
2573 if matches:
2574 f = matches[-1]
2575 break
2576 else:
2577 f = formats[-1]
2578 self.report_warning(
2579 'No subtitle format found matching "%s" for language %s, '
2580 'using %s' % (formats_query, lang, f['ext']))
2581 subs[lang] = f
2582 return subs
2583
2584 def __forced_printings(self, info_dict, filename, incomplete):
2585 def print_mandatory(field, actual_field=None):
2586 if actual_field is None:
2587 actual_field = field
2588 if (self.params.get('force%s' % field, False)
2589 and (not incomplete or info_dict.get(actual_field) is not None)):
2590 self.to_stdout(info_dict[actual_field])
2591
2592 def print_optional(field):
2593 if (self.params.get('force%s' % field, False)
2594 and info_dict.get(field) is not None):
2595 self.to_stdout(info_dict[field])
2596
2597 info_dict = info_dict.copy()
2598 if filename is not None:
2599 info_dict['filename'] = filename
2600 if info_dict.get('requested_formats') is not None:
2601 # For RTMP URLs, also include the playpath
2602 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2603 elif 'url' in info_dict:
2604 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2605
2606 if self.params.get('forceprint') or self.params.get('forcejson'):
2607 self.post_extract(info_dict)
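# Each forceprint entry is an output template; a trailing '=' is shorthand,
# e.g. 'title=' expands to 'title = %(title)s' while bare 'title' expands
# to '%(title)s'.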
2608 for tmpl in self.params.get('forceprint', []):
2609 mobj = re.match(r'\w+(=?)$', tmpl)
2610 if mobj and mobj.group(1):
2611 tmpl = f'{tmpl[:-1]} = %({tmpl[:-1]})s'
2612 elif mobj:
2613 tmpl = '%({})s'.format(tmpl)
2614 self.to_stdout(self.evaluate_outtmpl(tmpl, info_dict))
2615
2616 print_mandatory('title')
2617 print_mandatory('id')
2618 print_mandatory('url', 'urls')
2619 print_optional('thumbnail')
2620 print_optional('description')
2621 print_optional('filename')
2622 if self.params.get('forceduration') and info_dict.get('duration') is not None:
2623 self.to_stdout(formatSeconds(info_dict['duration']))
2624 print_mandatory('format')
2625
2626 if self.params.get('forcejson'):
2627 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
2628
2629 def dl(self, name, info, subtitle=False, test=False):
2630 if not info.get('url'):
2631 self.raise_no_formats(info, True)
2632
2633 if test:
2634 verbose = self.params.get('verbose')
2635 params = {
2636 'test': True,
2637 'quiet': self.params.get('quiet') or not verbose,
2638 'verbose': verbose,
2639 'noprogress': not verbose,
2640 'nopart': True,
2641 'skip_unavailable_fragments': False,
2642 'keep_fragments': False,
2643 'overwrites': True,
2644 '_no_ytdl_file': True,
2645 }
2646 else:
2647 params = self.params
2648 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
2649 if not test:
2650 for ph in self._progress_hooks:
2651 fd.add_progress_hook(ph)
2652 urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
2653 self.write_debug('Invoking downloader on "%s"' % urls)
2654
2655 new_info = copy.deepcopy(self._copy_infodict(info))
2656 if new_info.get('http_headers') is None:
2657 new_info['http_headers'] = self._calc_headers(new_info)
2658 return fd.download(name, new_info, subtitle)
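# Callers unpack the return value as (success, real_download), e.g.:
#   success, real_download = self.dl(temp_filename, info_dict)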
2659
2660 def process_info(self, info_dict):
2661 """Process a single resolved IE result."""
2662
2663 assert info_dict.get('_type', 'video') == 'video'
2664
2665 max_downloads = self.params.get('max_downloads')
2666 if max_downloads is not None:
2667 if self._num_downloads >= int(max_downloads):
2668 raise MaxDownloadsReached()
2669
2670 # TODO: backward compatibility, to be removed
2671 info_dict['fulltitle'] = info_dict['title']
2672
2673 if 'format' not in info_dict and 'ext' in info_dict:
2674 info_dict['format'] = info_dict['ext']
2675
2676 if self._match_entry(info_dict) is not None:
2677 return
2678
2679 self.post_extract(info_dict)
2680 self._num_downloads += 1
2681
2682 # info_dict['_filename'] needs to be set for backward compatibility
2683 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2684 temp_filename = self.prepare_filename(info_dict, 'temp')
2685 files_to_move = {}
2686
2687 # Forced printings
2688 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
2689
2690 if self.params.get('simulate'):
2691 if self.params.get('force_write_download_archive', False):
2692 self.record_download_archive(info_dict)
2693 # Do nothing else if in simulate mode
2694 return
2695
2696 if full_filename is None:
2697 return
2698 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2699 return
2700 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2701 return
2702
2703 if self._write_description('video', info_dict,
2704 self.prepare_filename(info_dict, 'description')) is None:
2705 return
2706
2707 sub_files = self._write_subtitles(info_dict, temp_filename)
2708 if sub_files is None:
2709 return
2710 files_to_move.update(dict(sub_files))
2711
2712 thumb_files = self._write_thumbnails(
2713 'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
2714 if thumb_files is None:
2715 return
2716 files_to_move.update(dict(thumb_files))
2717
2718 infofn = self.prepare_filename(info_dict, 'infojson')
2719 _infojson_written = self._write_info_json('video', info_dict, infofn)
2720 if _infojson_written:
2721 info_dict['infojson_filename'] = infofn
2722 # For backward compatibility, even though it was a private field
2723 info_dict['__infojson_filename'] = infofn
2724 elif _infojson_written is None:
2725 return
2726
2727 # Note: Annotations are deprecated
2728 annofn = None
2729 if self.params.get('writeannotations', False):
2730 annofn = self.prepare_filename(info_dict, 'annotation')
2731 if annofn:
2732 if not self._ensure_dir_exists(encodeFilename(annofn)):
2733 return
2734 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2735 self.to_screen('[info] Video annotations are already present')
2736 elif not info_dict.get('annotations'):
2737 self.report_warning('There are no annotations to write.')
2738 else:
2739 try:
2740 self.to_screen('[info] Writing video annotations to: ' + annofn)
2741 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2742 annofile.write(info_dict['annotations'])
2743 except (KeyError, TypeError):
2744 self.report_warning('There are no annotations to write.')
2745 except (OSError, IOError):
2746 self.report_error('Cannot write annotations file: ' + annofn)
2747 return
2748
2749 # Write internet shortcut files
2750 def _write_link_file(link_type):
2751 if 'webpage_url' not in info_dict:
2752 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2753 return False
2754 linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
2755 if not self._ensure_dir_exists(encodeFilename(linkfn)):
2756 return False
2757 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2758 self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
2759 return True
2760 try:
2761 self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
2762 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
2763 newline='\r\n' if link_type == 'url' else '\n') as linkfile:
2764 template_vars = {'url': iri_to_uri(info_dict['webpage_url'])}
2765 if link_type == 'desktop':
2766 template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
2767 linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
2768 except (OSError, IOError):
2769 self.report_error(f'Cannot write internet shortcut {linkfn}')
2770 return False
2771 return True
2772
2773 write_links = {
2774 'url': self.params.get('writeurllink'),
2775 'webloc': self.params.get('writewebloclink'),
2776 'desktop': self.params.get('writedesktoplink'),
2777 }
2778 if self.params.get('writelink'):
2779 link_type = ('webloc' if sys.platform == 'darwin'
2780 else 'desktop' if sys.platform.startswith('linux')
2781 else 'url')
2782 write_links[link_type] = True
2783
2784 if any(should_write and not _write_link_file(link_type)
2785 for link_type, should_write in write_links.items()):
2786 return
2787
2788 try:
2789 info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2790 except PostProcessingError as err:
2791 self.report_error('Preprocessing: %s' % str(err))
2792 return
2793
2794 must_record_download_archive = False
2795 if self.params.get('skip_download', False):
2796 info_dict['filepath'] = temp_filename
2797 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2798 info_dict['__files_to_move'] = files_to_move
2799 info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
2800 else:
2801 # Download
2802 info_dict.setdefault('__postprocessors', [])
2803 try:
2804
2805 def existing_file(*filepaths):
2806 ext = info_dict.get('ext')
2807 final_ext = self.params.get('final_ext', ext)
2808 existing_files = []
2809 for file in orderedSet(filepaths):
2810 if final_ext != ext:
2811 converted = replace_extension(file, final_ext, ext)
2812 if os.path.exists(encodeFilename(converted)):
2813 existing_files.append(converted)
2814 if os.path.exists(encodeFilename(file)):
2815 existing_files.append(file)
2816
2817 if not existing_files or self.params.get('overwrites', False):
2818 for file in orderedSet(existing_files):
2819 self.report_file_delete(file)
2820 os.remove(encodeFilename(file))
2821 return None
2822
2823 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2824 return existing_files[0]
2825
2826 success = True
2827 if info_dict.get('requested_formats') is not None:
2828
2829 def compatible_formats(formats):
2830 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2831 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2832 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2833 if len(video_formats) > 2 or len(audio_formats) > 2:
2834 return False
2835
2836 # Check extension
2837 exts = {format.get('ext') for format in formats}
2838 COMPATIBLE_EXTS = (
2839 {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
2840 {'webm'},
2841 )
2842 for ext_sets in COMPATIBLE_EXTS:
2843 if ext_sets.issuperset(exts):
2844 return True
2845 # TODO: Check acodec/vcodec
2846 return False
2847
2848 requested_formats = info_dict['requested_formats']
2849 old_ext = info_dict['ext']
2850 if self.params.get('merge_output_format') is None:
2851 if not compatible_formats(requested_formats):
2852 info_dict['ext'] = 'mkv'
2853 self.report_warning(
2854 'Requested formats are incompatible for merge and will be merged into mkv')
2855 if (info_dict['ext'] == 'webm'
2856 and info_dict.get('thumbnails')
2857 # check with type instead of pp_key, __name__, or isinstance
2858 # since we don't want any custom PPs to trigger this
2859 and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):
2860 info_dict['ext'] = 'mkv'
2861 self.report_warning(
2862 'webm doesn\'t support embedding a thumbnail, mkv will be used')
2863 new_ext = info_dict['ext']
2864
2865 def correct_ext(filename, ext=new_ext):
2866 if filename == '-':
2867 return filename
2868 filename_real_ext = os.path.splitext(filename)[1][1:]
2869 filename_wo_ext = (
2870 os.path.splitext(filename)[0]
2871 if filename_real_ext in (old_ext, new_ext)
2872 else filename)
2873 return '%s.%s' % (filename_wo_ext, ext)
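# e.g. with old_ext 'webm' and new_ext 'mkv', correct_ext('video.webm')
# returns 'video.mkv'; a filename of '-' (stdout) is passed through as-is.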
2874
2875 # Ensure filename always has a correct extension for successful merge
2876 full_filename = correct_ext(full_filename)
2877 temp_filename = correct_ext(temp_filename)
2878 dl_filename = existing_file(full_filename, temp_filename)
2879 info_dict['__real_download'] = False
2880
2881 if dl_filename is not None:
2882 self.report_file_already_downloaded(dl_filename)
2883 elif get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-'):
2884 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
2885 success, real_download = self.dl(temp_filename, info_dict)
2886 info_dict['__real_download'] = real_download
2887 else:
2888 downloaded = []
2889 merger = FFmpegMergerPP(self)
2890 if self.params.get('allow_unplayable_formats'):
2891 self.report_warning(
2892 'You have requested merging of multiple formats '
2893 'while also allowing unplayable formats to be downloaded. '
2894 'The formats won\'t be merged to prevent data corruption.')
2895 elif not merger.available:
2896 self.report_warning(
2897 'You have requested merging of multiple formats but ffmpeg is not installed. '
2898 'The formats won\'t be merged.')
2899
2900 if temp_filename == '-':
2901 reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict)
2902 else 'but the formats are incompatible for simultaneous download' if merger.available
2903 else 'but ffmpeg is not installed')
2904 self.report_warning(
2905 f'You have requested downloading multiple formats to stdout {reason}. '
2906 'The formats will be streamed one after the other')
2907 fname = temp_filename
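# Each requested format is downloaded to its own intermediate file named
# after its format_id, e.g. (hypothetical id) 'video.f137.mp4'; the merger
# combines the intermediates afterwards. When streaming to stdout, fname
# stays '-' and the formats are written out one after the other.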
2908 for f in requested_formats:
2909 new_info = dict(info_dict)
2910 del new_info['requested_formats']
2911 new_info.update(f)
2912 if temp_filename != '-':
2913 fname = prepend_extension(
2914 correct_ext(temp_filename, new_info['ext']),
2915 'f%s' % f['format_id'], new_info['ext'])
2916 if not self._ensure_dir_exists(fname):
2917 return
2918 f['filepath'] = fname
2919 downloaded.append(fname)
2920 partial_success, real_download = self.dl(fname, new_info)
2921 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2922 success = success and partial_success
2923 if merger.available and not self.params.get('allow_unplayable_formats'):
2924 info_dict['__postprocessors'].append(merger)
2925 info_dict['__files_to_merge'] = downloaded
2926 # Even if nothing was freshly downloaded, the merge itself only happens now
2927 info_dict['__real_download'] = True
2928 else:
2929 for file in downloaded:
2930 files_to_move[file] = None
2931 else:
2932 # Just a single file
2933 dl_filename = existing_file(full_filename, temp_filename)
2934 if dl_filename is None or dl_filename == temp_filename:
2935 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
2936 # So we should try to resume the download
2937 success, real_download = self.dl(temp_filename, info_dict)
2938 info_dict['__real_download'] = real_download
2939 else:
2940 self.report_file_already_downloaded(dl_filename)
2941
2942 dl_filename = dl_filename or temp_filename
2943 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2944
2945 except network_exceptions as err:
2946 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
2947 return
2948 except (OSError, IOError) as err:
2949 raise UnavailableVideoError(err)
2950 except (ContentTooShortError, ) as err:
2951 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2952 return
2953
2954 if success and full_filename != '-':
2955
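# The --fixup policy decides what happens below: 'never'/'ignore' skips all
# checks, 'warn' only reports the problem, 'detect_or_warn' (the default)
# attaches the appropriate FFmpeg*PP when ffmpeg is available, and 'force'
# runs the fixup even for files that were not downloaded in this session.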
2956 def fixup():
2957 do_fixup = True
2958 fixup_policy = self.params.get('fixup')
2959 vid = info_dict['id']
2960
2961 if fixup_policy in ('ignore', 'never'):
2962 return
2963 elif fixup_policy == 'warn':
2964 do_fixup = False
2965 elif fixup_policy != 'force':
2966 assert fixup_policy in ('detect_or_warn', None)
2967 if not info_dict.get('__real_download'):
2968 do_fixup = False
2969
2970 def ffmpeg_fixup(cndn, msg, cls):
2971 if not cndn:
2972 return
2973 if not do_fixup:
2974 self.report_warning(f'{vid}: {msg}')
2975 return
2976 pp = cls(self)
2977 if pp.available:
2978 info_dict['__postprocessors'].append(pp)
2979 else:
2980 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
2981
2982 stretched_ratio = info_dict.get('stretched_ratio')
2983 ffmpeg_fixup(
2984 stretched_ratio not in (1, None),
2985 f'Non-uniform pixel ratio {stretched_ratio}',
2986 FFmpegFixupStretchedPP)
2987
2988 ffmpeg_fixup(
2989 (info_dict.get('requested_formats') is None
2990 and info_dict.get('container') == 'm4a_dash'
2991 and info_dict.get('ext') == 'm4a'),
2992 'writing DASH m4a. Only some players support this container',
2993 FFmpegFixupM4aPP)
2994
2995 downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
2996 downloader = downloader.__name__ if downloader else None
2997 ffmpeg_fixup(info_dict.get('requested_formats') is None and downloader == 'HlsFD',
2998 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
2999 FFmpegFixupM3u8PP)
3000 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
3001 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed duration detected', FFmpegFixupDurationPP)
3002
3003 fixup()
3004 try:
3005 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
3006 except PostProcessingError as err:
3007 self.report_error('Postprocessing: %s' % str(err))
3008 return
3009 try:
3010 for ph in self._post_hooks:
3011 ph(info_dict['filepath'])
3012 except Exception as err:
3013 self.report_error('post hooks: %s' % str(err))
3014 return
3015 must_record_download_archive = True
3016
3017 if must_record_download_archive or self.params.get('force_write_download_archive', False):
3018 self.record_download_archive(info_dict)
3019 max_downloads = self.params.get('max_downloads')
3020 if max_downloads is not None and self._num_downloads >= int(max_downloads):
3021 raise MaxDownloadsReached()
3022
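# Wraps a download entry point so that per-URL errors are reported without
# aborting the whole batch: UnavailableVideoError is only reported,
# MaxDownloadsReached always propagates, and DownloadCancelled propagates
# unless --break-per-url limits the abort to the current URL. On success,
# the --dump-single-json output is emitted here.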
3023 def __download_wrapper(self, func):
3024 @functools.wraps(func)
3025 def wrapper(*args, **kwargs):
3026 try:
3027 res = func(*args, **kwargs)
3028 except UnavailableVideoError as e:
3029 self.report_error(e)
3030 except MaxDownloadsReached as e:
3031 self.to_screen(f'[info] {e}')
3032 raise
3033 except DownloadCancelled as e:
3034 self.to_screen(f'[info] {e}')
3035 if not self.params.get('break_per_url'):
3036 raise
3037 else:
3038 if self.params.get('dump_single_json', False):
3039 self.post_extract(res)
3040 self.to_stdout(json.dumps(self.sanitize_info(res)))
3041 return wrapper
3042
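# A minimal usage sketch (hypothetical params dict and URL):
#   with YoutubeDL(params) as ydl:
#       retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])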
3043 def download(self, url_list):
3044 """Download a given list of URLs."""
3045 url_list = variadic(url_list) # Passing a single URL is a common mistake
3046 outtmpl = self.outtmpl_dict['default']
3047 if (len(url_list) > 1
3048 and outtmpl != '-'
3049 and '%' not in outtmpl
3050 and self.params.get('max_downloads') != 1):
3051 raise SameFileError(outtmpl)
3052
3053 for url in url_list:
3054 self.__download_wrapper(self.extract_info)(
3055 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
3056
3057 return self._download_retcode
3058
3059 def download_with_info_file(self, info_filename):
3060 with contextlib.closing(fileinput.FileInput(
3061 [info_filename], mode='r',
3062 openhook=fileinput.hook_encoded('utf-8'))) as f:
3063 # FileInput doesn't have a read method, so we can't call json.load
3064 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
3065 try:
3066 self.__download_wrapper(self.process_ie_result)(info, download=True)
3067 except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
3068 if not isinstance(e, EntryNotInPlaylist):
3069 self.to_stderr('\r')
3070 webpage_url = info.get('webpage_url')
3071 if webpage_url is not None:
3072 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
3073 return self.download([webpage_url])
3074 else:
3075 raise
3076 return self._download_retcode
3077
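# An 'epoch' timestamp is always added. With remove_private_keys=True, keys
# starting with '_' (except the always-kept '_type'), the listed private keys
# and empty values are stripped recursively; with the default False, only
# internal keys such as '__original_infodict' are dropped.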
3078 @staticmethod
3079 def sanitize_info(info_dict, remove_private_keys=False):
3080 ''' Sanitize the infodict for conversion to JSON '''
3081 if info_dict is None:
3082 return info_dict
3083 info_dict.setdefault('epoch', int(time.time()))
3084 remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
3085 keep_keys = ['_type'] # Always keep this to facilitate load-info-json
3086 if remove_private_keys:
3087 remove_keys |= {
3088 'requested_formats', 'requested_subtitles', 'requested_entries', 'entries',
3089 'filepath', 'infojson_filename', 'original_url', 'playlist_autonumber',
3090 }
3091 empty_values = (None, {}, [], set(), tuple())
3092 reject = lambda k, v: k not in keep_keys and (
3093 k.startswith('_') or k in remove_keys or v in empty_values)
3094 else:
3095 reject = lambda k, v: k in remove_keys
3096 filter_fn = lambda obj: (
3097 list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
3098 else obj if not isinstance(obj, dict)
3099 else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
3100 return filter_fn(info_dict)
3101
3102 @staticmethod
3103 def filter_requested_info(info_dict, actually_filter=True):
3104 ''' Alias of sanitize_info for backward compatibility '''
3105 return YoutubeDL.sanitize_info(info_dict, actually_filter)
3106
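# Runs a single postprocessor on the infodict. A PostProcessingError aborts
# unless --ignore-errors is exactly True; files the PP marks for deletion are
# removed here, or (with --keep-video) kept and handed to the final move step.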
3107 def run_pp(self, pp, infodict):
3108 files_to_delete = []
3109 if '__files_to_move' not in infodict:
3110 infodict['__files_to_move'] = {}
3111 try:
3112 files_to_delete, infodict = pp.run(infodict)
3113 except PostProcessingError as e:
3114 # `ignoreerrors` must be exactly True, not the 'only_download' value
3115 if self.params.get('ignoreerrors') is True:
3116 self.report_error(e)
3117 return infodict
3118 raise
3119
3120 if not files_to_delete:
3121 return infodict
3122 if self.params.get('keepvideo', False):
3123 for f in files_to_delete:
3124 infodict['__files_to_move'].setdefault(f, '')
3125 else:
3126 for old_filename in set(files_to_delete):
3127 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
3128 try:
3129 os.remove(encodeFilename(old_filename))
3130 except (IOError, OSError):
3131 self.report_warning('Unable to remove downloaded original file')
3132 if old_filename in infodict['__files_to_move']:
3133 del infodict['__files_to_move'][old_filename]
3134 return infodict
3135
3136 @staticmethod
3137 def post_extract(info_dict):
3138 def actual_post_extract(info_dict):
3139 if info_dict.get('_type') in ('playlist', 'multi_video'):
3140 for video_dict in info_dict.get('entries', {}):
3141 actual_post_extract(video_dict or {})
3142 return
3143
3144 post_extractor = info_dict.get('__post_extractor') or (lambda: {})
3145 extra = post_extractor().items()
3146 info_dict.update(extra)
3147 info_dict.pop('__post_extractor', None)
3148
3149 original_infodict = info_dict.get('__original_infodict') or {}
3150 original_infodict.update(extra)
3151 original_infodict.pop('__post_extractor', None)
3152
3153 actual_post_extract(info_dict or {})
3154
3155 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
3156 info = dict(ie_info)
3157 info['__files_to_move'] = files_to_move or {}
3158 for pp in self._pps[key]:
3159 info = self.run_pp(pp, info)
3160 return info, info.pop('__files_to_move', None)
3161
3162 def post_process(self, filename, ie_info, files_to_move=None):
3163 """Run all the postprocessors on the given file."""
3164 info = dict(ie_info)
3165 info['filepath'] = filename
3166 info['__files_to_move'] = files_to_move or {}
3167
3168 for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
3169 info = self.run_pp(pp, info)
3170 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3171 del info['__files_to_move']
3172 for pp in self._pps['after_move']:
3173 info = self.run_pp(pp, info)
3174 return info
3175
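# Archive IDs pair the lowercased extractor key with the video id,
# e.g. (hypothetical entry) 'youtube BaW_jenozKc', one line per video
# in the --download-archive file.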
3176 def _make_archive_id(self, info_dict):
3177 video_id = info_dict.get('id')
3178 if not video_id:
3179 return
3180 # Future-proof against any change in case
3181 # and for backwards compatibility with prior versions
3182 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
3183 if extractor is None:
3184 url = str_or_none(info_dict.get('url'))
3185 if not url:
3186 return
3187 # Try to find matching extractor for the URL and take its ie_key
3188 for ie_key, ie in self._ies.items():
3189 if ie.suitable(url):
3190 extractor = ie_key
3191 break
3192 else:
3193 return
3194 return '%s %s' % (extractor.lower(), video_id)
3195
3196 def in_download_archive(self, info_dict):
3197 fn = self.params.get('download_archive')
3198 if fn is None:
3199 return False
3200
3201 vid_id = self._make_archive_id(info_dict)
3202 if not vid_id:
3203 return False # Incomplete video information
3204
3205 return vid_id in self.archive
3206
3207 def record_download_archive(self, info_dict):
3208 fn = self.params.get('download_archive')
3209 if fn is None:
3210 return
3211 vid_id = self._make_archive_id(info_dict)
3212 assert vid_id
3213 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
3214 archive_file.write(vid_id + '\n')
3215 self.archive.add(vid_id)
3216
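# Renders a human-readable resolution: 'audio only' for audio-only formats,
# otherwise e.g. (hypothetical values) width=1920/height=1080 -> '1920x1080',
# height=720 alone -> '720p', width=1280 alone -> '1280x?'.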
3217 @staticmethod
3218 def format_resolution(format, default='unknown'):
3219 if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
3220 return 'audio only'
3221 if format.get('resolution') is not None:
3222 return format['resolution']
3223 if format.get('width') and format.get('height'):
3224 return '%dx%d' % (format['width'], format['height'])
3225 elif format.get('height'):
3226 return '%sp' % format['height']
3227 elif format.get('width'):
3228 return '%dx?' % format['width']
3229 return default
3230
3231 def _format_note(self, fdict):
3232 res = ''
3233 if fdict.get('ext') in ['f4f', 'f4m']:
3234 res += '(unsupported)'
3235 if fdict.get('language'):
3236 if res:
3237 res += ' '
3238 res += '[%s]' % fdict['language']
3239 if fdict.get('format_note') is not None:
3240 if res:
3241 res += ' '
3242 res += fdict['format_note']
3243 if fdict.get('tbr') is not None:
3244 if res:
3245 res += ', '
3246 res += '%4dk' % fdict['tbr']
3247 if fdict.get('container') is not None:
3248 if res:
3249 res += ', '
3250 res += '%s container' % fdict['container']
3251 if (fdict.get('vcodec') is not None
3252 and fdict.get('vcodec') != 'none'):
3253 if res:
3254 res += ', '
3255 res += fdict['vcodec']
3256 if fdict.get('vbr') is not None:
3257 res += '@'
3258 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3259 res += 'video@'
3260 if fdict.get('vbr') is not None:
3261 res += '%4dk' % fdict['vbr']
3262 if fdict.get('fps') is not None:
3263 if res:
3264 res += ', '
3265 res += '%sfps' % fdict['fps']
3266 if fdict.get('acodec') is not None:
3267 if res:
3268 res += ', '
3269 if fdict['acodec'] == 'none':
3270 res += 'video only'
3271 else:
3272 res += '%-5s' % fdict['acodec']
3273 elif fdict.get('abr') is not None:
3274 if res:
3275 res += ', '
3276 res += 'audio'
3277 if fdict.get('abr') is not None:
3278 res += '@%3dk' % fdict['abr']
3279 if fdict.get('asr') is not None:
3280 res += ' (%5dHz)' % fdict['asr']
3281 if fdict.get('filesize') is not None:
3282 if res:
3283 res += ', '
3284 res += format_bytes(fdict['filesize'])
3285 elif fdict.get('filesize_approx') is not None:
3286 if res:
3287 res += ', '
3288 res += '~' + format_bytes(fdict['filesize_approx'])
3289 return res
3290
3291 def _list_format_headers(self, *headers):
3292 if self.params.get('listformats_table', True) is not False:
3293 return [self._format_screen(header, self.Styles.HEADERS) for header in headers]
3294 return headers
3295
3296 def list_formats(self, info_dict):
3297 formats = info_dict.get('formats', [info_dict])
3298 new_format = self.params.get('listformats_table', True) is not False
3299 if new_format:
3300 delim = self._format_screen('\u2502', self.Styles.DELIM, '|', test_encoding=True)
3301 table = [
3302 [
3303 self._format_screen(format_field(f, 'format_id'), self.Styles.ID),
3304 format_field(f, 'ext'),
3305 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
3306 format_field(f, 'fps', '\t%d'),
3307 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
3308 delim,
3309 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
3310 format_field(f, 'tbr', '\t%dk'),
3311 shorten_protocol_name(f.get('protocol', '').replace('native', 'n')),
3312 delim,
3313 format_field(f, 'vcodec', default='unknown').replace(
3314 'none',
3315 'images' if f.get('acodec') == 'none'
3316 else self._format_screen('audio only', self.Styles.SUPPRESS)),
3317 format_field(f, 'vbr', '\t%dk'),
3318 format_field(f, 'acodec', default='unknown').replace(
3319 'none',
3320 '' if f.get('vcodec') == 'none'
3321 else self._format_screen('video only', self.Styles.SUPPRESS)),
3322 format_field(f, 'abr', '\t%dk'),
3323 format_field(f, 'asr', '\t%dHz'),
3324 join_nonempty(
3325 self._format_screen('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
3326 format_field(f, 'language', '[%s]'),
3327 join_nonempty(
3328 format_field(f, 'format_note'),
3329 format_field(f, 'container', ignore=(None, f.get('ext'))),
3330 delim=', '),
3331 delim=' '),
3332 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
3333 header_line = self._list_format_headers(
3334 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
3335 delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
3336 else:
3337 table = [
3338 [
3339 format_field(f, 'format_id'),
3340 format_field(f, 'ext'),
3341 self.format_resolution(f),
3342 self._format_note(f)]
3343 for f in formats
3344 if f.get('preference') is None or f['preference'] >= -1000]
3345 header_line = ['format code', 'extension', 'resolution', 'note']
3346
3347 self.to_screen(
3348 '[info] Available formats for %s:' % info_dict['id'])
3349 self.to_stdout(render_table(
3350 header_line, table,
3351 extra_gap=(0 if new_format else 1),
3352 hide_empty=new_format,
3353 delim=new_format and self._format_screen('\u2500', self.Styles.DELIM, '-', test_encoding=True)))
3354
3355 def list_thumbnails(self, info_dict):
3356 thumbnails = list(info_dict.get('thumbnails') or [])  # the field may be missing or None
3357 if not thumbnails:
3358 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
3359 return
3360
3361 self.to_screen(
3362 '[info] Thumbnails for %s:' % info_dict['id'])
3363 self.to_stdout(render_table(
3364 self._list_format_headers('ID', 'Width', 'Height', 'URL'),
3365 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
3366
3367 def list_subtitles(self, video_id, subtitles, name='subtitles'):
3368 if not subtitles:
3369 self.to_screen('%s has no %s' % (video_id, name))
3370 return
3371 self.to_screen(
3372 'Available %s for %s:' % (name, video_id))
3373
3374 def _row(lang, formats):
3375 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
3376 if len(set(names)) == 1:
3377 names = [] if names[0] == 'unknown' else names[:1]
3378 return [lang, ', '.join(names), ', '.join(exts)]
3379
3380 self.to_stdout(render_table(
3381 self._list_format_headers('Language', 'Name', 'Formats'),
3382 [_row(lang, formats) for lang, formats in subtitles.items()],
3383 hide_empty=True))
3384
3385 def urlopen(self, req):
3386 """ Start an HTTP download """
3387 if isinstance(req, compat_basestring):
3388 req = sanitized_Request(req)
3389 return self._opener.open(req, timeout=self._socket_timeout)
3390
3391 def print_debug_header(self):
3392 if not self.params.get('verbose'):
3393 return
3394
3395 def get_encoding(stream):
3396 ret = getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
3397 if not supports_terminal_sequences(stream):
3398 ret += ' (No ANSI)'
3399 return ret
3400
3401 encoding_str = 'Encodings: locale %s, fs %s, out %s, err %s, pref %s' % (
3402 locale.getpreferredencoding(),
3403 sys.getfilesystemencoding(),
3404 get_encoding(self._screen_file), get_encoding(self._err_file),
3405 self.get_encoding())
3406
3407 logger = self.params.get('logger')
3408 if logger:
3409 write_debug = lambda msg: logger.debug(f'[debug] {msg}')
3410 write_debug(encoding_str)
3411 else:
3412 write_string(f'[debug] {encoding_str}\n', encoding=None)
3413 write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
3414
3415 source = detect_variant()
3416 write_debug(join_nonempty(
3417 'yt-dlp version', __version__,
3418 f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
3419 '' if source == 'unknown' else f'({source})',
3420 delim=' '))
3421 if not _LAZY_LOADER:
3422 if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
3423 write_debug('Lazy loading extractors is forcibly disabled')
3424 else:
3425 write_debug('Lazy loading extractors is disabled')
3426 if plugin_extractors or plugin_postprocessors:
3427 write_debug('Plugins: %s' % [
3428 '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
3429 for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
3430 if self.params.get('compat_opts'):
3431 write_debug('Compatibility options: %s' % ', '.join(self.params.get('compat_opts')))
3432
3433 if source == 'source':
3434 try:
3435 sp = Popen(
3436 ['git', 'rev-parse', '--short', 'HEAD'],
3437 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3438 cwd=os.path.dirname(os.path.abspath(__file__)))
3439 out, err = sp.communicate_or_kill()
3440 out = out.decode().strip()
3441 if re.match('[0-9a-f]+', out):
3442 write_debug('Git HEAD: %s' % out)
3443 except Exception:
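# sys.exc_clear() exists only on Python 2; on Python 3 the attribute is
# missing and the resulting AttributeError is swallowed by this inner try/except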
3444 try:
3445 sys.exc_clear()
3446 except Exception:
3447 pass
3448
3449 def python_implementation():
3450 impl_name = platform.python_implementation()
3451 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3452 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3453 return impl_name
3454
3455 write_debug('Python version %s (%s %s) - %s' % (
3456 platform.python_version(),
3457 python_implementation(),
3458 platform.architecture()[0],
3459 platform_name()))
3460
3461 exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
3462 ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
3463 if ffmpeg_features:
3464 exe_versions['ffmpeg'] += ' (%s)' % ','.join(ffmpeg_features)
3465
3466 exe_versions['rtmpdump'] = rtmpdump_version()
3467 exe_versions['phantomjs'] = PhantomJSwrapper._version()
3468 exe_str = ', '.join(
3469 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3470 ) or 'none'
3471 write_debug('exe versions: %s' % exe_str)
3472
3473 from .downloader.websocket import has_websockets
3474 from .postprocessor.embedthumbnail import has_mutagen
3475 from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
3476
3477 lib_str = join_nonempty(
3478 compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0],
3479 KEYRING_AVAILABLE and 'keyring',
3480 has_mutagen and 'mutagen',
3481 SQLITE_AVAILABLE and 'sqlite',
3482 has_websockets and 'websockets',
3483 delim=', ') or 'none'
3484 write_debug('Optional libraries: %s' % lib_str)
3485
3486 proxy_map = {}
3487 for handler in self._opener.handlers:
3488 if hasattr(handler, 'proxies'):
3489 proxy_map.update(handler.proxies)
3490 write_debug(f'Proxy map: {proxy_map}')
3491
3492 # Not implemented
3493 if False and self.params.get('call_home'):
3494 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
3495 write_debug('Public IP address: %s' % ipaddr)
3496 latest_version = self.urlopen(
3497 'https://yt-dl.org/latest/version').read().decode('utf-8')
3498 if version_tuple(latest_version) > version_tuple(__version__):
3499 self.report_warning(
3500 'You are using an outdated version (newest version: %s)! '
3501 'See https://yt-dl.org/update if you need help updating.' %
3502 latest_version)
3503
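# Builds the shared urllib opener: cookies are loaded first, then the handler
# chain is assembled: per-request proxies, HTTPS, cookie processing, yt-dlp's
# own HTTP and redirect handlers, data: URLs, and a neutered file: handler.
# Proxy resolution order: --proxy if given, else the environment (with the
# https proxy falling back to the http one when only that is set).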
3504 def _setup_opener(self):
3505 timeout_val = self.params.get('socket_timeout')
3506 self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
3507
3508 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
3509 opts_cookiefile = self.params.get('cookiefile')
3510 opts_proxy = self.params.get('proxy')
3511
3512 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
3513
3514 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
3515 if opts_proxy is not None:
3516 if opts_proxy == '':
3517 proxies = {}
3518 else:
3519 proxies = {'http': opts_proxy, 'https': opts_proxy}
3520 else:
3521 proxies = compat_urllib_request.getproxies()
3522 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
3523 if 'http' in proxies and 'https' not in proxies:
3524 proxies['https'] = proxies['http']
3525 proxy_handler = PerRequestProxyHandler(proxies)
3526
3527 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
3528 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3529 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
3530 redirect_handler = YoutubeDLRedirectHandler()
3531 data_handler = compat_urllib_request_DataHandler()
3532
3533 # When passing our own FileHandler instance, build_opener won't add the
3534 # default FileHandler; this lets us disable the file:// protocol, which
3535 # can be abused for malicious purposes (see
3536 # https://github.com/ytdl-org/youtube-dl/issues/8227)
3537 file_handler = compat_urllib_request.FileHandler()
3538
3539 def file_open(*args, **kwargs):
3540 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
3541 file_handler.file_open = file_open
3542
3543 opener = compat_urllib_request.build_opener(
3544 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
3545
3546 # Delete the default user-agent header, which would otherwise apply in
3547 # cases where our custom HTTP handler doesn't come into play
3548 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
3549 opener.addheaders = []
3550 self._opener = opener
3551
3552 def encode(self, s):
3553 if isinstance(s, bytes):
3554 return s # Already encoded
3555
3556 try:
3557 return s.encode(self.get_encoding())
3558 except UnicodeEncodeError as err:
3559 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3560 raise
3561
3562 def get_encoding(self):
3563 encoding = self.params.get('encoding')
3564 if encoding is None:
3565 encoding = preferredencoding()
3566 return encoding
3567
3568 def _write_info_json(self, label, ie_result, infofn, overwrite=None):
3569 ''' Write infojson and return True = written, False = skipped, None = error '''
3570 if overwrite is None:
3571 overwrite = self.params.get('overwrites', True)
3572 if not self.params.get('writeinfojson'):
3573 return False
3574 elif not infofn:
3575 self.write_debug(f'Skipping writing {label} infojson')
3576 return False
3577 elif not self._ensure_dir_exists(infofn):
3578 return None
3579 elif not overwrite and os.path.exists(infofn):
3580 self.to_screen(f'[info] {label.title()} metadata is already present')
3581 else:
3582 self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
3583 try:
3584 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
3585 except (OSError, IOError):
3586 self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
3587 return None
3588 return True
3589
3590 def _write_description(self, label, ie_result, descfn):
3591 ''' Write description and return True = written, False = skipped, None = error '''
3592 if not self.params.get('writedescription'):
3593 return False
3594 elif not descfn:
3595 self.write_debug(f'Skipping writing {label} description')
3596 return False
3597 elif not self._ensure_dir_exists(descfn):
3598 return None
3599 elif not self.params.get('overwrites', True) and os.path.exists(descfn):
3600 self.to_screen(f'[info] {label.title()} description is already present')
3601 elif ie_result.get('description') is None:
3602 self.report_warning(f'There\'s no {label} description to write')
3603 return False
3604 else:
3605 try:
3606 self.to_screen(f'[info] Writing {label} description to: {descfn}')
3607 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
3608 descfile.write(ie_result['description'])
3609 except (OSError, IOError):
3610 self.report_error(f'Cannot write {label} description file {descfn}')
3611 return None
3612 return True
3613
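# Subtitle files are named by inserting the language tag before the subtitle
# extension, e.g. (hypothetical name) 'video.mp4' with lang 'en' and format
# 'vtt' -> 'video.en.vtt' via subtitles_filename().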
3614 def _write_subtitles(self, info_dict, filename):
3615 ''' Write subtitles to file and return a list of (sub_filename, final_sub_filename); or None on error '''
3616 ret = []
3617 subtitles = info_dict.get('requested_subtitles')
3618 if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
3619 # Subtitle download errors are already handled as troubles in the relevant IE;
3620 # that way it silently continues when used with an IE that doesn't support subtitles
3621 return ret
3622
3623 sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
3624 if not sub_filename_base:
3625 self.to_screen('[info] Skipping writing video subtitles')
3626 return ret
3627 for sub_lang, sub_info in subtitles.items():
3628 sub_format = sub_info['ext']
3629 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
3630 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
3631 if not self.params.get('overwrites', True) and os.path.exists(sub_filename):
3632 self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
3633 sub_info['filepath'] = sub_filename
3634 ret.append((sub_filename, sub_filename_final))
3635 continue
3636
3637 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
3638 if sub_info.get('data') is not None:
3639 try:
3640 # Use newline='' to prevent conversion of newline characters
3641 # See https://github.com/ytdl-org/youtube-dl/issues/10268
3642 with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
3643 subfile.write(sub_info['data'])
3644 sub_info['filepath'] = sub_filename
3645 ret.append((sub_filename, sub_filename_final))
3646 continue
3647 except (OSError, IOError):
3648 self.report_error(f'Cannot write video subtitles file {sub_filename}')
3649 return None
3650
3651 try:
3652 sub_copy = sub_info.copy()
3653 sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
3654 self.dl(sub_filename, sub_copy, subtitle=True)
3655 sub_info['filepath'] = sub_filename
3656 ret.append((sub_filename, sub_filename_final))
3657 except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
3658 self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}')
3659 continue
3660 return ret
3661
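# With --write-all-thumbnails and several candidates, each file gets the
# thumbnail id in its name, e.g. (hypothetical name) 'video.0.jpg'; a single
# thumbnail uses the plain extension, e.g. 'video.jpg'. Thumbnails are tried
# in reverse (best-first) order and, unless writing all of them, only the
# first successful download is kept.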
3662 def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
3663 ''' Write thumbnails to file and return a list of (thumb_filename, final_thumb_filename) '''
3664 write_all = self.params.get('write_all_thumbnails', False)
3665 thumbnails, ret = [], []
3666 if write_all or self.params.get('writethumbnail', False):
3667 thumbnails = info_dict.get('thumbnails') or []
3668 multiple = write_all and len(thumbnails) > 1
3669
3670 if thumb_filename_base is None:
3671 thumb_filename_base = filename
3672 if thumbnails and not thumb_filename_base:
3673 self.write_debug(f'Skipping writing {label} thumbnail')
3674 return ret
3675
3676 for t in thumbnails[::-1]:
3677 thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
3678 thumb_display_id = f'{label} thumbnail {t["id"]}'
3679 thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
3680 thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
3681
3682 if not self.params.get('overwrites', True) and os.path.exists(thumb_filename):
3683 ret.append((thumb_filename, thumb_filename_final))
3684 t['filepath'] = thumb_filename
3685 self.to_screen('[info] %s is already present' % (
3686 thumb_display_id if multiple else f'{label} thumbnail').capitalize())
3687 else:
3688 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
3689 try:
3690 uf = self.urlopen(t['url'])
3691 self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
3692 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3693 shutil.copyfileobj(uf, thumbf)
3694 ret.append((thumb_filename, thumb_filename_final))
3695 t['filepath'] = thumb_filename
3696 except network_exceptions as err:
3697 self.report_warning(f'Unable to download {thumb_display_id}: {err}')
3698 if ret and not write_all:
3699 break
3700 return ret