#!/usr/bin/env python3
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import random

from string import ascii_letters
from zipimport import zipimporter

from .compat import (
    compat_basestring,
    compat_get_terminal_size,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_shlex_quote,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)
from .cookies import load_cookies
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    determine_ext,
    determine_protocol,
    DOT_DESKTOP_LINK_TEMPLATE,
    DOT_URL_LINK_TEMPLATE,
    DOT_WEBLOC_LINK_TEMPLATE,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    EntryNotInPlaylist,
    error_to_compat_str,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    STR_FORMAT_RE_TMPL,
    STR_FORMAT_TYPES,
    formatSeconds,
    GeoRestrictedError,
    HEADRequest,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    LazyList,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    network_exceptions,
    orderedSet,
    OUTTMPL_TYPES,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    process_communicate_or_kill,
    register_socks_protocols,
    RejectedVideoReached,
    render_table,
    replace_extension,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    ThrottledDownload,
    to_high_limit_path,
    traverse_obj,
    try_get,
    UnavailableVideoError,
    url_basename,
    variadic,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
)
from .cache import Cache
from .extractor import (
    gen_extractor_classes,
    get_info_extractor,
    _LAZY_LOADER,
    _PLUGIN_CLASSES
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
    FFmpegFD,
    get_suitable_downloader,
    shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    get_postprocessor,
    FFmpegFixupDurationPP,
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    MoveFilesAfterDownloadPP,
)
from .version import __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information itself (that is the task of the
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL object also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

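    A minimal usage sketch (the option values and URL below are only
    illustrative assumptions, not defaults of this class):

        import yt_dlp

        ydl_opts = {'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
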
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A list of templates to force print
    forceurl:          Force printing final URL. (Deprecated)
    forcetitle:        Force printing title. (Deprecated)
    forceid:           Force printing ID. (Deprecated)
    forcethumbnail:    Force printing thumbnail URL. (Deprecated)
    forcedescription:  Force printing description. (Deprecated)
    forcefilename:     Force printing final filename. (Deprecated)
    forceduration:     Force printing duration. (Deprecated)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or list_thumbnails is used
    format:            Video format code. See "FORMAT SELECTION" for more details.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental)
    format_sort:       How to sort the video formats. See "Sorting Formats"
                       for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none)
                       or None (check only if requested by extractor)
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download errors
                       (Default True when running yt-dlp,
                       but False when directly accessing YoutubeDL class)
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson, etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Deprecated - Use subtitleslangs = ['all']
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    cookiefile:        File name where cookies should be read from and dumped to
    cookiesfrombrowser: A tuple containing the name of the browser and the profile
                       name/path from where cookies are loaded.
                       Eg: ('chrome', ) or ('vivaldi', 'default')
    nocheckcertificate: Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Can be one of
                               pre_process|before_dl|post_process|after_move.
                               Assumed to be 'post_process' if not given
    post_hooks:        A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
                       A sketch of a simple hook is given after this docstring.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted. "merge_output_format" is
                       replaced by this extension when given
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                         about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this
                       (a sketch is also given after this docstring).
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
                       or {'m3u8': 'ffmpeg'} instead.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs.
                       Refer to __init__.py for their implementation

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args, hls_use_mpegts, http_chunk_size.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP
                       For compatibility with youtube-dl, a single list of args
                       can also be used

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    params = None
    _ies = []
    _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
    _printed_messages = set()
    _first_webpage_request = True
    _download_retcode = None
    _num_downloads = None
    _playlist_level = 0
    _playlist_urls = set()
    _screen_file = None

    def __init__(self, params=None, auto_init=True):
        """Create a YoutubeDL object with the given options."""
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        if sys.version_info < (3, 6):
            self.report_warning(
                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                'You have asked for unplayable formats to be listed/downloaded. '
                'This is a developer option intended for debugging. '
                'If you experience any issues while using this option, DO NOT open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('warnings', []):
            self.report_warning(msg)

        if self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        elif self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            None if self.params.get('format') is None
            else self.build_format_selector(self.params['format']))

        self._setup_opener()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug('Loading archive file %r\n' % fn)
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))
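        # Assumption, noted only for illustration: each archive line is an
        # "<extractor> <video id>" pair, e.g. "youtube dQw4w9WgXcQ", as
        # written by record_download_archive() later in this class.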

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            pp_class = get_postprocessor(pp_def.pop('key'))
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        if not isinstance(ie, type):
            self._ies_instances[ie.ie_key()] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there is no instance it will create a new one
        and add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader)"""
        self._progress_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=False):
        """Print message to stdout"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not quiet or self.params.get('verbose'):
            self._write_string(
                '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
                self._err_file if quiet else self._screen_file)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode"""
        self.to_stdout(
            message, skip_eol, quiet=self.params.get('quiet', False))

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
                _msg_header = '\033[0;33mWARNING:\033[0m'
            else:
                _msg_header = 'WARNING:'
            warning_message = '%s %s' % (_msg_header, message)
            self.to_stderr(warning_message, only_once)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
            _msg_header = '\033[0;31mERROR:\033[0m'
        else:
            _msg_header = 'ERROR:'
        error_message = '%s %s' % (_msg_header, message)
        self.trouble(error_message, tb)

    def write_debug(self, message, only_once=False):
        '''Log a debug message, or print it to stderr'''
        if not self.params.get('verbose', False):
            return
        message = '[debug] %s' % message
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, has_drm=False, forced=False):
        msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
        expected = self.params.get('ignore_no_formats_error')
        if forced or not expected:
            raise ExtractorError(msg, expected=has_drm or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        outtmpl_dict.update({
            k: v for k, v in DEFAULT_OUTTMPL.items()
            if not outtmpl_dict.get(k)})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to work around encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'
        # correspondingly that is not what we want since we need to keep
        # '%%' intact for template dict substitution step. Working around
        # with boundary-alike separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljq]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err

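    # Illustrative note (not executed here): validate_outtmpl() above returns
    # None for a well-formed template, e.g.
    #     YoutubeDL.validate_outtmpl('%(title)s-%(id)s.%(ext)s') is None
    # and returns the ValueError describing the problem otherwise.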
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the template and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = dict(info_dict)  # Do not sanitize so as not to consume LazyList
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljq]'))
        MATH_FUNCTIONS = {
            '+': float.__add__,
            '-': float.__sub__,
        }
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
            (?P<negate>-)?
            (?P<fields>{field})
            (?P<maths>(?:{math_op}{math_field})*)
            (?:>(?P<strf_format>.+?))?
            (?:\|(?P<default>.*?))?
            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
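        # Examples of the overall output-template syntax whose key part the
        # pattern above parses (illustrative only, following the documented
        # "OUTPUT TEMPLATE" rules rather than anything specific to this code):
        #   %(title)s                   plain field
        #   %(playlist_index+100)05d    field with maths applied
        #   %(duration>%H-%M-%S)s       field passed through strftime
        #   %(uploader|unknown)s        field with a fallback default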
752cda38 949
2b8a2973 950 def _traverse_infodict(k):
951 k = k.split('.')
952 if k[0] == '':
953 k.pop(0)
954 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 955
752cda38 956 def get_value(mdict):
957 # Object traversal
2b8a2973 958 value = _traverse_infodict(mdict['fields'])
752cda38 959 # Negative
960 if mdict['negate']:
961 value = float_or_none(value)
962 if value is not None:
963 value *= -1
964 # Do maths
385a27fa 965 offset_key = mdict['maths']
966 if offset_key:
752cda38 967 value = float_or_none(value)
968 operator = None
385a27fa 969 while offset_key:
970 item = re.match(
971 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
972 offset_key).group(0)
973 offset_key = offset_key[len(item):]
974 if operator is None:
752cda38 975 operator = MATH_FUNCTIONS[item]
385a27fa 976 continue
977 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
978 offset = float_or_none(item)
979 if offset is None:
2b8a2973 980 offset = float_or_none(_traverse_infodict(item))
385a27fa 981 try:
982 value = operator(value, multiplier * offset)
983 except (TypeError, ZeroDivisionError):
984 return None
985 operator = None
752cda38 986 # Datetime formatting
987 if mdict['strf_format']:
988 value = strftime_or_none(value, mdict['strf_format'])
989
990 return value
991
b868936c 992 na = self.params.get('outtmpl_na_placeholder', 'NA')
993
6e84b215 994 def _dumpjson_default(obj):
995 if isinstance(obj, (set, LazyList)):
996 return list(obj)
997 raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')
998
752cda38 999 def create_key(outer_mobj):
1000 if not outer_mobj.group('has_key'):
901130bb 1001 return f'%{outer_mobj.group(0)}'
752cda38 1002 key = outer_mobj.group('key')
752cda38 1003 mobj = re.match(INTERNAL_FORMAT_RE, key)
1004 if mobj is None:
9fea350f 1005 value, default, mobj = None, na, {'fields': ''}
752cda38 1006 else:
e625be0d 1007 mobj = mobj.groupdict()
752cda38 1008 default = mobj['default'] if mobj['default'] is not None else na
1009 value = get_value(mobj)
1010
b868936c 1011 fmt = outer_mobj.group('format')
752cda38 1012 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
1013 fmt = '0{:d}d'.format(field_size_compat_map[key])
1014
1015 value = default if value is None else value
752cda38 1016
7d1eb38a 1017 str_fmt = f'{fmt[:-1]}s'
1018 if fmt[-1] == 'l':
1019 value, fmt = ', '.join(variadic(value)), str_fmt
1020 elif fmt[-1] == 'j':
6e84b215 1021 value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
7d1eb38a 1022 elif fmt[-1] == 'q':
1023 value, fmt = compat_shlex_quote(str(value)), str_fmt
1024 elif fmt[-1] == 'c':
1025 value = str(value)
76a264ac 1026 if value is None:
1027 value, fmt = default, 's'
1028 else:
1029 value = value[0]
1030 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1031 value = float_or_none(value)
752cda38 1032 if value is None:
1033 value, fmt = default, 's'
901130bb 1034
752cda38 1035 if sanitize:
1036 if fmt[-1] == 'r':
1037 # If value is an object, sanitize might convert it to a string
1038 # So we convert it to repr first
7d1eb38a 1039 value, fmt = repr(value), str_fmt
639f1cea 1040 if fmt[-1] in 'csr':
9fea350f 1041 value = sanitize(mobj['fields'].split('.')[-1], value)
901130bb 1042
b868936c 1043 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1044 TMPL_DICT[key] = value
b868936c 1045 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1046
385a27fa 1047 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1048
de6000d9 1049 def _prepare_filename(self, info_dict, tmpl_type='default'):
8222d8de 1050 try:
586a91b6 1051 sanitize = lambda k, v: sanitize_filename(
45598aab 1052 compat_str(v),
1bb5c511 1053 restricted=self.params.get('restrictfilenames'),
40df485f 1054 is_id=(k == 'id' or k.endswith('_id')))
de6000d9 1055 outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
143db31d 1056 outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
901130bb 1057 outtmpl = self.escape_outtmpl(self._outtmpl_expandpath(outtmpl))
1058 filename = outtmpl % template_dict
15da37c7 1059
143db31d 1060 force_ext = OUTTMPL_TYPES.get(tmpl_type)
de6000d9 1061 if force_ext is not None:
752cda38 1062 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1063
bdc3fd2f
U
1064 # https://github.com/blackjack4494/youtube-dlc/issues/85
1065 trim_file_name = self.params.get('trim_file_name', False)
1066 if trim_file_name:
1067 fn_groups = filename.rsplit('.')
1068 ext = fn_groups[-1]
1069 sub_ext = ''
1070 if len(fn_groups) > 2:
1071 sub_ext = fn_groups[-2]
1072 filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
1073
0202b52a 1074 return filename
8222d8de 1075 except ValueError as err:
6febd1c1 1076 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1077 return None
1078
de6000d9 1079 def prepare_filename(self, info_dict, dir_type='', warn=False):
1080 """Generate the output filename."""
21cd8fae 1081
de6000d9 1082 filename = self._prepare_filename(info_dict, dir_type or 'default')
1083
c84aeac6 1084 if warn:
21cd8fae 1085 if not self.params.get('paths'):
de6000d9 1086 pass
1087 elif filename == '-':
c84aeac6 1088 self.report_warning('--paths is ignored when an outputting to stdout', only_once=True)
de6000d9 1089 elif os.path.isabs(filename):
c84aeac6 1090 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1091 if filename == '-' or not filename:
1092 return filename
1093
21cd8fae 1094 return self.get_output_path(dir_type, filename)
0202b52a 1095
120fe513 1096 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1097 """ Returns None if the file should be downloaded """
8222d8de 1098
c77495e3 1099 video_title = info_dict.get('title', info_dict.get('id', 'video'))
1100
8b0d7497 1101 def check_filter():
8b0d7497 1102 if 'title' in info_dict:
1103 # This can happen when we're just evaluating the playlist
1104 title = info_dict['title']
1105 matchtitle = self.params.get('matchtitle', False)
1106 if matchtitle:
1107 if not re.search(matchtitle, title, re.IGNORECASE):
1108 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1109 rejecttitle = self.params.get('rejecttitle', False)
1110 if rejecttitle:
1111 if re.search(rejecttitle, title, re.IGNORECASE):
1112 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1113 date = info_dict.get('upload_date')
1114 if date is not None:
1115 dateRange = self.params.get('daterange', DateRange())
1116 if date not in dateRange:
1117 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
1118 view_count = info_dict.get('view_count')
1119 if view_count is not None:
1120 min_views = self.params.get('min_views')
1121 if min_views is not None and view_count < min_views:
1122 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1123 max_views = self.params.get('max_views')
1124 if max_views is not None and view_count > max_views:
1125 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1126 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1127 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1128
8f18aca8 1129 match_filter = self.params.get('match_filter')
1130 if match_filter is not None:
1131 try:
1132 ret = match_filter(info_dict, incomplete=incomplete)
1133 except TypeError:
1134 # For backward compatibility
1135 ret = None if incomplete else match_filter(info_dict)
1136 if ret is not None:
1137 return ret
8b0d7497 1138 return None
1139
c77495e3 1140 if self.in_download_archive(info_dict):
1141 reason = '%s has already been recorded in the archive' % video_title
1142 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1143 else:
1144 reason = check_filter()
1145 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1146 if reason is not None:
120fe513 1147 if not silent:
1148 self.to_screen('[download] ' + reason)
c77495e3 1149 if self.params.get(break_opt, False):
1150 raise break_err()
8b0d7497 1151 return reason
fe7e0c98 1152
b6c45014
JMF
1153 @staticmethod
1154 def add_extra_info(info_dict, extra_info):
1155 '''Set the keys from extra_info in info dict if they are missing'''
1156 for key, value in extra_info.items():
1157 info_dict.setdefault(key, value)
1158
58f197b7 1159 def extract_info(self, url, download=True, ie_key=None, extra_info={},
61aa5ba3 1160 process=True, force_generic_extractor=False):
41d1cca3 1161 """
1162 Return a list with a dictionary for each video extracted.
1163
1164 Arguments:
1165 url -- URL to extract
1166
1167 Keyword arguments:
1168 download -- whether to download videos during extraction
1169 ie_key -- extractor key hint
1170 extra_info -- dictionary containing the extra values to add to each result
1171 process -- whether to resolve all unresolved references (URLs, playlist items),
1172 must be True for download to work.
1173 force_generic_extractor -- force using the generic extractor
1174 """
fe7e0c98 1175
61aa5ba3 1176 if not ie_key and force_generic_extractor:
d22dec74
S
1177 ie_key = 'Generic'
1178
8222d8de 1179 if ie_key:
56c73665 1180 ies = [self.get_info_extractor(ie_key)]
8222d8de
JMF
1181 else:
1182 ies = self._ies
1183
1184 for ie in ies:
1185 if not ie.suitable(url):
1186 continue
1187
9a68de12 1188 ie_key = ie.ie_key()
1189 ie = self.get_info_extractor(ie_key)
8222d8de 1190 if not ie.working():
6febd1c1
PH
1191 self.report_warning('The program functionality for this site has been marked as broken, '
1192 'and will probably not work.')
8222d8de
JMF
1193
1194 try:
d0757229 1195 temp_id = str_or_none(
63be1aab 1196 ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
1197 else ie._match_id(url))
a0566bbf 1198 except (AssertionError, IndexError, AttributeError):
1199 temp_id = None
1200 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1201 self.to_screen("[%s] %s: has already been recorded in archive" % (
1202 ie_key, temp_id))
1203 break
58f197b7 1204 return self.__extract_info(url, ie, download, extra_info, process)
a0566bbf 1205 else:
1206 self.report_error('no suitable InfoExtractor for URL %s' % url)
1207
cc9d1493 1208 def __handle_extraction_exceptions(func, handle_all_errors=True):
a0566bbf 1209 def wrapper(self, *args, **kwargs):
1210 try:
1211 return func(self, *args, **kwargs)
773f291d
S
1212 except GeoRestrictedError as e:
1213 msg = e.msg
1214 if e.countries:
1215 msg += '\nThis video is available in %s.' % ', '.join(
1216 map(ISO3166Utils.short2full, e.countries))
1217 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1218 self.report_error(msg)
fb043a6e 1219 except ExtractorError as e: # An error we somewhat expected
2c74e6fa 1220 self.report_error(compat_str(e), e.format_traceback())
51d9739f 1221 except ThrottledDownload:
1222 self.to_stderr('\r')
1223 self.report_warning('The download speed is below throttle limit. Re-extracting data')
1224 return wrapper(self, *args, **kwargs)
8b0d7497 1225 except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
d3e5bbf4 1226 raise
8222d8de 1227 except Exception as e:
cc9d1493 1228 if handle_all_errors and self.params.get('ignoreerrors', False):
9b9c5355 1229 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
8222d8de
JMF
1230 else:
1231 raise
a0566bbf 1232 return wrapper
1233
1234 @__handle_extraction_exceptions
58f197b7 1235 def __extract_info(self, url, ie, download, extra_info, process):
a0566bbf 1236 ie_result = ie.extract(url)
1237 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1238 return
1239 if isinstance(ie_result, list):
1240 # Backwards compatibility: old IE result format
1241 ie_result = {
1242 '_type': 'compat_list',
1243 'entries': ie_result,
1244 }
e37d0efb 1245 if extra_info.get('original_url'):
1246 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1247 self.add_default_extra_info(ie_result, ie, url)
1248 if process:
1249 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1250 else:
a0566bbf 1251 return ie_result
fe7e0c98 1252
ea38e55f 1253 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1254 if url is not None:
1255 self.add_extra_info(ie_result, {
1256 'webpage_url': url,
1257 'original_url': url,
1258 'webpage_url_basename': url_basename(url),
1259 })
1260 if ie is not None:
1261 self.add_extra_info(ie_result, {
1262 'extractor': ie.IE_NAME,
1263 'extractor_key': ie.ie_key(),
1264 })
ea38e55f 1265
58adec46 1266 def process_ie_result(self, ie_result, download=True, extra_info=None):
8222d8de
JMF
1267 """
1268 Take the result of the ie(may be modified) and resolve all unresolved
1269 references (URLs, playlist items).
1270
1271 It will also download the videos if 'download'.
1272 Returns the resolved ie_result.
1273 """
58adec46 1274 if extra_info is None:
1275 extra_info = {}
e8ee972c
PH
1276 result_type = ie_result.get('_type', 'video')
1277
057a5206 1278 if result_type in ('url', 'url_transparent'):
134c6ea8 1279 ie_result['url'] = sanitize_url(ie_result['url'])
e37d0efb 1280 if ie_result.get('original_url'):
1281 extra_info.setdefault('original_url', ie_result['original_url'])
1282
057a5206 1283 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1284 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1285 or extract_flat is True):
ecb54191 1286 info_copy = ie_result.copy()
1287 self.add_extra_info(info_copy, extra_info)
6033d980 1288 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1289 self.add_default_extra_info(info_copy, ie, ie_result['url'])
ecb54191 1290 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
e8ee972c 1291 return ie_result
1292
8222d8de 1293 if result_type == 'video':
b6c45014 1294 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1295 ie_result = self.process_video_result(ie_result, download=download)
28b0eb0f 1296 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1297 if additional_urls:
e9f4ccd1 1298 # TODO: Improve MetadataParserPP to allow setting a list
9c2b75b5 1299 if isinstance(additional_urls, compat_str):
1300 additional_urls = [additional_urls]
1301 self.to_screen(
1302 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1303 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1304 ie_result['additional_entries'] = [
1305 self.extract_info(
1306 url, download, extra_info,
1307 force_generic_extractor=self.params.get('force_generic_extractor'))
1308 for url in additional_urls
1309 ]
1310 return ie_result
8222d8de 1311 elif result_type == 'url':
1312 # We have to add extra_info to the results because it may be
1313 # contained in a playlist
07cce701 1314 return self.extract_info(
1315 ie_result['url'], download,
1316 ie_key=ie_result.get('ie_key'),
1317 extra_info=extra_info)
7fc3fa05 1318 elif result_type == 'url_transparent':
1319 # Use the information from the embedding page
1320 info = self.extract_info(
1321 ie_result['url'], ie_key=ie_result.get('ie_key'),
1322 extra_info=extra_info, download=False, process=False)
1323
1640eb09 1324 # extract_info may return None when ignoreerrors is enabled and
1325 # extraction failed with an error, don't crash and return early
1326 # in this case
1327 if not info:
1328 return info
1329
412c617d 1330 force_properties = dict(
1331 (k, v) for k, v in ie_result.items() if v is not None)
0396806f 1332 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
412c617d 1333 if f in force_properties:
1334 del force_properties[f]
1335 new_result = info.copy()
1336 new_result.update(force_properties)
7fc3fa05 1337
0563f7ac 1338 # Extracted info may not be a video result (i.e.
1339 # info.get('_type', 'video') != video) but rather an url or
1340 # url_transparent. In such cases outer metadata (from ie_result)
1341 # should be propagated to inner one (info). For this to happen
1342 # _type of info should be overridden with url_transparent. This
067aa17e 1343 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac 1344 if new_result.get('_type') == 'url':
1345 new_result['_type'] = 'url_transparent'
7fc3fa05 1346
1347 return self.process_ie_result(
1348 new_result, download=download, extra_info=extra_info)
40fcba5e 1349 elif result_type in ('playlist', 'multi_video'):
30a074c2 1350 # Protect from infinite recursion due to recursively nested playlists
1351 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1352 webpage_url = ie_result['webpage_url']
1353 if webpage_url in self._playlist_urls:
7e85e872 1354 self.to_screen(
30a074c2 1355 '[download] Skipping already downloaded playlist: %s'
 1356 % (ie_result.get('title') or ie_result.get('id')))
1357 return
7e85e872 1358
30a074c2 1359 self._playlist_level += 1
1360 self._playlist_urls.add(webpage_url)
bc516a3f 1361 self._sanitize_thumbnails(ie_result)
30a074c2 1362 try:
1363 return self.__process_playlist(ie_result, download)
1364 finally:
1365 self._playlist_level -= 1
1366 if not self._playlist_level:
1367 self._playlist_urls.clear()
8222d8de 1368 elif result_type == 'compat_list':
c9bf4114 1369 self.report_warning(
1370 'Extractor %s returned a compat_list result. '
1371 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1372
8222d8de 1373 def _fixup(r):
b868936c 1374 self.add_extra_info(r, {
1375 'extractor': ie_result['extractor'],
1376 'webpage_url': ie_result['webpage_url'],
1377 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1378 'extractor_key': ie_result['extractor_key'],
1379 })
8222d8de 1380 return r
1381 ie_result['entries'] = [
b6c45014 1382 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de 1383 for r in ie_result['entries']
1384 ]
1385 return ie_result
1386 else:
1387 raise Exception('Invalid result type: %s' % result_type)
1388
e92caff5 1389 def _ensure_dir_exists(self, path):
1390 return make_dir(path, self.report_error)
1391
30a074c2 1392 def __process_playlist(self, ie_result, download):
1393 # We process each entry in the playlist
1394 playlist = ie_result.get('title') or ie_result.get('id')
1395 self.to_screen('[download] Downloading playlist: %s' % playlist)
1396
498f5606 1397 if 'entries' not in ie_result:
1398 raise EntryNotInPlaylist()
1399 incomplete_entries = bool(ie_result.get('requested_entries'))
1400 if incomplete_entries:
1401 def fill_missing_entries(entries, indexes):
 1402 ret = [None] * max(indexes)
1403 for i, entry in zip(indexes, entries):
1404 ret[i - 1] = entry
1405 return ret
1406 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
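# e.g. entries [A, B] with requested_entries [2, 4] become [None, A, None, B],
# so that each entry keeps its original position in the playlist.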
02fd60d3 1407
30a074c2 1408 playlist_results = []
1409
56a8fb4f 1410 playliststart = self.params.get('playliststart', 1)
30a074c2 1411 playlistend = self.params.get('playlistend')
1412 # For backwards compatibility, interpret -1 as whole list
1413 if playlistend == -1:
1414 playlistend = None
1415
1416 playlistitems_str = self.params.get('playlist_items')
1417 playlistitems = None
1418 if playlistitems_str is not None:
1419 def iter_playlistitems(format):
1420 for string_segment in format.split(','):
1421 if '-' in string_segment:
1422 start, end = string_segment.split('-')
1423 for item in range(int(start), int(end) + 1):
1424 yield int(item)
1425 else:
1426 yield int(string_segment)
1427 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
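# e.g. --playlist-items '1-3,7' yields [1, 2, 3, 7]; orderedSet drops duplicates
# while preserving the order in which the items were requested.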
1428
1429 ie_entries = ie_result['entries']
56a8fb4f 1430 msg = (
1431 'Downloading %d videos' if not isinstance(ie_entries, list)
1432 else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
1433 if not isinstance(ie_entries, (list, PagedList)):
1434 ie_entries = LazyList(ie_entries)
1435
50fed816 1436 def get_entry(i):
1437 return YoutubeDL.__handle_extraction_exceptions(
cc9d1493 1438 lambda self, i: ie_entries[i - 1],
1439 False
50fed816 1440 )(self, i)
1441
56a8fb4f 1442 entries = []
1443 for i in playlistitems or itertools.count(playliststart):
1444 if playlistitems is None and playlistend is not None and playlistend < i:
1445 break
1446 entry = None
1447 try:
50fed816 1448 entry = get_entry(i)
56a8fb4f 1449 if entry is None:
498f5606 1450 raise EntryNotInPlaylist()
56a8fb4f 1451 except (IndexError, EntryNotInPlaylist):
1452 if incomplete_entries:
1453 raise EntryNotInPlaylist()
1454 elif not playlistitems:
1455 break
1456 entries.append(entry)
120fe513 1457 try:
1458 if entry is not None:
1459 self._match_entry(entry, incomplete=True, silent=True)
1460 except (ExistingVideoReached, RejectedVideoReached):
1461 break
56a8fb4f 1462 ie_result['entries'] = entries
30a074c2 1463
56a8fb4f 1464 # Save playlist_index before re-ordering
1465 entries = [
9e598870 1466 ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
56a8fb4f 1467 for i, entry in enumerate(entries, 1)
1468 if entry is not None]
1469 n_entries = len(entries)
498f5606 1470
498f5606 1471 if not playlistitems and (playliststart or playlistend):
56a8fb4f 1472 playlistitems = list(range(playliststart, playliststart + n_entries))
498f5606 1473 ie_result['requested_entries'] = playlistitems
1474
1475 if self.params.get('allow_playlist_files', True):
1476 ie_copy = {
1477 'playlist': playlist,
1478 'playlist_id': ie_result.get('id'),
1479 'playlist_title': ie_result.get('title'),
1480 'playlist_uploader': ie_result.get('uploader'),
1481 'playlist_uploader_id': ie_result.get('uploader_id'),
71729754 1482 'playlist_index': 0,
498f5606 1483 }
1484 ie_copy.update(dict(ie_result))
1485
1486 if self.params.get('writeinfojson', False):
1487 infofn = self.prepare_filename(ie_copy, 'pl_infojson')
1488 if not self._ensure_dir_exists(encodeFilename(infofn)):
1489 return
1490 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
1491 self.to_screen('[info] Playlist metadata is already present')
1492 else:
1493 self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
1494 try:
8012d892 1495 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
498f5606 1496 except (OSError, IOError):
1497 self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
1498
681de68e 1499 # TODO: This should be passed to ThumbnailsConvertor if necessary
1500 self._write_thumbnails(ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1501
498f5606 1502 if self.params.get('writedescription', False):
1503 descfn = self.prepare_filename(ie_copy, 'pl_description')
1504 if not self._ensure_dir_exists(encodeFilename(descfn)):
1505 return
1506 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
1507 self.to_screen('[info] Playlist description is already present')
1508 elif ie_result.get('description') is None:
1509 self.report_warning('There\'s no playlist description to write.')
1510 else:
1511 try:
1512 self.to_screen('[info] Writing playlist description to: ' + descfn)
1513 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1514 descfile.write(ie_result['description'])
1515 except (OSError, IOError):
1516 self.report_error('Cannot write playlist description file ' + descfn)
1517 return
30a074c2 1518
1519 if self.params.get('playlistreverse', False):
1520 entries = entries[::-1]
30a074c2 1521 if self.params.get('playlistrandom', False):
1522 random.shuffle(entries)
1523
1524 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1525
56a8fb4f 1526 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
26e2805c 1527 failures = 0
1528 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
71729754 1529 for i, entry_tuple in enumerate(entries, 1):
1530 playlist_index, entry = entry_tuple
9e598870 1531 if 'playlist-index' in self.params.get('compat_opts', []):
53ed7066 1532 playlist_index = playlistitems[i - 1] if playlistitems else i
30a074c2 1533 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1534 # This __x_forwarded_for_ip thing is a bit ugly but requires
1535 # minimal changes
1536 if x_forwarded_for:
1537 entry['__x_forwarded_for_ip'] = x_forwarded_for
1538 extra = {
1539 'n_entries': n_entries,
f59ae581 1540 '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
71729754 1541 'playlist_index': playlist_index,
1542 'playlist_autonumber': i,
30a074c2 1543 'playlist': playlist,
1544 'playlist_id': ie_result.get('id'),
1545 'playlist_title': ie_result.get('title'),
1546 'playlist_uploader': ie_result.get('uploader'),
1547 'playlist_uploader_id': ie_result.get('uploader_id'),
30a074c2 1548 'extractor': ie_result['extractor'],
1549 'webpage_url': ie_result['webpage_url'],
1550 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1551 'extractor_key': ie_result['extractor_key'],
1552 }
1553
1554 if self._match_entry(entry, incomplete=True) is not None:
1555 continue
1556
1557 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1558 if not entry_result:
1559 failures += 1
1560 if failures >= max_failures:
1561 self.report_error(
1562 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
1563 break
30a074c2 1564 # TODO: skip failed (empty) entries?
1565 playlist_results.append(entry_result)
1566 ie_result['entries'] = playlist_results
1567 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1568 return ie_result
1569
a0566bbf 1570 @__handle_extraction_exceptions
1571 def __process_iterable_entry(self, entry, download, extra_info):
1572 return self.process_ie_result(
1573 entry, download=download, extra_info=extra_info)
1574
67134eab 1575 def _build_format_filter(self, filter_spec):
1576 " Returns a function to filter the formats according to the filter_spec "
083c9df9 1577
1578 OPERATORS = {
1579 '<': operator.lt,
1580 '<=': operator.le,
1581 '>': operator.gt,
1582 '>=': operator.ge,
1583 '=': operator.eq,
1584 '!=': operator.ne,
1585 }
67134eab 1586 operator_rex = re.compile(r'''(?x)\s*
187986a8 1587 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1588 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1589 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1590 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1591 m = operator_rex.fullmatch(filter_spec)
9ddb6925 1592 if m:
1593 try:
1594 comparison_value = int(m.group('value'))
1595 except ValueError:
1596 comparison_value = parse_filesize(m.group('value'))
1597 if comparison_value is None:
1598 comparison_value = parse_filesize(m.group('value') + 'B')
1599 if comparison_value is None:
1600 raise ValueError(
1601 'Invalid value %r in format specification %r' % (
67134eab 1602 m.group('value'), filter_spec))
9ddb6925 1603 op = OPERATORS[m.group('op')]
1604
083c9df9 1605 if not m:
9ddb6925 1606 STR_OPERATORS = {
1607 '=': operator.eq,
10d33b34 1608 '^=': lambda attr, value: attr.startswith(value),
1609 '$=': lambda attr, value: attr.endswith(value),
1610 '*=': lambda attr, value: value in attr,
9ddb6925 1611 }
187986a8 1612 str_operator_rex = re.compile(r'''(?x)\s*
1613 (?P<key>[a-zA-Z0-9._-]+)\s*
1614 (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1615 (?P<value>[a-zA-Z0-9._-]+)\s*
9ddb6925 1616 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1617 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925 1618 if m:
1619 comparison_value = m.group('value')
2cc779f4 1620 str_op = STR_OPERATORS[m.group('op')]
1621 if m.group('negation'):
e118a879 1622 op = lambda attr, value: not str_op(attr, value)
2cc779f4 1623 else:
1624 op = str_op
083c9df9 1625
9ddb6925 1626 if not m:
187986a8 1627 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9 1628
1629 def _filter(f):
1630 actual_value = f.get(m.group('key'))
1631 if actual_value is None:
1632 return m.group('none_inclusive')
1633 return op(actual_value, comparison_value)
67134eab 1634 return _filter
1635
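# e.g. _build_format_filter('filesize<50M') returns a predicate that keeps only
# formats whose filesize is below 50M; formats that do not report a filesize are
# rejected unless the filter is written with '?', as in 'filesize<?50M'.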
0017d9ad 1636 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1637
af0f7428 1638 def can_merge():
1639 merger = FFmpegMergerPP(self)
1640 return merger.available and merger.can_merge()
1641
91ebc640 1642 prefer_best = (
b7b04c78 1643 not self.params.get('simulate')
91ebc640 1644 and download
1645 and (
1646 not can_merge()
19807826 1647 or info_dict.get('is_live', False)
de6000d9 1648 or self.outtmpl_dict['default'] == '-'))
53ed7066 1649 compat = (
1650 prefer_best
1651 or self.params.get('allow_multiple_audio_streams', False)
1652 or 'format-spec' in self.params.get('compat_opts', []))
91ebc640 1653
1654 return (
53ed7066 1655 'best/bestvideo+bestaudio' if prefer_best
1656 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1657 else 'bestvideo+bestaudio/best')
0017d9ad 1658
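# In practice this yields 'bestvideo*+bestaudio/best', or 'bestvideo+bestaudio/best'
# under the 'format-spec' compat option, and falls back to 'best/bestvideo+bestaudio'
# when merging is not possible (no working ffmpeg, live stream, or output to stdout).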
67134eab 1659 def build_format_selector(self, format_spec):
1660 def syntax_error(note, start):
1661 message = (
1662 'Invalid format specification: '
1663 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1664 return SyntaxError(message)
1665
1666 PICKFIRST = 'PICKFIRST'
1667 MERGE = 'MERGE'
1668 SINGLE = 'SINGLE'
0130afb7 1669 GROUP = 'GROUP'
67134eab 1670 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1671
91ebc640 1672 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1673 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1674
e8e73840 1675 check_formats = self.params.get('check_formats')
1676
67134eab 1677 def _parse_filter(tokens):
1678 filter_parts = []
1679 for type, string, start, _, _ in tokens:
1680 if type == tokenize.OP and string == ']':
1681 return ''.join(filter_parts)
1682 else:
1683 filter_parts.append(string)
1684
232541df 1685 def _remove_unused_ops(tokens):
17cc1534 1686 # Remove operators that we don't use and join them with the surrounding strings
232541df 1687 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1688 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1689 last_string, last_start, last_end, last_line = None, None, None, None
1690 for type, string, start, end, line in tokens:
1691 if type == tokenize.OP and string == '[':
1692 if last_string:
1693 yield tokenize.NAME, last_string, last_start, last_end, last_line
1694 last_string = None
1695 yield type, string, start, end, line
1696 # everything inside brackets will be handled by _parse_filter
1697 for type, string, start, end, line in tokens:
1698 yield type, string, start, end, line
1699 if type == tokenize.OP and string == ']':
1700 break
1701 elif type == tokenize.OP and string in ALLOWED_OPS:
1702 if last_string:
1703 yield tokenize.NAME, last_string, last_start, last_end, last_line
1704 last_string = None
1705 yield type, string, start, end, line
1706 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1707 if not last_string:
1708 last_string = string
1709 last_start = start
1710 last_end = end
1711 else:
1712 last_string += string
1713 if last_string:
1714 yield tokenize.NAME, last_string, last_start, last_end, last_line
1715
cf2ac6df 1716 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab 1717 selectors = []
1718 current_selector = None
1719 for type, string, start, _, _ in tokens:
1720 # ENCODING is only defined in python 3.x
1721 if type == getattr(tokenize, 'ENCODING', None):
1722 continue
1723 elif type in [tokenize.NAME, tokenize.NUMBER]:
1724 current_selector = FormatSelector(SINGLE, string, [])
1725 elif type == tokenize.OP:
cf2ac6df 1726 if string == ')':
1727 if not inside_group:
1728 # ')' will be handled by the parentheses group
1729 tokens.restore_last_token()
67134eab 1730 break
cf2ac6df 1731 elif inside_merge and string in ['/', ',']:
0130afb7 1732 tokens.restore_last_token()
1733 break
cf2ac6df 1734 elif inside_choice and string == ',':
1735 tokens.restore_last_token()
1736 break
1737 elif string == ',':
0a31a350 1738 if not current_selector:
1739 raise syntax_error('"," must follow a format selector', start)
67134eab 1740 selectors.append(current_selector)
1741 current_selector = None
1742 elif string == '/':
d96d604e 1743 if not current_selector:
1744 raise syntax_error('"/" must follow a format selector', start)
67134eab 1745 first_choice = current_selector
cf2ac6df 1746 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1747 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab 1748 elif string == '[':
1749 if not current_selector:
1750 current_selector = FormatSelector(SINGLE, 'best', [])
1751 format_filter = _parse_filter(tokens)
1752 current_selector.filters.append(format_filter)
0130afb7 1753 elif string == '(':
1754 if current_selector:
1755 raise syntax_error('Unexpected "("', start)
cf2ac6df 1756 group = _parse_format_selection(tokens, inside_group=True)
1757 current_selector = FormatSelector(GROUP, group, [])
67134eab 1758 elif string == '+':
d03cfdce 1759 if not current_selector:
1760 raise syntax_error('Unexpected "+"', start)
1761 selector_1 = current_selector
1762 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1763 if not selector_2:
1764 raise syntax_error('Expected a selector', start)
1765 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab 1766 else:
1767 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1768 elif type == tokenize.ENDMARKER:
1769 break
1770 if current_selector:
1771 selectors.append(current_selector)
1772 return selectors
1773
f8d4ad9a 1774 def _merge(formats_pair):
1775 format_1, format_2 = formats_pair
1776
1777 formats_info = []
1778 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1779 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1780
1781 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 1782 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 1783 for (i, fmt_info) in enumerate(formats_info):
551f9388 1784 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
1785 formats_info.pop(i)
1786 continue
1787 for aud_vid in ['audio', 'video']:
f8d4ad9a 1788 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1789 if get_no_more[aud_vid]:
1790 formats_info.pop(i)
f5510afe 1791 break
f8d4ad9a 1792 get_no_more[aud_vid] = True
1793
1794 if len(formats_info) == 1:
1795 return formats_info[0]
1796
1797 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1798 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1799
1800 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1801 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1802
1803 output_ext = self.params.get('merge_output_format')
1804 if not output_ext:
1805 if the_only_video:
1806 output_ext = the_only_video['ext']
1807 elif the_only_audio and not video_fmts:
1808 output_ext = the_only_audio['ext']
1809 else:
1810 output_ext = 'mkv'
1811
1812 new_dict = {
1813 'requested_formats': formats_info,
1814 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1815 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
1816 'ext': output_ext,
1817 }
1818
1819 if the_only_video:
1820 new_dict.update({
1821 'width': the_only_video.get('width'),
1822 'height': the_only_video.get('height'),
1823 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1824 'fps': the_only_video.get('fps'),
1825 'vcodec': the_only_video.get('vcodec'),
1826 'vbr': the_only_video.get('vbr'),
1827 'stretched_ratio': the_only_video.get('stretched_ratio'),
1828 })
1829
1830 if the_only_audio:
1831 new_dict.update({
1832 'acodec': the_only_audio.get('acodec'),
1833 'abr': the_only_audio.get('abr'),
1834 })
1835
1836 return new_dict
1837
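# e.g. merging a video-only and an audio-only format yields a single pseudo-format
# whose 'requested_formats' lists both and whose format_id is 'VIDEO+AUDIO'; its ext
# is 'merge_output_format' if set, otherwise the video format's ext (falling back to 'mkv').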
e8e73840 1838 def _check_formats(formats):
981052c9 1839 if not check_formats:
1840 yield from formats
b5ac45b1 1841 return
e8e73840 1842 for f in formats:
1843 self.to_screen('[info] Testing format %s' % f['format_id'])
21cd8fae 1844 temp_file = tempfile.NamedTemporaryFile(
1845 suffix='.tmp', delete=False,
1846 dir=self.get_output_path('temp') or None)
1847 temp_file.close()
fe346461 1848 try:
981052c9 1849 success, _ = self.dl(temp_file.name, f, test=True)
1850 except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
1851 success = False
fe346461 1852 finally:
21cd8fae 1853 if os.path.exists(temp_file.name):
1854 try:
1855 os.remove(temp_file.name)
1856 except OSError:
1857 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
981052c9 1858 if success:
e8e73840 1859 yield f
1860 else:
1861 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1862
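# When --check-formats is given, _check_formats filters the candidates by attempting
# a small test download of each one into a temporary file and skipping those that fail.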
67134eab 1863 def _build_selector_function(selector):
909d24dd 1864 if isinstance(selector, list): # ,
67134eab 1865 fs = [_build_selector_function(s) for s in selector]
1866
317f7ab6 1867 def selector_function(ctx):
67134eab 1868 for f in fs:
981052c9 1869 yield from f(ctx)
67134eab 1870 return selector_function
909d24dd 1871
1872 elif selector.type == GROUP: # ()
0130afb7 1873 selector_function = _build_selector_function(selector.selector)
909d24dd 1874
1875 elif selector.type == PICKFIRST: # /
67134eab 1876 fs = [_build_selector_function(s) for s in selector.selector]
1877
317f7ab6 1878 def selector_function(ctx):
67134eab 1879 for f in fs:
317f7ab6 1880 picked_formats = list(f(ctx))
67134eab 1881 if picked_formats:
1882 return picked_formats
1883 return []
67134eab 1884
981052c9 1885 elif selector.type == MERGE: # +
1886 selector_1, selector_2 = map(_build_selector_function, selector.selector)
1887
1888 def selector_function(ctx):
1889 for pair in itertools.product(
1890 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
1891 yield _merge(pair)
1892
909d24dd 1893 elif selector.type == SINGLE: # atom
598d185d 1894 format_spec = selector.selector or 'best'
909d24dd 1895
f8d4ad9a 1896 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 1897 if format_spec == 'all':
1898 def selector_function(ctx):
981052c9 1899 yield from _check_formats(ctx['formats'])
f8d4ad9a 1900 elif format_spec == 'mergeall':
1901 def selector_function(ctx):
981052c9 1902 formats = list(_check_formats(ctx['formats']))
e01d6aa4 1903 if not formats:
1904 return
921b76ca 1905 merged_format = formats[-1]
1906 for f in formats[-2::-1]:
f8d4ad9a 1907 merged_format = _merge((merged_format, f))
1908 yield merged_format
909d24dd 1909
1910 else:
e8e73840 1911 format_fallback, format_reverse, format_idx = False, True, 1
eff63539 1912 mobj = re.match(
1913 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1914 format_spec)
1915 if mobj is not None:
1916 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 1917 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 1918 format_type = (mobj.group('type') or [None])[0]
1919 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1920 format_modified = mobj.group('mod') is not None
909d24dd 1921
1922 format_fallback = not format_type and not format_modified # for b, w
8326b00a 1923 _filter_f = (
eff63539 1924 (lambda f: f.get('%scodec' % format_type) != 'none')
1925 if format_type and format_modified # bv*, ba*, wv*, wa*
1926 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1927 if format_type # bv, ba, wv, wa
1928 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1929 if not format_modified # b, w
8326b00a 1930 else lambda f: True) # b*, w*
1931 filter_f = lambda f: _filter_f(f) and (
1932 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 1933 else:
909d24dd 1934 filter_f = ((lambda f: f.get('ext') == format_spec)
1935 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1936 else (lambda f: f.get('format_id') == format_spec)) # id
1937
1938 def selector_function(ctx):
1939 formats = list(ctx['formats'])
909d24dd 1940 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
e8e73840 1941 if format_fallback and ctx['incomplete_formats'] and not matches:
909d24dd 1942 # for extractors with incomplete formats (audio only (soundcloud)
1943 # or video only (imgur)) best/worst will fallback to
1944 # best/worst {video,audio}-only format
e8e73840 1945 matches = formats
981052c9 1946 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
1947 try:
e8e73840 1948 yield matches[format_idx - 1]
981052c9 1949 except IndexError:
1950 return
083c9df9 1951
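# e.g. 'ba.2' selects the 2nd best audio-only format, 'bv*' the best format that
# contains a video stream, while a bare extension ('mp4') or an exact format_id
# matches literally via the filter_f fallback above.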
67134eab 1952 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1953
317f7ab6 1954 def final_selector(ctx):
1955 ctx_copy = copy.deepcopy(ctx)
67134eab 1956 for _filter in filters:
317f7ab6 1957 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1958 return selector_function(ctx_copy)
67134eab 1959 return final_selector
083c9df9 1960
67134eab 1961 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1962 try:
232541df 1963 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7 1964 except tokenize.TokenError:
1965 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1966
1967 class TokenIterator(object):
1968 def __init__(self, tokens):
1969 self.tokens = tokens
1970 self.counter = 0
1971
1972 def __iter__(self):
1973 return self
1974
1975 def __next__(self):
1976 if self.counter >= len(self.tokens):
1977 raise StopIteration()
1978 value = self.tokens[self.counter]
1979 self.counter += 1
1980 return value
1981
1982 next = __next__
1983
1984 def restore_last_token(self):
1985 self.counter -= 1
1986
1987 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1988 return _build_selector_function(parsed_selector)
a9c58ad9 1989
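# e.g. build_format_selector('bv*+ba/b') returns a selector that first tries to merge
# the best video-containing format with the best audio-only one and, failing that,
# falls back to the best single format that has both audio and video.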
e5660ee6 1990 def _calc_headers(self, info_dict):
1991 res = std_headers.copy()
1992
1993 add_headers = info_dict.get('http_headers')
1994 if add_headers:
1995 res.update(add_headers)
1996
1997 cookies = self._calc_cookies(info_dict)
1998 if cookies:
1999 res['Cookie'] = cookies
2000
0016b84e 2001 if 'X-Forwarded-For' not in res:
2002 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
2003 if x_forwarded_for_ip:
2004 res['X-Forwarded-For'] = x_forwarded_for_ip
2005
e5660ee6 2006 return res
2007
2008 def _calc_cookies(self, info_dict):
5c2266df 2009 pr = sanitized_Request(info_dict['url'])
e5660ee6 2010 self.cookiejar.add_cookie_header(pr)
662435f7 2011 return pr.get_header('Cookie')
e5660ee6 2012
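# _calc_headers merges std_headers with per-format 'http_headers' and attaches the
# cookies and X-Forwarded-For IP used during extraction, so that external programs
# reading the JSON output see the same request headers.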
b0249bca 2013 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2014 thumbnails = info_dict.get('thumbnails')
2015 if thumbnails is None:
2016 thumbnail = info_dict.get('thumbnail')
2017 if thumbnail:
2018 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2019 if thumbnails:
2020 thumbnails.sort(key=lambda t: (
2021 t.get('preference') if t.get('preference') is not None else -1,
2022 t.get('width') if t.get('width') is not None else -1,
2023 t.get('height') if t.get('height') is not None else -1,
2024 t.get('id') if t.get('id') is not None else '',
2025 t.get('url')))
b0249bca 2026
0ba692ac 2027 def thumbnail_tester():
2028 if self.params.get('check_formats'):
cca80fe6 2029 test_all = True
2030 to_screen = lambda msg: self.to_screen(f'[info] {msg}')
0ba692ac 2031 else:
cca80fe6 2032 test_all = False
0ba692ac 2033 to_screen = self.write_debug
2034
2035 def test_thumbnail(t):
cca80fe6 2036 if not test_all and not t.get('_test_url'):
2037 return True
0ba692ac 2038 to_screen('Testing thumbnail %s' % t['id'])
2039 try:
2040 self.urlopen(HEADRequest(t['url']))
2041 except network_exceptions as err:
2042 to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
2043 t['id'], t['url'], error_to_compat_str(err)))
2044 return False
2045 return True
2046
2047 return test_thumbnail
b0249bca 2048
bc516a3f 2049 for i, t in enumerate(thumbnails):
bc516a3f 2050 if t.get('id') is None:
2051 t['id'] = '%d' % i
b0249bca 2052 if t.get('width') and t.get('height'):
2053 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2054 t['url'] = sanitize_url(t['url'])
0ba692ac 2055
2056 if self.params.get('check_formats') is not False:
2057 info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
2058 else:
2059 info_dict['thumbnails'] = thumbnails
bc516a3f 2060
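# Thumbnails are kept sorted worst-first (by preference, width, height, id, url), so
# callers can treat the last entry as the best one; URLs marked for testing (all of
# them when check_formats is enabled) are verified with a HEAD request first.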
dd82ffea 2061 def process_video_result(self, info_dict, download=True):
2062 assert info_dict.get('_type', 'video') == 'video'
2063
bec1fad2 2064 if 'id' not in info_dict:
2065 raise ExtractorError('Missing "id" field in extractor result')
2066 if 'title' not in info_dict:
2067 raise ExtractorError('Missing "title" field in extractor result')
2068
c9969434 2069 def report_force_conversion(field, field_not, conversion):
2070 self.report_warning(
2071 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2072 % (field, field_not, conversion))
2073
2074 def sanitize_string_field(info, string_field):
2075 field = info.get(string_field)
2076 if field is None or isinstance(field, compat_str):
2077 return
2078 report_force_conversion(string_field, 'a string', 'string')
2079 info[string_field] = compat_str(field)
2080
2081 def sanitize_numeric_fields(info):
2082 for numeric_field in self._NUMERIC_FIELDS:
2083 field = info.get(numeric_field)
2084 if field is None or isinstance(field, compat_numeric_types):
2085 continue
2086 report_force_conversion(numeric_field, 'numeric', 'int')
2087 info[numeric_field] = int_or_none(field)
2088
2089 sanitize_string_field(info_dict, 'id')
2090 sanitize_numeric_fields(info_dict)
be6217b2 2091
dd82ffea 2092 if 'playlist' not in info_dict:
2093 # It isn't part of a playlist
2094 info_dict['playlist'] = None
2095 info_dict['playlist_index'] = None
2096
bc516a3f 2097 self._sanitize_thumbnails(info_dict)
d5519808 2098
536a55da 2099 thumbnail = info_dict.get('thumbnail')
bc516a3f 2100 thumbnails = info_dict.get('thumbnails')
536a55da 2101 if thumbnail:
2102 info_dict['thumbnail'] = sanitize_url(thumbnail)
2103 elif thumbnails:
d5519808 2104 info_dict['thumbnail'] = thumbnails[-1]['url']
2105
ae30b840 2106 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b 2107 info_dict['display_id'] = info_dict['id']
2108
10db0d2f 2109 for ts_key, date_key in (
2110 ('timestamp', 'upload_date'),
2111 ('release_timestamp', 'release_date'),
2112 ):
2113 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2114 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2115 # see http://bugs.python.org/issue1646728)
2116 try:
2117 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2118 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2119 except (ValueError, OverflowError, OSError):
2120 pass
9d2ecdbc 2121
ae30b840 2122 live_keys = ('is_live', 'was_live')
2123 live_status = info_dict.get('live_status')
2124 if live_status is None:
2125 for key in live_keys:
2126 if info_dict.get(key) is False:
2127 continue
2128 if info_dict.get(key):
2129 live_status = key
2130 break
2131 if all(info_dict.get(key) is False for key in live_keys):
2132 live_status = 'not_live'
2133 if live_status:
2134 info_dict['live_status'] = live_status
2135 for key in live_keys:
2136 if info_dict.get(key) is None:
2137 info_dict[key] = (live_status == key)
2138
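# e.g. an extractor that only sets is_live=True ends up with live_status 'is_live',
# and the missing was_live flag is then filled in as False.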
33d2fc2f 2139 # Auto generate title fields corresponding to the *_number fields when missing
2140 # in order to always have clean titles. This is very common for TV series.
2141 for field in ('chapter', 'season', 'episode'):
2142 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2143 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
2144
05108a49 2145 for cc_kind in ('subtitles', 'automatic_captions'):
2146 cc = info_dict.get(cc_kind)
2147 if cc:
2148 for _, subtitle in cc.items():
2149 for subtitle_format in subtitle:
2150 if subtitle_format.get('url'):
2151 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2152 if subtitle_format.get('ext') is None:
2153 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2154
2155 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2156 subtitles = info_dict.get('subtitles')
4bba3716 2157
360e1ca5 2158 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2159 info_dict['id'], subtitles, automatic_captions)
a504ced0 2160
dd82ffea 2161 # We now pick which formats have to be downloaded
2162 if info_dict.get('formats') is None:
2163 # There's only one format available
2164 formats = [info_dict]
2165 else:
2166 formats = info_dict['formats']
2167
88acdbc2 2168 if not self.params.get('allow_unplayable_formats'):
2169 formats = [f for f in formats if not f.get('has_drm')]
2170 info_dict['__has_drm'] = len(info_dict.get('formats') or ['']) > len(formats)
2171
db95dc13 2172 if not formats:
88acdbc2 2173 self.raise_no_formats(info_dict.get('__has_drm'))
db95dc13 2174
73af5cc8 2175 def is_wellformed(f):
2176 url = f.get('url')
a5ac0c47 2177 if not url:
73af5cc8 2178 self.report_warning(
2179 '"url" field is missing or empty - skipping format, '
2180 'there is an error in extractor')
a5ac0c47 2181 return False
2182 if isinstance(url, bytes):
2183 sanitize_string_field(f, 'url')
2184 return True
73af5cc8 2185
2186 # Filter out malformed formats for better extraction robustness
2187 formats = list(filter(is_wellformed, formats))
2188
181c7053 2189 formats_dict = {}
2190
dd82ffea 2191 # We check that all the formats have the format and format_id fields
db95dc13 2192 for i, format in enumerate(formats):
c9969434 2193 sanitize_string_field(format, 'format_id')
2194 sanitize_numeric_fields(format)
dcf77cf1 2195 format['url'] = sanitize_url(format['url'])
e74e3b63 2196 if not format.get('format_id'):
8016c922 2197 format['format_id'] = compat_str(i)
e2effb08 2198 else:
2199 # Sanitize format_id from characters used in format selector expression
ec85ded8 2200 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053 2201 format_id = format['format_id']
2202 if format_id not in formats_dict:
2203 formats_dict[format_id] = []
2204 formats_dict[format_id].append(format)
2205
2206 # Make sure all formats have unique format_id
2207 for format_id, ambiguous_formats in formats_dict.items():
2208 if len(ambiguous_formats) > 1:
2209 for i, format in enumerate(ambiguous_formats):
2210 format['format_id'] = '%s-%d' % (format_id, i)
2211
2212 for i, format in enumerate(formats):
8c51aa65 2213 if format.get('format') is None:
6febd1c1 2214 format['format'] = '{id} - {res}{note}'.format(
8c51aa65 2215 id=format['format_id'],
2216 res=self.format_resolution(format),
b868936c 2217 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2218 )
c1002e96 2219 # Automatically determine file extension if missing
5b1d8575 2220 if format.get('ext') is None:
cce929ea 2221 format['ext'] = determine_ext(format['url']).lower()
b5559424 2222 # Automatically determine protocol if missing (useful for format
2223 # selection purposes)
6f0be937 2224 if format.get('protocol') is None:
b5559424 2225 format['protocol'] = determine_protocol(format)
e5660ee6 2226 # Add HTTP headers, so that external programs can use them from the
2227 # json output
2228 full_format_info = info_dict.copy()
2229 full_format_info.update(format)
2230 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e 2231 # Remove private housekeeping stuff
2232 if '__x_forwarded_for_ip' in info_dict:
2233 del info_dict['__x_forwarded_for_ip']
dd82ffea 2234
4bcc7bd1 2235 # TODO Central sorting goes here
99e206d5 2236
88acdbc2 2237 if not formats or formats[0] is not info_dict:
b3d9ef88 2238 # only set the 'formats' field if the original info_dict lists them
2239 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 2240 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2241 # which can't be exported to json
b3d9ef88 2242 info_dict['formats'] = formats
4ec82a72 2243
2244 info_dict, _ = self.pre_process(info_dict)
2245
b7b04c78 2246 if self.params.get('list_thumbnails'):
2247 self.list_thumbnails(info_dict)
2248 if self.params.get('listformats'):
86c66b2d 2249 if not info_dict.get('formats') and not info_dict.get('url'):
88acdbc2 2250 self.to_screen('%s has no formats' % info_dict['id'])
2251 else:
2252 self.list_formats(info_dict)
b7b04c78 2253 if self.params.get('listsubtitles'):
2254 if 'automatic_captions' in info_dict:
2255 self.list_subtitles(
2256 info_dict['id'], automatic_captions, 'automatic captions')
2257 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2258 list_only = self.params.get('simulate') is None and (
2259 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
169dbde9 2260 if list_only:
b7b04c78 2261 # Without this printing, -F --print-json will not work
169dbde9 2262 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
bfaae0a7 2263 return
2264
187986a8 2265 format_selector = self.format_selector
2266 if format_selector is None:
0017d9ad 2267 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2268 self.write_debug('Default format spec: %s' % req_format)
187986a8 2269 format_selector = self.build_format_selector(req_format)
317f7ab6 2270
2271 # While in format selection we may need to have an access to the original
2272 # format set in order to calculate some metrics or do some processing.
2273 # For now we need to be able to guess whether original formats provided
2274 # by extractor are incomplete or not (i.e. whether extractor provides only
2275 # video-only or audio-only formats) for proper formats selection for
2276 # extractors with such incomplete formats (see
067aa17e 2277 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6 2278 # Since formats may be filtered during format selection and may not match
2279 # the original formats the results may be incorrect. Thus original formats
2280 # or pre-calculated metrics should be passed to format selection routines
2281 # as well.
2282 # We will pass a context object containing all necessary additional data
2283 # instead of just formats.
2284 # This fixes incorrect format selection issue (see
067aa17e 2285 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 2286 incomplete_formats = (
317f7ab6 2287 # All formats are video-only or
3089bc74 2288 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 2289 # all formats are audio-only
3089bc74 2290 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
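# i.e. incomplete_formats is True when the extractor could only provide video-only or
# only audio-only formats; plain 'best'/'worst' then fall back to those instead of
# requiring a format with both streams.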
317f7ab6 2291
2292 ctx = {
2293 'formats': formats,
2294 'incomplete_formats': incomplete_formats,
2295 }
2296
2297 formats_to_download = list(format_selector(ctx))
dd82ffea 2298 if not formats_to_download:
b7da73eb 2299 if not self.params.get('ignore_no_formats_error'):
2300 raise ExtractorError('Requested format is not available', expected=True)
2301 else:
2302 self.report_warning('Requested format is not available')
4513a41a 2303 # Process what we can, even without any available formats.
2304 self.process_info(dict(info_dict))
b7da73eb 2305 elif download:
2306 self.to_screen(
07cce701 2307 '[info] %s: Downloading %d format(s): %s' % (
2308 info_dict['id'], len(formats_to_download),
2309 ", ".join([f['format_id'] for f in formats_to_download])))
b7da73eb 2310 for fmt in formats_to_download:
dd82ffea 2311 new_info = dict(info_dict)
4ec82a72 2312 # Save a reference to the original info_dict so that it can be modified in process_info if needed
2313 new_info['__original_infodict'] = info_dict
b7da73eb 2314 new_info.update(fmt)
dd82ffea 2315 self.process_info(new_info)
2316 # We update the info dict with the best quality format (backwards compatibility)
b7da73eb 2317 if formats_to_download:
2318 info_dict.update(formats_to_download[-1])
dd82ffea 2319 return info_dict
2320
98c70d6f 2321 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2322 """Select the requested subtitles and their format"""
98c70d6f 2323 available_subs = {}
2324 if normal_subtitles and self.params.get('writesubtitles'):
2325 available_subs.update(normal_subtitles)
2326 if automatic_captions and self.params.get('writeautomaticsub'):
2327 for lang, cap_info in automatic_captions.items():
360e1ca5 2328 if lang not in available_subs:
2329 available_subs[lang] = cap_info
2330
4d171848 2331 if (not self.params.get('writesubtitles') and not
2332 self.params.get('writeautomaticsub') or not
2333 available_subs):
2334 return None
a504ced0 2335
c32b0aab 2336 all_sub_langs = available_subs.keys()
a504ced0 2337 if self.params.get('allsubtitles', False):
c32b0aab 2338 requested_langs = all_sub_langs
2339 elif self.params.get('subtitleslangs', False):
2340 requested_langs = set()
2341 for lang in self.params.get('subtitleslangs'):
2342 if lang == 'all':
2343 requested_langs.update(all_sub_langs)
2344 continue
2345 discard = lang[0] == '-'
2346 if discard:
2347 lang = lang[1:]
2348 current_langs = filter(re.compile(lang + '$').match, all_sub_langs)
2349 if discard:
2350 for lang in current_langs:
2351 requested_langs.discard(lang)
2352 else:
2353 requested_langs.update(current_langs)
2354 elif 'en' in available_subs:
2355 requested_langs = ['en']
a504ced0 2356 else:
c32b0aab 2357 requested_langs = [list(all_sub_langs)[0]]
ad3dc496 2358 if requested_langs:
2359 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
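# e.g. --sub-langs 'en.*,-en-US' first adds every language matching the regex 'en.*'
# and then discards 'en-US'; a bare 'all' selects every available language.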
a504ced0 2360
2361 formats_query = self.params.get('subtitlesformat', 'best')
2362 formats_preference = formats_query.split('/') if formats_query else []
2363 subs = {}
2364 for lang in requested_langs:
2365 formats = available_subs.get(lang)
2366 if formats is None:
2367 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2368 continue
a504ced0 2369 for ext in formats_preference:
2370 if ext == 'best':
2371 f = formats[-1]
2372 break
2373 matches = list(filter(lambda f: f['ext'] == ext, formats))
2374 if matches:
2375 f = matches[-1]
2376 break
2377 else:
2378 f = formats[-1]
2379 self.report_warning(
2380 'No subtitle format found matching "%s" for language %s, '
2381 'using %s' % (formats_query, lang, f['ext']))
2382 subs[lang] = f
2383 return subs
2384
d06daf23 2385 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2386 def print_mandatory(field, actual_field=None):
2387 if actual_field is None:
2388 actual_field = field
d06daf23 2389 if (self.params.get('force%s' % field, False)
53c18592 2390 and (not incomplete or info_dict.get(actual_field) is not None)):
2391 self.to_stdout(info_dict[actual_field])
d06daf23 2392
2393 def print_optional(field):
2394 if (self.params.get('force%s' % field, False)
2395 and info_dict.get(field) is not None):
2396 self.to_stdout(info_dict[field])
2397
53c18592 2398 info_dict = info_dict.copy()
2399 if filename is not None:
2400 info_dict['filename'] = filename
2401 if info_dict.get('requested_formats') is not None:
2402 # For RTMP URLs, also include the playpath
2403 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2404 elif 'url' in info_dict:
2405 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2406
2b8a2973 2407 if self.params.get('forceprint') or self.params.get('forcejson'):
2408 self.post_extract(info_dict)
53c18592 2409 for tmpl in self.params.get('forceprint', []):
2410 if re.match(r'\w+$', tmpl):
2411 tmpl = '%({})s'.format(tmpl)
2412 tmpl, info_copy = self.prepare_outtmpl(tmpl, info_dict)
901130bb 2413 self.to_stdout(self.escape_outtmpl(tmpl) % info_copy)
53c18592 2414
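# e.g. --print title is shorthand for --print '%(title)s'; each template goes through
# the same output-template machinery as filenames before being written to stdout.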
d06daf23 2415 print_mandatory('title')
2416 print_mandatory('id')
53c18592 2417 print_mandatory('url', 'urls')
d06daf23 2418 print_optional('thumbnail')
2419 print_optional('description')
53c18592 2420 print_optional('filename')
b868936c 2421 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23 2422 self.to_stdout(formatSeconds(info_dict['duration']))
2423 print_mandatory('format')
53c18592 2424
2b8a2973 2425 if self.params.get('forcejson'):
6e84b215 2426 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2427
e8e73840 2428 def dl(self, name, info, subtitle=False, test=False):
88acdbc2 2429 if not info.get('url'):
2430 self.raise_no_formats(info.get('__has_drm'), forced=True)
e8e73840 2431
2432 if test:
2433 verbose = self.params.get('verbose')
2434 params = {
2435 'test': True,
2436 'quiet': not verbose,
2437 'verbose': verbose,
2438 'noprogress': not verbose,
2439 'nopart': True,
2440 'skip_unavailable_fragments': False,
2441 'keep_fragments': False,
2442 'overwrites': True,
2443 '_no_ytdl_file': True,
2444 }
2445 else:
2446 params = self.params
96fccc10 2447 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2448 if not test:
2449 for ph in self._progress_hooks:
2450 fd.add_progress_hook(ph)
18e674b4 2451 urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
2452 self.write_debug('Invoking downloader on "%s"' % urls)
e8e73840 2453 new_info = dict(info)
2454 if new_info.get('http_headers') is None:
2455 new_info['http_headers'] = self._calc_headers(new_info)
2456 return fd.download(name, new_info, subtitle)
2457
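# With test=True, dl() runs the downloader in a throw-away mode (quiet, no .part or
# .ytdl files, always overwriting) so that _check_formats can probe a format cheaply.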
8222d8de 2458 def process_info(self, info_dict):
2459 """Process a single resolved IE result."""
2460
2461 assert info_dict.get('_type', 'video') == 'video'
fd288278 2462
2463 max_downloads = self.params.get('max_downloads')
2464 if max_downloads is not None:
2465 if self._num_downloads >= int(max_downloads):
2466 raise MaxDownloadsReached()
8222d8de 2467
d06daf23 2468 # TODO: backward compatibility, to be removed
8222d8de 2469 info_dict['fulltitle'] = info_dict['title']
8222d8de 2470
4513a41a 2471 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de 2472 info_dict['format'] = info_dict['ext']
2473
c77495e3 2474 if self._match_entry(info_dict) is not None:
8222d8de 2475 return
2476
277d6ff5 2477 self.post_extract(info_dict)
fd288278 2478 self._num_downloads += 1
8222d8de 2479
dcf64d43 2480 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2481 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2482 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2483 files_to_move = {}
8222d8de 2484
2485 # Forced printings
4513a41a 2486 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2487
b7b04c78 2488 if self.params.get('simulate'):
2d30509f 2489 if self.params.get('force_write_download_archive', False):
2490 self.record_download_archive(info_dict)
2491
2492 # Do nothing else if in simulate mode
8222d8de 2493 return
2494
de6000d9 2495 if full_filename is None:
8222d8de 2496 return
2497
e92caff5 2498 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2499 return
e92caff5 2500 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de 2501 return
2502
2503 if self.params.get('writedescription', False):
de6000d9 2504 descfn = self.prepare_filename(info_dict, 'description')
e92caff5 2505 if not self._ensure_dir_exists(encodeFilename(descfn)):
0202b52a 2506 return
0c3d0f51 2507 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2508 self.to_screen('[info] Video description is already present')
f00fd51d 2509 elif info_dict.get('description') is None:
2510 self.report_warning('There\'s no description to write.')
7b6fefc9 2511 else:
2512 try:
6febd1c1 2513 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9 2514 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2515 descfile.write(info_dict['description'])
7b6fefc9 2516 except (OSError, IOError):
6febd1c1 2517 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2518 return
8222d8de 2519
1fb07d10 2520 if self.params.get('writeannotations', False):
de6000d9 2521 annofn = self.prepare_filename(info_dict, 'annotation')
e92caff5 2522 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2523 return
0c3d0f51 2524 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2525 self.to_screen('[info] Video annotations are already present')
ffddb112 2526 elif not info_dict.get('annotations'):
2527 self.report_warning('There are no annotations to write.')
7b6fefc9 2528 else:
2529 try:
6febd1c1 2530 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9 2531 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2532 annofile.write(info_dict['annotations'])
2533 except (KeyError, TypeError):
6febd1c1 2534 self.report_warning('There are no annotations to write.')
7b6fefc9 2535 except (OSError, IOError):
6febd1c1 2536 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2537 return
1fb07d10 2538
c4a91be7 2539 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2540 self.params.get('writeautomaticsub')])
c4a91be7 2541
c84dd8a9 2542 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de 2543 # subtitles download errors are already managed as troubles in relevant IE
2544 # that way it will silently go on when used with unsupporting IE
c84dd8a9 2545 subtitles = info_dict['requested_subtitles']
fa57af1e 2546 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0 2547 for sub_lang, sub_info in subtitles.items():
2548 sub_format = sub_info['ext']
56d868db 2549 sub_filename = subtitles_filename(temp_filename, sub_lang, sub_format, info_dict.get('ext'))
2550 sub_filename_final = subtitles_filename(
2551 self.prepare_filename(info_dict, 'subtitle'), sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2552 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2553 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
dcf64d43 2554 sub_info['filepath'] = sub_filename
0202b52a 2555 files_to_move[sub_filename] = sub_filename_final
a504ced0 2556 else:
0c9df79e 2557 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c 2558 if sub_info.get('data') is not None:
2559 try:
2560 # Use newline='' to prevent conversion of newline characters
067aa17e 2561 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c 2562 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2563 subfile.write(sub_info['data'])
dcf64d43 2564 sub_info['filepath'] = sub_filename
0202b52a 2565 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c 2566 except (OSError, IOError):
2567 self.report_error('Cannot write subtitles file ' + sub_filename)
2568 return
7b6fefc9 2569 else:
5ff1bc0c 2570 try:
e8e73840 2571 self.dl(sub_filename, sub_info.copy(), subtitle=True)
dcf64d43 2572 sub_info['filepath'] = sub_filename
0202b52a 2573 files_to_move[sub_filename] = sub_filename_final
fe346461 2574 except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
5ff1bc0c 2575 self.report_warning('Unable to download subtitle for "%s": %s' %
2576 (sub_lang, error_to_compat_str(err)))
2577 continue
8222d8de 2578
8222d8de 2579 if self.params.get('writeinfojson', False):
de6000d9 2580 infofn = self.prepare_filename(info_dict, 'infojson')
e92caff5 2581 if not self._ensure_dir_exists(encodeFilename(infofn)):
0202b52a 2582 return
0c3d0f51 2583 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2584 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2585 else:
66c935fb 2586 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2587 try:
8012d892 2588 write_json_file(self.sanitize_info(info_dict, self.params.get('clean_infojson', True)), infofn)
7b6fefc9 2589 except (OSError, IOError):
66c935fb 2590 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2591 return
de6000d9 2592 info_dict['__infojson_filename'] = infofn
8222d8de 2593
56d868db 2594 for thumb_ext in self._write_thumbnails(info_dict, temp_filename):
2595 thumb_filename_temp = replace_extension(temp_filename, thumb_ext, info_dict.get('ext'))
2596 thumb_filename = replace_extension(
2597 self.prepare_filename(info_dict, 'thumbnail'), thumb_ext, info_dict.get('ext'))
dcf64d43 2598 files_to_move[thumb_filename_temp] = thumb_filename
8222d8de 2599
732044af 2600 # Write internet shortcut files
2601 url_link = webloc_link = desktop_link = False
2602 if self.params.get('writelink', False):
2603 if sys.platform == "darwin": # macOS.
2604 webloc_link = True
2605 elif sys.platform.startswith("linux"):
2606 desktop_link = True
2607 else: # if sys.platform in ['win32', 'cygwin']:
2608 url_link = True
2609 if self.params.get('writeurllink', False):
2610 url_link = True
2611 if self.params.get('writewebloclink', False):
2612 webloc_link = True
2613 if self.params.get('writedesktoplink', False):
2614 desktop_link = True
2615
2616 if url_link or webloc_link or desktop_link:
2617 if 'webpage_url' not in info_dict:
2618 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2619 return
2620 ascii_url = iri_to_uri(info_dict['webpage_url'])
2621
2622 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2623 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2624 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2625 self.to_screen('[info] Internet shortcut is already present')
2626 else:
2627 try:
2628 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2629 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2630 template_vars = {'url': ascii_url}
2631 if embed_filename:
2632 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2633 linkfile.write(template % template_vars)
2634 except (OSError, IOError):
2635 self.report_error('Cannot write internet shortcut ' + linkfn)
2636 return False
2637 return True
2638
2639 if url_link:
2640 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2641 return
2642 if webloc_link:
2643 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2644 return
2645 if desktop_link:
2646 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2647 return
2648
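# The shortcut files above come from a single printf-style substitution of the
# template constants defined in utils.py. A rough sketch with a hypothetical
# stand-in template (the real DOT_URL_LINK_TEMPLATE may differ slightly):
#
#     EXAMPLE_URL_TEMPLATE = '[InternetShortcut]\r\nURL=%(url)s\r\n'
#     template_vars = {'url': 'https://example.com/watch?v=abc123'}
#     print(EXAMPLE_URL_TEMPLATE % template_vars)
#     # [InternetShortcut]
#     # URL=https://example.com/watch?v=abc123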
56d868db 2649 try:
2650 info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2651 except PostProcessingError as err:
2652 self.report_error('Preprocessing: %s' % str(err))
2653 return
2654
732044af 2655 must_record_download_archive = False
56d868db 2656 if self.params.get('skip_download', False):
2657 info_dict['filepath'] = temp_filename
2658 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2659 info_dict['__files_to_move'] = files_to_move
2660 info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
2661 else:
2662 # Download
b868936c 2663 info_dict.setdefault('__postprocessors', [])
4340deca 2664 try:
0202b52a 2665
6b591b29 2666 def existing_file(*filepaths):
2667 ext = info_dict.get('ext')
2668 final_ext = self.params.get('final_ext', ext)
2669 existing_files = []
2670 for file in orderedSet(filepaths):
2671 if final_ext != ext:
2672 converted = replace_extension(file, final_ext, ext)
2673 if os.path.exists(encodeFilename(converted)):
2674 existing_files.append(converted)
2675 if os.path.exists(encodeFilename(file)):
2676 existing_files.append(file)
2677
2678 if not existing_files or self.params.get('overwrites', False):
2679 for file in orderedSet(existing_files):
2680 self.report_file_delete(file)
2681 os.remove(encodeFilename(file))
2682 return None
2683
6b591b29 2684 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2685 return existing_files[0]
0202b52a 2686
2687 success = True
4340deca 2688 if info_dict.get('requested_formats') is not None:
81cd954a
S
2689
2690 def compatible_formats(formats):
d03cfdce 2691 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2692 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2693 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2694 if len(video_formats) > 2 or len(audio_formats) > 2:
2695 return False
2696
81cd954a 2697 # Check extension
d03cfdce 2698 exts = set(format.get('ext') for format in formats)
2699 COMPATIBLE_EXTS = (
2700 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2701 set(('webm',)),
2702 )
2703 for ext_sets in COMPATIBLE_EXTS:
2704 if ext_sets.issuperset(exts):
2705 return True
81cd954a
S
2706 # TODO: Check acodec/vcodec
2707 return False
2708
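# compatible_formats() above looks only at container extensions: merging without a
# container change is allowed only when every requested format falls inside one of
# the COMPATIBLE_EXTS sets. A standalone restatement of that check (sample format
# dicts are illustrative):
#
#     COMPATIBLE_EXTS = ({'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
#                        {'webm'})
#     def exts_compatible(formats):
#         exts = {f.get('ext') for f in formats}
#         return any(ext_set.issuperset(exts) for ext_set in COMPATIBLE_EXTS)
#     exts_compatible([{'ext': 'mp4'}, {'ext': 'm4a'}])   # True  - kept in mp4
#     exts_compatible([{'ext': 'webm'}, {'ext': 'm4a'}])  # False - remuxed to mkv below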
2709 requested_formats = info_dict['requested_formats']
0202b52a 2710 old_ext = info_dict['ext']
3b297919 2711 if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
2712 info_dict['ext'] = 'mkv'
2713 self.report_warning(
2714 'Requested formats are incompatible for merge and will be merged into mkv.')
124bc071 2715 new_ext = info_dict['ext']
0202b52a 2716
124bc071 2717 def correct_ext(filename, ext=new_ext):
96fccc10 2718 if filename == '-':
2719 return filename
0202b52a 2720 filename_real_ext = os.path.splitext(filename)[1][1:]
2721 filename_wo_ext = (
2722 os.path.splitext(filename)[0]
124bc071 2723 if filename_real_ext in (old_ext, new_ext)
0202b52a 2724 else filename)
124bc071 2725 return '%s.%s' % (filename_wo_ext, ext)
0202b52a 2726
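# Assuming old_ext == 'webm' and new_ext == 'mkv', correct_ext() behaves roughly as:
#
#     correct_ext('video.webm')  -> 'video.mkv'       (known ext, swapped)
#     correct_ext('video.mkv')   -> 'video.mkv'       (already the merge ext)
#     correct_ext('video.temp')  -> 'video.temp.mkv'  (unrelated ext, kept and suffixed)
#     correct_ext('-')           -> '-'               (stdout is passed through untouched)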
38c6902b 2727 # Ensure filename always has a correct extension for successful merge
0202b52a 2728 full_filename = correct_ext(full_filename)
2729 temp_filename = correct_ext(temp_filename)
2730 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2731 info_dict['__real_download'] = False
18e674b4 2732
2733 _protocols = set(determine_protocol(f) for f in requested_formats)
dbf5416a 2734 if len(_protocols) == 1: # All requested formats have same protocol
18e674b4 2735 info_dict['protocol'] = _protocols.pop()
dbf5416a 2736 directly_mergable = FFmpegFD.can_merge_formats(info_dict)
2737 if dl_filename is not None:
6c7274ec 2738 self.report_file_already_downloaded(dl_filename)
96fccc10 2739 elif (directly_mergable and get_suitable_downloader(
a46a815b 2740 info_dict, self.params, to_stdout=(temp_filename == '-')) == FFmpegFD):
dbf5416a 2741 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
2742 success, real_download = self.dl(temp_filename, info_dict)
2743 info_dict['__real_download'] = real_download
18e674b4 2744 else:
2745 downloaded = []
2746 merger = FFmpegMergerPP(self)
2747 if self.params.get('allow_unplayable_formats'):
2748 self.report_warning(
2749 'You have requested merging of multiple formats '
2750 'while also allowing unplayable formats to be downloaded. '
2751 'The formats won\'t be merged to prevent data corruption.')
2752 elif not merger.available:
2753 self.report_warning(
2754 'You have requested merging of multiple formats but ffmpeg is not installed. '
2755 'The formats won\'t be merged.')
2756
96fccc10 2757 if temp_filename == '-':
2758 reason = ('using a downloader other than ffmpeg' if directly_mergable
2759 else 'but the formats are incompatible for simultaneous download' if merger.available
2760 else 'but ffmpeg is not installed')
2761 self.report_warning(
2762 f'You have requested downloading multiple formats to stdout {reason}. '
2763 'The formats will be streamed one after the other')
2764 fname = temp_filename
dbf5416a 2765 for f in requested_formats:
2766 new_info = dict(info_dict)
2767 del new_info['requested_formats']
2768 new_info.update(f)
96fccc10 2769 if temp_filename != '-':
124bc071 2770 fname = prepend_extension(
2771 correct_ext(temp_filename, new_info['ext']),
2772 'f%s' % f['format_id'], new_info['ext'])
96fccc10 2773 if not self._ensure_dir_exists(fname):
2774 return
2775 downloaded.append(fname)
dbf5416a 2776 partial_success, real_download = self.dl(fname, new_info)
2777 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2778 success = success and partial_success
2779 if merger.available and not self.params.get('allow_unplayable_formats'):
2780 info_dict['__postprocessors'].append(merger)
2781 info_dict['__files_to_merge'] = downloaded
2782 # Even if none of the formats were actually downloaded now, the merge itself only happens at this point
2783 info_dict['__real_download'] = True
2784 else:
2785 for file in downloaded:
2786 files_to_move[file] = None
4340deca
P
2787 else:
2788 # Just a single file
0202b52a 2789 dl_filename = existing_file(full_filename, temp_filename)
6c7274ec 2790 if dl_filename is None or dl_filename == temp_filename:
2791 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
2792 # So we should try to resume the download
e8e73840 2793 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 2794 info_dict['__real_download'] = real_download
6c7274ec 2795 else:
2796 self.report_file_already_downloaded(dl_filename)
0202b52a 2797
0202b52a 2798 dl_filename = dl_filename or temp_filename
c571435f 2799 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2800
3158150c 2801 except network_exceptions as err:
7960b056 2802 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca
P
2803 return
2804 except (OSError, IOError) as err:
2805 raise UnavailableVideoError(err)
2806 except (ContentTooShortError, ) as err:
2807 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2808 return
8222d8de 2809
de6000d9 2810 if success and full_filename != '-':
f17f8651 2811
fd7cfb64 2812 def fixup():
2813 do_fixup = True
2814 fixup_policy = self.params.get('fixup')
2815 vid = info_dict['id']
2816
2817 if fixup_policy in ('ignore', 'never'):
2818 return
2819 elif fixup_policy == 'warn':
2820 do_fixup = False
f89b3e2d 2821 elif fixup_policy != 'force':
2822 assert fixup_policy in ('detect_or_warn', None)
2823 if not info_dict.get('__real_download'):
2824 do_fixup = False
fd7cfb64 2825
2826 def ffmpeg_fixup(cndn, msg, cls):
2827 if not cndn:
2828 return
2829 if not do_fixup:
2830 self.report_warning(f'{vid}: {msg}')
2831 return
2832 pp = cls(self)
2833 if pp.available:
2834 info_dict['__postprocessors'].append(pp)
2835 else:
2836 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
2837
2838 stretched_ratio = info_dict.get('stretched_ratio')
2839 ffmpeg_fixup(
2840 stretched_ratio not in (1, None),
2841 f'Non-uniform pixel ratio {stretched_ratio}',
2842 FFmpegFixupStretchedPP)
2843
2844 ffmpeg_fixup(
2845 (info_dict.get('requested_formats') is None
2846 and info_dict.get('container') == 'm4a_dash'
2847 and info_dict.get('ext') == 'm4a'),
2848 'writing DASH m4a. Only some players support this container',
2849 FFmpegFixupM4aPP)
2850
2851 downloader = (get_suitable_downloader(info_dict, self.params).__name__
2852 if 'protocol' in info_dict else None)
2853 ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
e36d50c5 2854 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
2855 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 2856
2857 fixup()
8222d8de 2858 try:
23c1a667 2859 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2860 except PostProcessingError as err:
2861 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2862 return
ab8e5e51
AM
2863 try:
2864 for ph in self._post_hooks:
23c1a667 2865 ph(info_dict['filepath'])
ab8e5e51
AM
2866 except Exception as err:
2867 self.report_error('post hooks: %s' % str(err))
2868 return
2d30509f 2869 must_record_download_archive = True
2870
2871 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2872 self.record_download_archive(info_dict)
c3e6ffba 2873 max_downloads = self.params.get('max_downloads')
2874 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2875 raise MaxDownloadsReached()
8222d8de
JMF
2876
2877 def download(self, url_list):
2878 """Download a given list of URLs."""
de6000d9 2879 outtmpl = self.outtmpl_dict['default']
3089bc74
S
2880 if (len(url_list) > 1
2881 and outtmpl != '-'
2882 and '%' not in outtmpl
2883 and self.params.get('max_downloads') != 1):
acd69589 2884 raise SameFileError(outtmpl)
8222d8de
JMF
2885
2886 for url in url_list:
2887 try:
5f6a1245 2888 # It also downloads the videos
61aa5ba3
S
2889 res = self.extract_info(
2890 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2891 except UnavailableVideoError:
6febd1c1 2892 self.report_error('unable to download video')
8222d8de 2893 except MaxDownloadsReached:
8f18aca8 2894 self.to_screen('[info] Maximum number of downloads reached')
8b0d7497 2895 raise
2896 except ExistingVideoReached:
8f18aca8 2897 self.to_screen('[info] Encountered a video that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2898 raise
2899 except RejectedVideoReached:
8f18aca8 2900 self.to_screen('[info] Encountered a video that did not match filter, stopping due to --break-on-reject')
8222d8de 2901 raise
63e0be34
PH
2902 else:
2903 if self.params.get('dump_single_json', False):
277d6ff5 2904 self.post_extract(res)
6e84b215 2905 self.to_stdout(json.dumps(self.sanitize_info(res)))
8222d8de
JMF
2906
2907 return self._download_retcode
2908
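For context, download() is also the entry point used when embedding yt-dlp in another script. A minimal usage sketch; the option names shown ('format', 'outtmpl') are common parameters and the URL is illustrative:

    import yt_dlp

    ydl_opts = {
        'format': 'bestvideo+bestaudio/best',
        'outtmpl': '%(title)s [%(id)s].%(ext)s',
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        # download() returns self._download_retcode, suitable for use as an exit code
        retcode = ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])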
1dcc4c0c 2909 def download_with_info_file(self, info_filename):
31bd3925
JMF
2910 with contextlib.closing(fileinput.FileInput(
2911 [info_filename], mode='r',
2912 openhook=fileinput.hook_encoded('utf-8'))) as f:
2913 # FileInput doesn't have a read method, so we can't call json.load
8012d892 2914 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898
JMF
2915 try:
2916 self.process_ie_result(info, download=True)
d3f62c19 2917 except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
d4943898
JMF
2918 webpage_url = info.get('webpage_url')
2919 if webpage_url is not None:
6febd1c1 2920 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898
JMF
2921 return self.download([webpage_url])
2922 else:
2923 raise
2924 return self._download_retcode
1dcc4c0c 2925
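download_with_info_file() is the programmatic counterpart of --load-info-json: it re-runs processing from a previously written .info.json instead of re-extracting. A short sketch; the file path is illustrative:

    import yt_dlp

    with yt_dlp.YoutubeDL({}) as ydl:
        # the info.json must have been produced earlier, e.g. with --write-info-json
        ydl.download_with_info_file('My Video [abc123].info.json')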
cb202fd2 2926 @staticmethod
8012d892 2927 def sanitize_info(info_dict, remove_private_keys=False):
2928 ''' Sanitize the infodict for converting to json '''
3ad56b42 2929 if info_dict is None:
2930 return info_dict
6e84b215 2931 info_dict.setdefault('epoch', int(time.time()))
2932 remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
ae8f99e6 2933 keep_keys = ['_type'] # Always keep this to facilitate load-info-json
8012d892 2934 if remove_private_keys:
6e84b215 2935 remove_keys |= {
2936 'requested_formats', 'requested_subtitles', 'requested_entries',
2937 'filepath', 'entries', 'original_url', 'playlist_autonumber',
2938 }
ae8f99e6 2939 empty_values = (None, {}, [], set(), tuple())
2940 reject = lambda k, v: k not in keep_keys and (
2941 k.startswith('_') or k in remove_keys or v in empty_values)
2942 else:
ae8f99e6 2943 reject = lambda k, v: k in remove_keys
5226731e 2944 filter_fn = lambda obj: (
b0249bca 2945 list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
a515a78d 2946 else obj if not isinstance(obj, dict)
ae8f99e6 2947 else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
5226731e 2948 return filter_fn(info_dict)
cb202fd2 2949
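To illustrate sanitize_info(): with remove_private_keys=True it drops keys that start with '_', keys in the removal set, and empty values, recursing into nested containers while preserving '_type'. A toy sketch with placeholder data (exact output can vary between versions):

    import yt_dlp

    info = {
        'id': 'abc123',
        'title': 'example',
        '_type': 'video',           # explicitly kept so --load-info-json keeps working
        '__real_download': True,    # leading underscore: dropped
        'requested_formats': [],    # in the removal set (and empty): dropped
        'formats': [{'format_id': '22', '__note': 'nested private key, dropped too'}],
    }
    clean = yt_dlp.YoutubeDL.sanitize_info(info, remove_private_keys=True)
    # Roughly: {'id': 'abc123', 'title': 'example', '_type': 'video',
    #           'formats': [{'format_id': '22'}], 'epoch': <current unix time>}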
8012d892 2950 @staticmethod
2951 def filter_requested_info(info_dict, actually_filter=True):
2952 ''' Alias of sanitize_info for backward compatibility '''
2953 return YoutubeDL.sanitize_info(info_dict, actually_filter)
2954
dcf64d43 2955 def run_pp(self, pp, infodict):
5bfa4862 2956 files_to_delete = []
dcf64d43 2957 if '__files_to_move' not in infodict:
2958 infodict['__files_to_move'] = {}
af819c21 2959 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2960 if not files_to_delete:
dcf64d43 2961 return infodict
5bfa4862 2962
2963 if self.params.get('keepvideo', False):
2964 for f in files_to_delete:
dcf64d43 2965 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 2966 else:
2967 for old_filename in set(files_to_delete):
2968 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2969 try:
2970 os.remove(encodeFilename(old_filename))
2971 except (IOError, OSError):
2972 self.report_warning('Unable to remove downloaded original file')
dcf64d43 2973 if old_filename in infodict['__files_to_move']:
2974 del infodict['__files_to_move'][old_filename]
2975 return infodict
5bfa4862 2976
277d6ff5 2977 @staticmethod
2978 def post_extract(info_dict):
2979 def actual_post_extract(info_dict):
2980 if info_dict.get('_type') in ('playlist', 'multi_video'):
2981 for video_dict in info_dict.get('entries', {}):
b050d210 2982 actual_post_extract(video_dict or {})
277d6ff5 2983 return
2984
07cce701 2985 post_extractor = info_dict.get('__post_extractor') or (lambda: {})
4ec82a72 2986 extra = post_extractor().items()
2987 info_dict.update(extra)
07cce701 2988 info_dict.pop('__post_extractor', None)
277d6ff5 2989
4ec82a72 2990 original_infodict = info_dict.get('__original_infodict') or {}
2991 original_infodict.update(extra)
2992 original_infodict.pop('__post_extractor', None)
2993
b050d210 2994 actual_post_extract(info_dict or {})
277d6ff5 2995
56d868db 2996 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 2997 info = dict(ie_info)
56d868db 2998 info['__files_to_move'] = files_to_move or {}
2999 for pp in self._pps[key]:
dcf64d43 3000 info = self.run_pp(pp, info)
56d868db 3001 return info, info.pop('__files_to_move', None)
5bfa4862 3002
dcf64d43 3003 def post_process(self, filename, ie_info, files_to_move=None):
8222d8de
JMF
3004 """Run all the postprocessors on the given file."""
3005 info = dict(ie_info)
3006 info['filepath'] = filename
dcf64d43 3007 info['__files_to_move'] = files_to_move or {}
0202b52a 3008
56d868db 3009 for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
dcf64d43 3010 info = self.run_pp(pp, info)
3011 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
3012 del info['__files_to_move']
56d868db 3013 for pp in self._pps['after_move']:
dcf64d43 3014 info = self.run_pp(pp, info)
23c1a667 3015 return info
c1c9a79c 3016
5db07df6 3017 def _make_archive_id(self, info_dict):
e9fef7ee
S
3018 video_id = info_dict.get('id')
3019 if not video_id:
3020 return
5db07df6
PH
3021 # Future-proof against any change in case
3022 # and backwards compatibility with prior versions
e9fef7ee 3023 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3024 if extractor is None:
1211bb6d
S
3025 url = str_or_none(info_dict.get('url'))
3026 if not url:
3027 return
e9fef7ee
S
3028 # Try to find matching extractor for the URL and take its ie_key
3029 for ie in self._ies:
1211bb6d 3030 if ie.suitable(url):
e9fef7ee
S
3031 extractor = ie.ie_key()
3032 break
3033 else:
3034 return
d0757229 3035 return '%s %s' % (extractor.lower(), video_id)
5db07df6
PH
3036
3037 def in_download_archive(self, info_dict):
3038 fn = self.params.get('download_archive')
3039 if fn is None:
3040 return False
3041
3042 vid_id = self._make_archive_id(info_dict)
e9fef7ee 3043 if not vid_id:
7012b23c 3044 return False # Incomplete video information
5db07df6 3045
a45e8619 3046 return vid_id in self.archive
c1c9a79c
PH
3047
3048 def record_download_archive(self, info_dict):
3049 fn = self.params.get('download_archive')
3050 if fn is None:
3051 return
5db07df6
PH
3052 vid_id = self._make_archive_id(info_dict)
3053 assert vid_id
c1c9a79c 3054 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3055 archive_file.write(vid_id + '\n')
a45e8619 3056 self.archive.add(vid_id)
dd82ffea 3057
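The download archive referenced above is a plain-text file holding one '<extractor> <video id>' entry per line, which is exactly what _make_archive_id() returns. A standalone sketch of the same membership check (the IDs shown are illustrative):

    def already_in_archive(archive_path, extractor, video_id):
        # Mirrors in_download_archive(): lowercase extractor key, space, video id
        entry = '%s %s' % (extractor.lower(), video_id)
        try:
            with open(archive_path, encoding='utf-8') as f:
                return entry in (line.strip() for line in f)
        except FileNotFoundError:
            return False

    # A typical archive file simply looks like:
    #   youtube BaW_jenozKc
    #   vimeo 56015672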
8c51aa65 3058 @staticmethod
8abeeb94 3059 def format_resolution(format, default='unknown'):
fb04e403 3060 if format.get('vcodec') == 'none':
8326b00a 3061 if format.get('acodec') == 'none':
3062 return 'images'
fb04e403 3063 return 'audio only'
f49d89ee
PH
3064 if format.get('resolution') is not None:
3065 return format['resolution']
35615307
DA
3066 if format.get('width') and format.get('height'):
3067 res = '%dx%d' % (format['width'], format['height'])
3068 elif format.get('height'):
3069 res = '%sp' % format['height']
3070 elif format.get('width'):
388ae76b 3071 res = '%dx?' % format['width']
8c51aa65 3072 else:
8abeeb94 3073 res = default
8c51aa65
JMF
3074 return res
3075
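A few doctest-style examples of format_resolution() on minimal format dicts, following the branches above:

    # >>> YoutubeDL.format_resolution({'vcodec': 'none', 'acodec': 'none'})
    # 'images'
    # >>> YoutubeDL.format_resolution({'vcodec': 'none', 'acodec': 'mp4a.40.2'})
    # 'audio only'
    # >>> YoutubeDL.format_resolution({'width': 1920, 'height': 1080})
    # '1920x1080'
    # >>> YoutubeDL.format_resolution({'height': 720})
    # '720p'
    # >>> YoutubeDL.format_resolution({'width': 640})
    # '640x?'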
c57f7757
PH
3076 def _format_note(self, fdict):
3077 res = ''
3078 if fdict.get('ext') in ['f4f', 'f4m']:
3079 res += '(unsupported) '
32f90364
PH
3080 if fdict.get('language'):
3081 if res:
3082 res += ' '
9016d76f 3083 res += '[%s] ' % fdict['language']
c57f7757
PH
3084 if fdict.get('format_note') is not None:
3085 res += fdict['format_note'] + ' '
3086 if fdict.get('tbr') is not None:
3087 res += '%4dk ' % fdict['tbr']
3088 if fdict.get('container') is not None:
3089 if res:
3090 res += ', '
3091 res += '%s container' % fdict['container']
3089bc74
S
3092 if (fdict.get('vcodec') is not None
3093 and fdict.get('vcodec') != 'none'):
c57f7757
PH
3094 if res:
3095 res += ', '
3096 res += fdict['vcodec']
91c7271a 3097 if fdict.get('vbr') is not None:
c57f7757
PH
3098 res += '@'
3099 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3100 res += 'video@'
3101 if fdict.get('vbr') is not None:
3102 res += '%4dk' % fdict['vbr']
fbb21cf5 3103 if fdict.get('fps') is not None:
5d583bdf
S
3104 if res:
3105 res += ', '
3106 res += '%sfps' % fdict['fps']
c57f7757
PH
3107 if fdict.get('acodec') is not None:
3108 if res:
3109 res += ', '
3110 if fdict['acodec'] == 'none':
3111 res += 'video only'
3112 else:
3113 res += '%-5s' % fdict['acodec']
3114 elif fdict.get('abr') is not None:
3115 if res:
3116 res += ', '
3117 res += 'audio'
3118 if fdict.get('abr') is not None:
3119 res += '@%3dk' % fdict['abr']
3120 if fdict.get('asr') is not None:
3121 res += ' (%5dHz)' % fdict['asr']
3122 if fdict.get('filesize') is not None:
3123 if res:
3124 res += ', '
3125 res += format_bytes(fdict['filesize'])
9732d77e
PH
3126 elif fdict.get('filesize_approx') is not None:
3127 if res:
3128 res += ', '
3129 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3130 return res
91c7271a 3131
c57f7757 3132 def list_formats(self, info_dict):
94badb25 3133 formats = info_dict.get('formats', [info_dict])
53ed7066 3134 new_format = (
3135 'list-formats' not in self.params.get('compat_opts', [])
169dbde9 3136 and self.params.get('listformats_table', True) is not False)
76d321f6 3137 if new_format:
3138 table = [
3139 [
3140 format_field(f, 'format_id'),
3141 format_field(f, 'ext'),
3142 self.format_resolution(f),
3143 format_field(f, 'fps', '%d'),
3144 '|',
3145 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
3146 format_field(f, 'tbr', '%4dk'),
52a8a1e1 3147 shorten_protocol_name(f.get('protocol', '').replace('native', 'n')),
76d321f6 3148 '|',
3149 format_field(f, 'vcodec', default='unknown').replace('none', ''),
3150 format_field(f, 'vbr', '%4dk'),
3151 format_field(f, 'acodec', default='unknown').replace('none', ''),
3152 format_field(f, 'abr', '%3dk'),
3153 format_field(f, 'asr', '%5dHz'),
3f698246 3154 ', '.join(filter(None, (
3155 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
3156 format_field(f, 'language', '[%s]'),
3157 format_field(f, 'format_note'),
3158 format_field(f, 'container', ignore=(None, f.get('ext'))),
ea05b302 3159 ))),
3f698246 3160 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
76d321f6 3161 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
3f698246 3162 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
76d321f6 3163 else:
3164 table = [
3165 [
3166 format_field(f, 'format_id'),
3167 format_field(f, 'ext'),
3168 self.format_resolution(f),
3169 self._format_note(f)]
3170 for f in formats
3171 if f.get('preference') is None or f['preference'] >= -1000]
3172 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 3173
cfb56d1a 3174 self.to_screen(
169dbde9 3175 '[info] Available formats for %s:' % info_dict['id'])
3176 self.to_stdout(render_table(
bc97cdae 3177 header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
cfb56d1a
PH
3178
3179 def list_thumbnails(self, info_dict):
b0249bca 3180 thumbnails = list(info_dict.get('thumbnails') or [])
cfb56d1a 3181 if not thumbnails:
b7b72db9 3182 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
3183 return
cfb56d1a
PH
3184
3185 self.to_screen(
3186 '[info] Thumbnails for %s:' % info_dict['id'])
169dbde9 3187 self.to_stdout(render_table(
cfb56d1a
PH
3188 ['ID', 'width', 'height', 'URL'],
3189 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 3190
360e1ca5 3191 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 3192 if not subtitles:
360e1ca5 3193 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 3194 return
a504ced0 3195 self.to_screen(
edab9dbf 3196 'Available %s for %s:' % (name, video_id))
2412044c 3197
3198 def _row(lang, formats):
49c258e1 3199 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3200 if len(set(names)) == 1:
7aee40c1 3201 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3202 return [lang, ', '.join(names), ', '.join(exts)]
3203
169dbde9 3204 self.to_stdout(render_table(
2412044c 3205 ['Language', 'Name', 'Formats'],
3206 [_row(lang, formats) for lang, formats in subtitles.items()],
3207 hideEmpty=True))
a504ced0 3208
dca08720
PH
3209 def urlopen(self, req):
3210 """ Start an HTTP download """
82d8a8b6 3211 if isinstance(req, compat_basestring):
67dda517 3212 req = sanitized_Request(req)
19a41fc6 3213 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
3214
3215 def print_debug_header(self):
3216 if not self.params.get('verbose'):
3217 return
62fec3b2 3218
c6afed48
PH
3219 stdout_encoding = getattr(
3220 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 3221 encoding_str = (
734f90bb
PH
3222 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
3223 locale.getpreferredencoding(),
3224 sys.getfilesystemencoding(),
c6afed48 3225 stdout_encoding,
b0472057 3226 self.get_encoding()))
4192b51c 3227 write_string(encoding_str, encoding=None)
734f90bb 3228
e5813e53 3229 source = (
3230 '(exe)' if hasattr(sys, 'frozen')
3231 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
3232 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
3233 else '')
3234 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 3235 if _LAZY_LOADER:
f74980cb 3236 self._write_string('[debug] Lazy loading extractors enabled\n')
3237 if _PLUGIN_CLASSES:
3238 self._write_string(
3239 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
53ed7066 3240 if self.params.get('compat_opts'):
3241 self._write_string(
3242 '[debug] Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
dca08720
PH
3243 try:
3244 sp = subprocess.Popen(
3245 ['git', 'rev-parse', '--short', 'HEAD'],
3246 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3247 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 3248 out, err = process_communicate_or_kill(sp)
dca08720
PH
3249 out = out.decode().strip()
3250 if re.match('[0-9a-f]+', out):
f74980cb 3251 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 3252 except Exception:
dca08720
PH
3253 try:
3254 sys.exc_clear()
70a1165b 3255 except Exception:
dca08720 3256 pass
b300cda4
S
3257
3258 def python_implementation():
3259 impl_name = platform.python_implementation()
3260 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3261 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3262 return impl_name
3263
e5813e53 3264 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
3265 platform.python_version(),
3266 python_implementation(),
3267 platform.architecture()[0],
b300cda4 3268 platform_name()))
d28b5171 3269
73fac4e9 3270 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 3271 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3272 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3273 exe_str = ', '.join(
2831b468 3274 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3275 ) or 'none'
d28b5171 3276 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720 3277
2831b468 3278 from .downloader.fragment import can_decrypt_frag
3279 from .downloader.websocket import has_websockets
3280 from .postprocessor.embedthumbnail import has_mutagen
3281 from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
3282
ad3dc496 3283 lib_str = ', '.join(sorted(filter(None, (
2831b468 3284 can_decrypt_frag and 'pycryptodome',
3285 has_websockets and 'websockets',
3286 has_mutagen and 'mutagen',
3287 SQLITE_AVAILABLE and 'sqlite',
3288 KEYRING_AVAILABLE and 'keyring',
ad3dc496 3289 )))) or 'none'
2831b468 3290 self._write_string('[debug] Optional libraries: %s\n' % lib_str)
3291
dca08720
PH
3292 proxy_map = {}
3293 for handler in self._opener.handlers:
3294 if hasattr(handler, 'proxies'):
3295 proxy_map.update(handler.proxies)
734f90bb 3296 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 3297
58b1f00d
PH
3298 if self.params.get('call_home', False):
3299 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
3300 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 3301 return
58b1f00d
PH
3302 latest_version = self.urlopen(
3303 'https://yt-dl.org/latest/version').read().decode('utf-8')
3304 if version_tuple(latest_version) > version_tuple(__version__):
3305 self.report_warning(
3306 'You are using an outdated version (newest version: %s)! '
3307 'See https://yt-dl.org/update if you need help updating.' %
3308 latest_version)
3309
e344693b 3310 def _setup_opener(self):
6ad14cab 3311 timeout_val = self.params.get('socket_timeout')
19a41fc6 3312 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 3313
982ee69a 3314 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720
PH
3315 opts_cookiefile = self.params.get('cookiefile')
3316 opts_proxy = self.params.get('proxy')
3317
982ee69a 3318 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3319
6a3f4c3f 3320 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
3321 if opts_proxy is not None:
3322 if opts_proxy == '':
3323 proxies = {}
3324 else:
3325 proxies = {'http': opts_proxy, 'https': opts_proxy}
3326 else:
3327 proxies = compat_urllib_request.getproxies()
067aa17e 3328 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
3329 if 'http' in proxies and 'https' not in proxies:
3330 proxies['https'] = proxies['http']
91410c9b 3331 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
3332
3333 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3334 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3335 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3336 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 3337 data_handler = compat_urllib_request_DataHandler()
6240b0a2
JMF
3338
3339 # When we pass our own FileHandler instance, build_opener won't add the
3340 # default FileHandler, which lets us disable the file protocol since it
3341 # can be used for malicious purposes (see
067aa17e 3342 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2
JMF
3343 file_handler = compat_urllib_request.FileHandler()
3344
3345 def file_open(*args, **kwargs):
7a5c1cfe 3346 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3347 file_handler.file_open = file_open
3348
3349 opener = compat_urllib_request.build_opener(
fca6dba8 3350 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3351
dca08720
PH
3352 # Delete the default user-agent header, which would otherwise apply in
3353 # cases where our custom HTTP handler doesn't come into play
067aa17e 3354 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3355 opener.addheaders = []
3356 self._opener = opener
62fec3b2
PH
3357
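Overriding file_open on a FileHandler passed to build_opener is a general urllib pattern for disabling the file:// scheme, which is what _setup_opener() does above. A standalone sketch of the same idea using the Python 3 urllib names that the compat_* aliases map to:

    import urllib.error
    import urllib.request

    def _refuse_file_open(*args, **kwargs):
        # Mirrors the handler above: any file:// URL is rejected outright
        raise urllib.error.URLError('file:// scheme is disabled')

    file_handler = urllib.request.FileHandler()
    file_handler.file_open = _refuse_file_open

    # Passing an instance also stops build_opener from adding the default FileHandler
    opener = urllib.request.build_opener(file_handler)
    try:
        opener.open('file:///etc/hostname')
    except urllib.error.URLError as err:
        print('blocked:', err.reason)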
3358 def encode(self, s):
3359 if isinstance(s, bytes):
3360 return s # Already encoded
3361
3362 try:
3363 return s.encode(self.get_encoding())
3364 except UnicodeEncodeError as err:
3365 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3366 raise
3367
3368 def get_encoding(self):
3369 encoding = self.params.get('encoding')
3370 if encoding is None:
3371 encoding = preferredencoding()
3372 return encoding
ec82d85a 3373
de6000d9 3374 def _write_thumbnails(self, info_dict, filename): # return the extensions
6c4fd172 3375 write_all = self.params.get('write_all_thumbnails', False)
3376 thumbnails = []
3377 if write_all or self.params.get('writethumbnail', False):
0202b52a 3378 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3379 multiple = write_all and len(thumbnails) > 1
ec82d85a 3380
0202b52a 3381 ret = []
981052c9 3382 for t in thumbnails[::-1]:
ec82d85a 3383 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 3384 suffix = '%s.' % t['id'] if multiple else ''
3385 thumb_display_id = '%s ' % t['id'] if multiple else ''
885cc0b7 3386 thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 3387
0c3d0f51 3388 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 3389 ret.append(suffix + thumb_ext)
8ba87148 3390 t['filepath'] = thumb_filename
ec82d85a
PH
3391 self.to_screen('[%s] %s: Thumbnail %sis already present' %
3392 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3393 else:
5ef7d9bd 3394 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
ec82d85a
PH
3395 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3396 try:
3397 uf = self.urlopen(t['url'])
d3d89c32 3398 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3399 shutil.copyfileobj(uf, thumbf)
de6000d9 3400 ret.append(suffix + thumb_ext)
ec82d85a
PH
3401 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3402 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
885cc0b7 3403 t['filepath'] = thumb_filename
3158150c 3404 except network_exceptions as err:
ec82d85a 3405 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 3406 (t['url'], error_to_compat_str(err)))
6c4fd172 3407 if ret and not write_all:
3408 break
0202b52a 3409 return ret