]> jfr.im git - yt-dlp.git/blame - yt_dlp/YoutubeDL.py
Let `--match-filter` reject entries early
[yt-dlp.git] / yt_dlp / YoutubeDL.py
CommitLineData
cc52de43 1#!/usr/bin/env python3
dcdb292f 2# coding: utf-8
8222d8de 3
6febd1c1 4from __future__ import absolute_import, unicode_literals
8222d8de 5
26e63931 6import collections
31bd3925 7import contextlib
317f7ab6 8import copy
9d2ecdbc 9import datetime
c1c9a79c 10import errno
31bd3925 11import fileinput
8222d8de 12import io
b82f815f 13import itertools
8694c600 14import json
62fec3b2 15import locale
083c9df9 16import operator
8222d8de 17import os
dca08720 18import platform
8222d8de
JMF
19import re
20import shutil
dca08720 21import subprocess
8222d8de 22import sys
21cd8fae 23import tempfile
8222d8de 24import time
67134eab 25import tokenize
8222d8de 26import traceback
75822ca7 27import random
8222d8de 28
961ea474 29from string import ascii_letters
e5813e53 30from zipimport import zipimporter
961ea474 31
8c25f81b 32from .compat import (
82d8a8b6 33 compat_basestring,
003c69a8 34 compat_get_terminal_size,
4f026faf 35 compat_kwargs,
d0d9ade4 36 compat_numeric_types,
e9c0cdd3 37 compat_os_name,
7d1eb38a 38 compat_shlex_quote,
ce02ed60 39 compat_str,
67134eab 40 compat_tokenize_tokenize,
ce02ed60
PH
41 compat_urllib_error,
42 compat_urllib_request,
8b172c2e 43 compat_urllib_request_DataHandler,
8c25f81b 44)
982ee69a 45from .cookies import load_cookies
8c25f81b 46from .utils import (
eedb7ba5
S
47 age_restricted,
48 args_to_str,
ce02ed60
PH
49 ContentTooShortError,
50 date_from_str,
51 DateRange,
acd69589 52 DEFAULT_OUTTMPL,
ce02ed60 53 determine_ext,
b5559424 54 determine_protocol,
732044af 55 DOT_DESKTOP_LINK_TEMPLATE,
56 DOT_URL_LINK_TEMPLATE,
57 DOT_WEBLOC_LINK_TEMPLATE,
ce02ed60 58 DownloadError,
c0384f22 59 encode_compat_str,
ce02ed60 60 encodeFilename,
498f5606 61 EntryNotInPlaylist,
a06916d9 62 error_to_compat_str,
8b0d7497 63 ExistingVideoReached,
590bc6f6 64 expand_path,
ce02ed60 65 ExtractorError,
e29663c6 66 float_or_none,
02dbf93f 67 format_bytes,
76d321f6 68 format_field,
901130bb 69 STR_FORMAT_RE_TMPL,
70 STR_FORMAT_TYPES,
525ef922 71 formatSeconds,
773f291d 72 GeoRestrictedError,
b0249bca 73 HEADRequest,
c9969434 74 int_or_none,
732044af 75 iri_to_uri,
773f291d 76 ISO3166Utils,
56a8fb4f 77 LazyList,
ce02ed60 78 locked_file,
0202b52a 79 make_dir,
dca08720 80 make_HTTPS_handler,
ce02ed60 81 MaxDownloadsReached,
3158150c 82 network_exceptions,
cd6fc19e 83 orderedSet,
a06916d9 84 OUTTMPL_TYPES,
b7ab0590 85 PagedList,
083c9df9 86 parse_filesize,
91410c9b 87 PerRequestProxyHandler,
dca08720 88 platform_name,
eedb7ba5 89 PostProcessingError,
ce02ed60 90 preferredencoding,
eedb7ba5 91 prepend_extension,
a06916d9 92 process_communicate_or_kill,
51fb4995 93 register_socks_protocols,
a06916d9 94 RejectedVideoReached,
cfb56d1a 95 render_table,
eedb7ba5 96 replace_extension,
ce02ed60
PH
97 SameFileError,
98 sanitize_filename,
1bb5c511 99 sanitize_path,
dcf77cf1 100 sanitize_url,
67dda517 101 sanitized_Request,
e5660ee6 102 std_headers,
1211bb6d 103 str_or_none,
e29663c6 104 strftime_or_none,
ce02ed60 105 subtitles_filename,
51d9739f 106 ThrottledDownload,
732044af 107 to_high_limit_path,
324ad820 108 traverse_obj,
6033d980 109 try_get,
ce02ed60 110 UnavailableVideoError,
29eb5174 111 url_basename,
7d1eb38a 112 variadic,
58b1f00d 113 version_tuple,
ce02ed60
PH
114 write_json_file,
115 write_string,
6a3f4c3f 116 YoutubeDLCookieProcessor,
dca08720 117 YoutubeDLHandler,
fca6dba8 118 YoutubeDLRedirectHandler,
ce02ed60 119)
a0e07d31 120from .cache import Cache
52a8a1e1 121from .extractor import (
122 gen_extractor_classes,
123 get_info_extractor,
124 _LAZY_LOADER,
125 _PLUGIN_CLASSES
126)
4c54b89e 127from .extractor.openload import PhantomJSwrapper
52a8a1e1 128from .downloader import (
dbf5416a 129 FFmpegFD,
52a8a1e1 130 get_suitable_downloader,
131 shorten_protocol_name
132)
4c83c967 133from .downloader.rtmp import rtmpdump_version
4f026faf 134from .postprocessor import (
e36d50c5 135 get_postprocessor,
136 FFmpegFixupDurationPP,
f17f8651 137 FFmpegFixupM3u8PP,
62cd676c 138 FFmpegFixupM4aPP,
6271f1ca 139 FFmpegFixupStretchedPP,
e36d50c5 140 FFmpegFixupTimestampPP,
4f026faf
PH
141 FFmpegMergerPP,
142 FFmpegPostProcessor,
0202b52a 143 MoveFilesAfterDownloadPP,
4f026faf 144)
dca08720 145from .version import __version__
8222d8de 146
e9c0cdd3
YCH
147if compat_os_name == 'nt':
148 import ctypes
149
2459b6e1 150
8222d8de
JMF
151class YoutubeDL(object):
152 """YoutubeDL class.
153
154 YoutubeDL objects are the ones responsible of downloading the
155 actual video file and writing it to disk if the user has requested
156 it, among some other tasks. In most cases there should be one per
157 program. As, given a video URL, the downloader doesn't know how to
158 extract all the needed information, task that InfoExtractors do, it
159 has to pass the URL to one of them.
160
161 For this, YoutubeDL objects have a method that allows
162 InfoExtractors to be registered in a given order. When it is passed
163 a URL, the YoutubeDL object handles it to the first InfoExtractor it
164 finds that reports being able to handle it. The InfoExtractor extracts
165 all the information about the video or videos the URL refers to, and
166 YoutubeDL process the extracted information, possibly using a File
167 Downloader to download the video.
168
169 YoutubeDL objects accept a lot of parameters. In order not to saturate
170 the object constructor with arguments, it receives a dictionary of
171 options instead. These options are available through the params
172 attribute for the InfoExtractors to use. The YoutubeDL also
173 registers itself as the downloader in charge for the InfoExtractors
174 that are added to it, so this is a "mutual registration".
175
176 Available options:
177
178 username: Username for authentication purposes.
179 password: Password for authentication purposes.
180940e0 180 videopassword: Password for accessing a video.
1da50aa3
S
181 ap_mso: Adobe Pass multiple-system operator identifier.
182 ap_username: Multiple-system operator account username.
183 ap_password: Multiple-system operator account password.
8222d8de
JMF
184 usenetrc: Use netrc for authentication instead.
185 verbose: Print additional info to stdout.
186 quiet: Do not print messages to stdout.
ad8915b7 187 no_warnings: Do not print out anything for warnings.
53c18592 188 forceprint: A list of templates to force print
189 forceurl: Force printing final URL. (Deprecated)
190 forcetitle: Force printing title. (Deprecated)
191 forceid: Force printing ID. (Deprecated)
192 forcethumbnail: Force printing thumbnail URL. (Deprecated)
193 forcedescription: Force printing description. (Deprecated)
194 forcefilename: Force printing final filename. (Deprecated)
195 forceduration: Force printing duration. (Deprecated)
8694c600 196 forcejson: Force printing info_dict as JSON.
63e0be34
PH
197 dump_single_json: Force printing the info_dict of the whole playlist
198 (or video) as a single JSON line.
c25228e5 199 force_write_download_archive: Force writing download archive regardless
200 of 'skip_download' or 'simulate'.
b7b04c78 201 simulate: Do not download the video files. If unset (or None),
202 simulate only if listsubtitles, listformats or list_thumbnails is used
eb8a4433 203 format: Video format code. see "FORMAT SELECTION" for more details.
63ad4d43 204 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 205 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
206 extracting metadata even if the video is not actually
207 available for download (experimental)
c25228e5 208 format_sort: How to sort the video formats. see "Sorting Formats"
209 for more details.
210 format_sort_force: Force the given format_sort. see "Sorting Formats"
211 for more details.
212 allow_multiple_video_streams: Allow multiple video streams to be merged
213 into a single file
214 allow_multiple_audio_streams: Allow multiple audio streams to be merged
215 into a single file
0ba692ac 216 check_formats Whether to test if the formats are downloadable.
217 Can be True (check all), False (check none)
218 or None (check only if requested by extractor)
4524baf0 219 paths: Dictionary of output paths. The allowed keys are 'home'
220 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 221 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 222 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
34488702 223 For compatibility with youtube-dl, a single string can also be used
a820dc72
RA
224 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
225 restrictfilenames: Do not allow "&" and spaces in file names
226 trim_file_name: Limit length of filename (extension excluded)
4524baf0 227 windowsfilenames: Force the filenames to be windows compatible
a820dc72 228 ignoreerrors: Do not stop on download errors
7a5c1cfe 229 (Default True when running yt-dlp,
a820dc72 230 but False when directly accessing YoutubeDL class)
26e2805c 231 skip_playlist_after_errors: Number of allowed failures until the rest of
232 the playlist is skipped
d22dec74 233 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 234 overwrites: Overwrite all video and metadata files if True,
235 overwrite only non-video files if None
236 and don't overwrite any file if False
34488702 237 For compatibility with youtube-dl,
238 "nooverwrites" may also be used instead
8222d8de
JMF
239 playliststart: Playlist item to start at.
240 playlistend: Playlist item to end at.
c14e88f0 241 playlist_items: Specific indices of playlist to download.
ff815fe6 242 playlistreverse: Download playlist items in reverse order.
75822ca7 243 playlistrandom: Download playlist items in random order.
8222d8de
JMF
244 matchtitle: Download only matching titles.
245 rejecttitle: Reject downloads for matching titles.
8bf9319e 246 logger: Log messages to a logging.Logger instance.
8222d8de
JMF
247 logtostderr: Log messages to stderr instead of stdout.
248 writedescription: Write the video description to a .description file
249 writeinfojson: Write the video description to a .info.json file
75d43ca0 250 clean_infojson: Remove private fields from the infojson
34488702 251 getcomments: Extract video comments. This will not be written to disk
06167fbb 252 unless writeinfojson is also given
1fb07d10 253 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 254 writethumbnail: Write the thumbnail image to a file
c25228e5 255 allow_playlist_files: Whether to write playlists' description, infojson etc
256 also to disk when using the 'write*' options
ec82d85a 257 write_all_thumbnails: Write all thumbnail formats to files
732044af 258 writelink: Write an internet shortcut file, depending on the
259 current platform (.url/.webloc/.desktop)
260 writeurllink: Write a Windows internet shortcut file (.url)
261 writewebloclink: Write a macOS internet shortcut file (.webloc)
262 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 263 writesubtitles: Write the video subtitles to a file
741dd8ea 264 writeautomaticsub: Write the automatically generated subtitles to a file
245524e6 265 allsubtitles: Deprecated - Use subtitleslangs = ['all']
c32b0aab 266 Downloads all the subtitles of the video
0b7f3118 267 (requires writesubtitles or writeautomaticsub)
8222d8de 268 listsubtitles: Lists all available subtitles for the video
a504ced0 269 subtitlesformat: The format code for subtitles
c32b0aab 270 subtitleslangs: List of languages of the subtitles to download (can be regex).
271 The list may contain "all" to refer to all the available
272 subtitles. The language can be prefixed with a "-" to
273 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de
JMF
274 keepvideo: Keep the video file after post-processing
275 daterange: A DateRange object, download only if the upload_date is in the range.
276 skip_download: Skip the actual download of the video file
c35f9e72 277 cachedir: Location of the cache files in the filesystem.
a0e07d31 278 False to disable filesystem cache.
47192f92 279 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
280 age_limit: An integer representing the user's age in years.
281 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
282 min_views: An integer representing the minimum view count the video
283 must have in order to not be skipped.
284 Videos without view count information are always
285 downloaded. None for no limit.
286 max_views: An integer representing the maximum view count.
287 Videos that are more popular than that are not
288 downloaded.
289 Videos without view count information are always
290 downloaded. None for no limit.
291 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
292 Videos already present in the file are not downloaded
293 again.
8a51f564 294 break_on_existing: Stop the download process after attempting to download a
295 file that is in the archive.
296 break_on_reject: Stop the download process when encountering a video that
297 has been filtered out.
298 cookiefile: File name where cookies should be read from and dumped to
982ee69a
MB
299 cookiesfrombrowser: A tuple containing the name of the browser and the profile
300 name/path from where cookies are loaded.
301 Eg: ('chrome', ) or (vivaldi, 'default')
a1ee09e8 302 nocheckcertificate:Do not verify SSL certificates
7e8c0af0
PH
303 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
304 At the moment, this is only supported by YouTube.
a1ee09e8 305 proxy: URL of the proxy server to use
38cce791 306 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 307 on geo-restricted sites.
e344693b 308 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
309 bidi_workaround: Work around buggy terminals without bidirectional text
310 support, using fridibi
a0ddb8a2 311 debug_printtraffic:Print out sent and received HTTP traffic
7b0817e8 312 include_ads: Download ads as well
04b4d394
PH
313 default_search: Prepend this string if an input url is not valid.
314 'auto' for elaborate guessing
62fec3b2 315 encoding: Use this encoding instead of the system-specified.
e8ee972c 316 extract_flat: Do not resolve URLs, return the immediate result.
057a5206
PH
317 Pass in 'in_playlist' to only show this behavior for
318 playlist items.
4f026faf 319 postprocessors: A list of dictionaries, each with an entry
71b640cc 320 * key: The name of the postprocessor. See
7a5c1cfe 321 yt_dlp/postprocessor/__init__.py for a list.
56d868db 322 * when: When to run the postprocessor. Can be one of
323 pre_process|before_dl|post_process|after_move.
324 Assumed to be 'post_process' if not given
ab8e5e51
AM
325 post_hooks: A list of functions that get called as the final step
326 for each video file, after all postprocessors have been
327 called. The filename will be passed as the only argument.
71b640cc
PH
328 progress_hooks: A list of functions that get called on download
329 progress, with a dictionary with the entries
5cda4eda 330 * status: One of "downloading", "error", or "finished".
ee69b99a 331 Check this first and ignore unknown values.
3ba7740d 332 * info_dict: The extracted info_dict
71b640cc 333
5cda4eda 334 If status is one of "downloading", or "finished", the
ee69b99a
PH
335 following properties may also be present:
336 * filename: The final filename (always present)
5cda4eda 337 * tmpfilename: The filename we're currently writing to
71b640cc
PH
338 * downloaded_bytes: Bytes on disk
339 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
340 * total_bytes_estimate: Guess of the eventual file size,
341 None if unavailable.
342 * elapsed: The number of seconds since download started.
71b640cc
PH
343 * eta: The estimated time in seconds, None if unknown
344 * speed: The download speed in bytes/second, None if
345 unknown
5cda4eda
PH
346 * fragment_index: The counter of the currently
347 downloaded video fragment.
348 * fragment_count: The number of fragments (= individual
349 files that will be merged)
71b640cc
PH
350
351 Progress hooks are guaranteed to be called at least once
352 (with status "finished") if the download is successful.
45598f15 353 merge_output_format: Extension to use when merging formats.
6b591b29 354 final_ext: Expected final extension; used to detect when the file was
355 already downloaded and converted. "merge_output_format" is
356 replaced by this extension when given
6271f1ca
PH
357 fixup: Automatically correct known faults of the file.
358 One of:
359 - "never": do nothing
360 - "warn": only emit a warning
361 - "detect_or_warn": check whether we can do anything
62cd676c 362 about it, warn otherwise (default)
504f20dd 363 source_address: Client-side IP address to bind to.
6ec6cb4e 364 call_home: Boolean, true iff we are allowed to contact the
7a5c1cfe 365 yt-dlp servers for debugging. (BROKEN)
1cf376f5 366 sleep_interval_requests: Number of seconds to sleep between requests
367 during extraction
7aa589a5
S
368 sleep_interval: Number of seconds to sleep before each download when
369 used alone or a lower bound of a range for randomized
370 sleep before each download (minimum possible number
371 of seconds to sleep) when used along with
372 max_sleep_interval.
373 max_sleep_interval:Upper bound of a range for randomized sleep before each
374 download (maximum possible number of seconds to sleep).
375 Must only be used along with sleep_interval.
376 Actual sleep time will be a random float from range
377 [sleep_interval; max_sleep_interval].
1cf376f5 378 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
379 listformats: Print an overview of available video formats and exit.
380 list_thumbnails: Print a table of all thumbnails and exit.
347de493
PH
381 match_filter: A function that gets called with the info_dict of
382 every video.
383 If it returns a message, the video is ignored.
384 If it returns None, the video is downloaded.
385 match_filter_func in utils.py is one example for this.
7e5db8c9 386 no_color: Do not emit color codes in output.
0a840f58 387 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 388 HTTP header
0a840f58 389 geo_bypass_country:
773f291d
S
390 Two-letter ISO 3166-2 country code that will be used for
391 explicit geographic restriction bypassing via faking
504f20dd 392 X-Forwarded-For HTTP header
5f95927a
S
393 geo_bypass_ip_block:
394 IP range in CIDR notation that will be used similarly to
504f20dd 395 geo_bypass_country
71b640cc 396
85729c51 397 The following options determine which downloader is picked:
52a8a1e1 398 external_downloader: A dictionary of protocol keys and the executable of the
399 external downloader to use for it. The allowed protocols
400 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
401 Set the value to 'native' to use the native downloader
402 hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
403 or {'m3u8': 'ffmpeg'} instead.
404 Use the native HLS downloader instead of ffmpeg/avconv
bf09af3a
S
405 if True, otherwise use ffmpeg/avconv if False, otherwise
406 use downloader suggested by extractor if None.
53ed7066 407 compat_opts: Compatibility options. See "Differences in default behavior".
3acf6d38 408 The following options do not work when used through the API:
409 filename, abort-on-error, multistreams, no-live-chat,
b51d2ae3 410 no-clean-infojson, no-playlist-metafiles, no-keep-subs.
e4f02757 411 Refer __init__.py for their implementation
fe7e0c98 412
8222d8de 413 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 414 the downloader (see yt_dlp/downloader/common.py):
51d9739f 415 nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
416 max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
417 xattr_set_filesize, external_downloader_args, hls_use_mpegts, http_chunk_size.
76b1bd67
JMF
418
419 The following options are used by the post processors:
d4a24f40 420 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
e4172ac9 421 otherwise prefer ffmpeg. (avconv support is deprecated)
c0b7d117
S
422 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
423 to the binary or its containing directory.
43820c03 424 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
34488702 425 and a list of additional command-line arguments for the
426 postprocessor/executable. The dict can also have "PP+EXE" keys
427 which are used when the given exe is used by the given PP.
428 Use 'default' as the name for arguments to passed to all PP
429 For compatibility with youtube-dl, a single list of args
430 can also be used
e409895f 431
432 The following options are used by the extractors:
62bff2c1 433 extractor_retries: Number of times to retry for known errors
434 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 435 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 436 discontinuities such as ad breaks (default: False)
5d3a0e79 437 extractor_args: A dictionary of arguments to be passed to the extractors.
438 See "EXTRACTOR ARGUMENTS" for details.
439 Eg: {'youtube': {'skip': ['dash', 'hls']}}
440 youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
441 If True (default), DASH manifests and related
62bff2c1 442 data will be downloaded and processed by extractor.
443 You can reduce network I/O by disabling it if you don't
444 care about DASH. (only for youtube)
5d3a0e79 445 youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
446 If True (default), HLS manifests and related
62bff2c1 447 data will be downloaded and processed by extractor.
448 You can reduce network I/O by disabling it if you don't
449 care about HLS. (only for youtube)
8222d8de
JMF
450 """
451
c9969434
S
452 _NUMERIC_FIELDS = set((
453 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
454 'timestamp', 'upload_year', 'upload_month', 'upload_day',
455 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
456 'average_rating', 'comment_count', 'age_limit',
457 'start_time', 'end_time',
458 'chapter_number', 'season_number', 'episode_number',
459 'track_number', 'disc_number', 'release_year',
460 'playlist_index',
461 ))
462
8222d8de
JMF
463 params = None
464 _ies = []
56d868db 465 _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
b35496d8 466 _printed_messages = set()
1cf376f5 467 _first_webpage_request = True
8222d8de
JMF
468 _download_retcode = None
469 _num_downloads = None
30a074c2 470 _playlist_level = 0
471 _playlist_urls = set()
8222d8de
JMF
472 _screen_file = None
473
3511266b 474 def __init__(self, params=None, auto_init=True):
8222d8de 475 """Create a FileDownloader object with the given options."""
e9f9a10f
JMF
476 if params is None:
477 params = {}
8222d8de 478 self._ies = []
56c73665 479 self._ies_instances = {}
56d868db 480 self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
b35496d8 481 self._printed_messages = set()
1cf376f5 482 self._first_webpage_request = True
ab8e5e51 483 self._post_hooks = []
933605d7 484 self._progress_hooks = []
8222d8de
JMF
485 self._download_retcode = 0
486 self._num_downloads = 0
487 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
0783b09b 488 self._err_file = sys.stderr
4abf617b
S
489 self.params = {
490 # Default parameters
491 'nocheckcertificate': False,
492 }
493 self.params.update(params)
a0e07d31 494 self.cache = Cache(self)
34308b30 495
a61f4b28 496 if sys.version_info < (3, 6):
497 self.report_warning(
0181adef 498 'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])
a61f4b28 499
be5df5ee
S
500 def check_deprecated(param, option, suggestion):
501 if self.params.get(param) is not None:
53ed7066 502 self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
be5df5ee
S
503 return True
504 return False
505
506 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
507 if self.params.get('geo_verification_proxy') is None:
508 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
509
0d1bb027 510 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
511 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
53ed7066 512 check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
0d1bb027 513
514 for msg in self.params.get('warnings', []):
515 self.report_warning(msg)
516
6b591b29 517 if self.params.get('final_ext'):
518 if self.params.get('merge_output_format'):
519 self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
520 self.params['merge_output_format'] = self.params['final_ext']
521
b868936c 522 if self.params.get('overwrites') is None:
523 self.params.pop('overwrites', None)
524 elif self.params.get('nooverwrites') is not None:
525 # nooverwrites was unnecessarily changed to overwrites
526 # in 0c3d0f51778b153f65c21906031c2e091fcfb641
527 # This ensures compatibility with both keys
528 self.params['overwrites'] = not self.params['nooverwrites']
529 else:
530 self.params['nooverwrites'] = not self.params['overwrites']
b9d973be 531
0783b09b 532 if params.get('bidi_workaround', False):
1c088fa8
PH
533 try:
534 import pty
535 master, slave = pty.openpty()
003c69a8 536 width = compat_get_terminal_size().columns
1c088fa8
PH
537 if width is None:
538 width_args = []
539 else:
540 width_args = ['-w', str(width)]
5d681e96 541 sp_kwargs = dict(
1c088fa8
PH
542 stdin=subprocess.PIPE,
543 stdout=slave,
544 stderr=self._err_file)
5d681e96
PH
545 try:
546 self._output_process = subprocess.Popen(
547 ['bidiv'] + width_args, **sp_kwargs
548 )
549 except OSError:
5d681e96
PH
550 self._output_process = subprocess.Popen(
551 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
552 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 553 except OSError as ose:
66e7ace1 554 if ose.errno == errno.ENOENT:
6febd1c1 555 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
556 else:
557 raise
0783b09b 558
3089bc74
S
559 if (sys.platform != 'win32'
560 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
561 and not params.get('restrictfilenames', False)):
e9137224 562 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 563 self.report_warning(
6febd1c1 564 'Assuming --restrict-filenames since file system encoding '
1b725173 565 'cannot encode all characters. '
6febd1c1 566 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 567 self.params['restrictfilenames'] = True
34308b30 568
de6000d9 569 self.outtmpl_dict = self.parse_outtmpl()
486dd09e 570
187986a8 571 # Creating format selector here allows us to catch syntax errors before the extraction
572 self.format_selector = (
573 None if self.params.get('format') is None
574 else self.build_format_selector(self.params['format']))
575
dca08720
PH
576 self._setup_opener()
577
4cd0a709 578 """Preload the archive, if any is specified"""
579 def preload_download_archive(fn):
580 if fn is None:
581 return False
0760b0a7 582 self.write_debug('Loading archive file %r\n' % fn)
4cd0a709 583 try:
584 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
585 for line in archive_file:
586 self.archive.add(line.strip())
587 except IOError as ioe:
588 if ioe.errno != errno.ENOENT:
589 raise
590 return False
591 return True
592
593 self.archive = set()
594 preload_download_archive(self.params.get('download_archive'))
595
3511266b
PH
596 if auto_init:
597 self.print_debug_header()
598 self.add_default_info_extractors()
599
4f026faf 600 for pp_def_raw in self.params.get('postprocessors', []):
4f026faf 601 pp_def = dict(pp_def_raw)
fd7cfb64 602 when = pp_def.pop('when', 'post_process')
603 pp_class = get_postprocessor(pp_def.pop('key'))
4f026faf 604 pp = pp_class(self, **compat_kwargs(pp_def))
5bfa4862 605 self.add_post_processor(pp, when=when)
4f026faf 606
ab8e5e51
AM
607 for ph in self.params.get('post_hooks', []):
608 self.add_post_hook(ph)
609
71b640cc
PH
610 for ph in self.params.get('progress_hooks', []):
611 self.add_progress_hook(ph)
612
51fb4995
YCH
613 register_socks_protocols()
614
7d4111ed
PH
615 def warn_if_short_id(self, argv):
616 # short YouTube ID starting with dash?
617 idxs = [
618 i for i, a in enumerate(argv)
619 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
620 if idxs:
621 correct_argv = (
7a5c1cfe 622 ['yt-dlp']
3089bc74
S
623 + [a for i, a in enumerate(argv) if i not in idxs]
624 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
625 )
626 self.report_warning(
627 'Long argument string detected. '
628 'Use -- to separate parameters and URLs, like this:\n%s\n' %
629 args_to_str(correct_argv))
630
8222d8de
JMF
631 def add_info_extractor(self, ie):
632 """Add an InfoExtractor object to the end of the list."""
633 self._ies.append(ie)
e52d7f85
JMF
634 if not isinstance(ie, type):
635 self._ies_instances[ie.ie_key()] = ie
636 ie.set_downloader(self)
8222d8de 637
56c73665
JMF
638 def get_info_extractor(self, ie_key):
639 """
640 Get an instance of an IE with name ie_key, it will try to get one from
641 the _ies list, if there's no instance it will create a new one and add
642 it to the extractor list.
643 """
644 ie = self._ies_instances.get(ie_key)
645 if ie is None:
646 ie = get_info_extractor(ie_key)()
647 self.add_info_extractor(ie)
648 return ie
649
023fa8c4
JMF
650 def add_default_info_extractors(self):
651 """
652 Add the InfoExtractors returned by gen_extractors to the end of the list
653 """
e52d7f85 654 for ie in gen_extractor_classes():
023fa8c4
JMF
655 self.add_info_extractor(ie)
656
56d868db 657 def add_post_processor(self, pp, when='post_process'):
8222d8de 658 """Add a PostProcessor object to the end of the chain."""
5bfa4862 659 self._pps[when].append(pp)
8222d8de
JMF
660 pp.set_downloader(self)
661
ab8e5e51
AM
662 def add_post_hook(self, ph):
663 """Add the post hook"""
664 self._post_hooks.append(ph)
665
933605d7
JMF
666 def add_progress_hook(self, ph):
667 """Add the progress hook (currently only for the file downloader)"""
668 self._progress_hooks.append(ph)
8ab470f1 669
1c088fa8 670 def _bidi_workaround(self, message):
5d681e96 671 if not hasattr(self, '_output_channel'):
1c088fa8
PH
672 return message
673
5d681e96 674 assert hasattr(self, '_output_process')
11b85ce6 675 assert isinstance(message, compat_str)
6febd1c1
PH
676 line_count = message.count('\n') + 1
677 self._output_process.stdin.write((message + '\n').encode('utf-8'))
5d681e96 678 self._output_process.stdin.flush()
6febd1c1 679 res = ''.join(self._output_channel.readline().decode('utf-8')
9e1a5b84 680 for _ in range(line_count))
6febd1c1 681 return res[:-len('\n')]
1c088fa8 682
b35496d8 683 def _write_string(self, message, out=None, only_once=False):
684 if only_once:
685 if message in self._printed_messages:
686 return
687 self._printed_messages.add(message)
688 write_string(message, out=out, encoding=self.params.get('encoding'))
734f90bb 689
848887eb 690 def to_stdout(self, message, skip_eol=False, quiet=False):
0760b0a7 691 """Print message to stdout"""
8bf9319e 692 if self.params.get('logger'):
43afe285 693 self.params['logger'].debug(message)
835a1478 694 elif not quiet or self.params.get('verbose'):
695 self._write_string(
696 '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
697 self._err_file if quiet else self._screen_file)
8222d8de 698
b35496d8 699 def to_stderr(self, message, only_once=False):
0760b0a7 700 """Print message to stderr"""
11b85ce6 701 assert isinstance(message, compat_str)
8bf9319e 702 if self.params.get('logger'):
43afe285
IB
703 self.params['logger'].error(message)
704 else:
b35496d8 705 self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
8222d8de 706
1e5b9a95
PH
707 def to_console_title(self, message):
708 if not self.params.get('consoletitle', False):
709 return
4bede0d8
C
710 if compat_os_name == 'nt':
711 if ctypes.windll.kernel32.GetConsoleWindow():
712 # c_wchar_p() might not be necessary if `message` is
713 # already of type unicode()
714 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
1e5b9a95 715 elif 'TERM' in os.environ:
b46696bd 716 self._write_string('\033]0;%s\007' % message, self._screen_file)
1e5b9a95 717
bdde425c
PH
718 def save_console_title(self):
719 if not self.params.get('consoletitle', False):
720 return
b7b04c78 721 if self.params.get('simulate'):
94c3442e 722 return
4bede0d8 723 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 724 # Save the title on stack
734f90bb 725 self._write_string('\033[22;0t', self._screen_file)
bdde425c
PH
726
727 def restore_console_title(self):
728 if not self.params.get('consoletitle', False):
729 return
b7b04c78 730 if self.params.get('simulate'):
94c3442e 731 return
4bede0d8 732 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 733 # Restore the title from stack
734f90bb 734 self._write_string('\033[23;0t', self._screen_file)
bdde425c
PH
735
736 def __enter__(self):
737 self.save_console_title()
738 return self
739
740 def __exit__(self, *args):
741 self.restore_console_title()
f89197d7 742
dca08720 743 if self.params.get('cookiefile') is not None:
1bab3437 744 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 745
8222d8de
JMF
746 def trouble(self, message=None, tb=None):
747 """Determine action to take when a download problem appears.
748
749 Depending on if the downloader has been configured to ignore
750 download errors or not, this method may throw an exception or
751 not when errors are found, after printing the message.
752
753 tb, if given, is additional traceback information.
754 """
755 if message is not None:
756 self.to_stderr(message)
757 if self.params.get('verbose'):
758 if tb is None:
759 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 760 tb = ''
8222d8de 761 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 762 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 763 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
764 else:
765 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 766 tb = ''.join(tb_data)
c19bc311 767 if tb:
768 self.to_stderr(tb)
8222d8de
JMF
769 if not self.params.get('ignoreerrors', False):
770 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
771 exc_info = sys.exc_info()[1].exc_info
772 else:
773 exc_info = sys.exc_info()
774 raise DownloadError(message, exc_info)
775 self._download_retcode = 1
776
0760b0a7 777 def to_screen(self, message, skip_eol=False):
778 """Print message to stdout if not in quiet mode"""
779 self.to_stdout(
780 message, skip_eol, quiet=self.params.get('quiet', False))
781
c84aeac6 782 def report_warning(self, message, only_once=False):
8222d8de
JMF
783 '''
784 Print the message to stderr, it will be prefixed with 'WARNING:'
785 If stderr is a tty file the 'WARNING:' will be colored
786 '''
6d07ce01
JMF
787 if self.params.get('logger') is not None:
788 self.params['logger'].warning(message)
8222d8de 789 else:
ad8915b7
PH
790 if self.params.get('no_warnings'):
791 return
e9c0cdd3 792 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6d07ce01
JMF
793 _msg_header = '\033[0;33mWARNING:\033[0m'
794 else:
795 _msg_header = 'WARNING:'
796 warning_message = '%s %s' % (_msg_header, message)
b35496d8 797 self.to_stderr(warning_message, only_once)
8222d8de
JMF
798
799 def report_error(self, message, tb=None):
800 '''
801 Do the same as trouble, but prefixes the message with 'ERROR:', colored
802 in red if stderr is a tty file.
803 '''
e9c0cdd3 804 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6febd1c1 805 _msg_header = '\033[0;31mERROR:\033[0m'
8222d8de 806 else:
6febd1c1
PH
807 _msg_header = 'ERROR:'
808 error_message = '%s %s' % (_msg_header, message)
8222d8de
JMF
809 self.trouble(error_message, tb)
810
b35496d8 811 def write_debug(self, message, only_once=False):
0760b0a7 812 '''Log debug message or Print message to stderr'''
813 if not self.params.get('verbose', False):
814 return
815 message = '[debug] %s' % message
816 if self.params.get('logger'):
817 self.params['logger'].debug(message)
818 else:
b35496d8 819 self.to_stderr(message, only_once)
0760b0a7 820
8222d8de
JMF
821 def report_file_already_downloaded(self, file_name):
822 """Report file has already been fully downloaded."""
823 try:
6febd1c1 824 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 825 except UnicodeEncodeError:
6febd1c1 826 self.to_screen('[download] The file has already been downloaded')
8222d8de 827
0c3d0f51 828 def report_file_delete(self, file_name):
829 """Report that existing file will be deleted."""
830 try:
c25228e5 831 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 832 except UnicodeEncodeError:
c25228e5 833 self.to_screen('Deleting existing file')
0c3d0f51 834
de6000d9 835 def parse_outtmpl(self):
836 outtmpl_dict = self.params.get('outtmpl', {})
837 if not isinstance(outtmpl_dict, dict):
838 outtmpl_dict = {'default': outtmpl_dict}
839 outtmpl_dict.update({
840 k: v for k, v in DEFAULT_OUTTMPL.items()
841 if not outtmpl_dict.get(k)})
842 for key, val in outtmpl_dict.items():
843 if isinstance(val, bytes):
844 self.report_warning(
845 'Parameter outtmpl is bytes, but should be a unicode string. '
846 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
847 return outtmpl_dict
848
21cd8fae 849 def get_output_path(self, dir_type='', filename=None):
850 paths = self.params.get('paths', {})
851 assert isinstance(paths, dict)
852 path = os.path.join(
853 expand_path(paths.get('home', '').strip()),
854 expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
855 filename or '')
856
857 # Temporary fix for #4787
858 # 'Treat' all problem characters by passing filename through preferredencoding
859 # to workaround encoding issues with subprocess on python2 @ Windows
860 if sys.version_info < (3, 0) and sys.platform == 'win32':
861 path = encodeFilename(path, True).decode(preferredencoding())
862 return sanitize_path(path, force=self.params.get('windowsfilenames'))
863
76a264ac 864 @staticmethod
901130bb 865 def _outtmpl_expandpath(outtmpl):
866 # expand_path translates '%%' into '%' and '$$' into '$'
867 # correspondingly that is not what we want since we need to keep
868 # '%%' intact for template dict substitution step. Working around
869 # with boundary-alike separator hack.
870 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
871 outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
872
873 # outtmpl should be expand_path'ed before template dict substitution
874 # because meta fields may contain env variables we don't want to
875 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
876 # title "Hello $PATH", we don't want `$PATH` to be expanded.
877 return expand_path(outtmpl).replace(sep, '')
878
879 @staticmethod
880 def escape_outtmpl(outtmpl):
881 ''' Escape any remaining strings like %s, %abc% etc. '''
882 return re.sub(
883 STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
884 lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
885 outtmpl)
886
887 @classmethod
888 def validate_outtmpl(cls, outtmpl):
76a264ac 889 ''' @return None or Exception object '''
7d1eb38a 890 outtmpl = re.sub(
891 STR_FORMAT_RE_TMPL.format('[^)]*', '[ljq]'),
892 lambda mobj: f'{mobj.group(0)[:-1]}s',
893 cls._outtmpl_expandpath(outtmpl))
76a264ac 894 try:
7d1eb38a 895 cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
76a264ac 896 return None
897 except ValueError as err:
898 return err
899
143db31d 900 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
901130bb 901 """ Make the template and info_dict suitable for substitution : ydl.outtmpl_escape(outtmpl) % info_dict """
6e84b215 902 info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
143db31d 903
6e84b215 904 info_dict = dict(info_dict) # Do not sanitize so as not to consume LazyList
905 for key in ('__original_infodict', '__postprocessors'):
906 info_dict.pop(key, None)
752cda38 907 info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
53c18592 908 formatSeconds(info_dict['duration'], '-' if sanitize else ':')
143db31d 909 if info_dict.get('duration', None) is not None
910 else None)
752cda38 911 info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
912 if info_dict.get('resolution') is None:
913 info_dict['resolution'] = self.format_resolution(info_dict, default=None)
143db31d 914
143db31d 915 # For fields playlist_index and autonumber convert all occurrences
916 # of %(field)s to %(field)0Nd for backward compatibility
917 field_size_compat_map = {
752cda38 918 'playlist_index': len(str(info_dict.get('_last_playlist_index') or '')),
919 'autonumber': self.params.get('autonumber_size') or 5,
143db31d 920 }
752cda38 921
385a27fa 922 TMPL_DICT = {}
7d1eb38a 923 EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljq]'))
385a27fa 924 MATH_FUNCTIONS = {
925 '+': float.__add__,
926 '-': float.__sub__,
927 }
e625be0d 928 # Field is of the form key1.key2...
929 # where keys (except first) can be string, int or slice
2b8a2973 930 FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
385a27fa 931 MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
932 MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
e625be0d 933 INTERNAL_FORMAT_RE = re.compile(r'''(?x)
934 (?P<negate>-)?
385a27fa 935 (?P<fields>{field})
936 (?P<maths>(?:{math_op}{math_field})*)
e625be0d 937 (?:>(?P<strf_format>.+?))?
938 (?:\|(?P<default>.*?))?
385a27fa 939 $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
752cda38 940
2b8a2973 941 def _traverse_infodict(k):
942 k = k.split('.')
943 if k[0] == '':
944 k.pop(0)
945 return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
76a264ac 946
752cda38 947 def get_value(mdict):
948 # Object traversal
2b8a2973 949 value = _traverse_infodict(mdict['fields'])
752cda38 950 # Negative
951 if mdict['negate']:
952 value = float_or_none(value)
953 if value is not None:
954 value *= -1
955 # Do maths
385a27fa 956 offset_key = mdict['maths']
957 if offset_key:
752cda38 958 value = float_or_none(value)
959 operator = None
385a27fa 960 while offset_key:
961 item = re.match(
962 MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
963 offset_key).group(0)
964 offset_key = offset_key[len(item):]
965 if operator is None:
752cda38 966 operator = MATH_FUNCTIONS[item]
385a27fa 967 continue
968 item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
969 offset = float_or_none(item)
970 if offset is None:
2b8a2973 971 offset = float_or_none(_traverse_infodict(item))
385a27fa 972 try:
973 value = operator(value, multiplier * offset)
974 except (TypeError, ZeroDivisionError):
975 return None
976 operator = None
752cda38 977 # Datetime formatting
978 if mdict['strf_format']:
979 value = strftime_or_none(value, mdict['strf_format'])
980
981 return value
982
b868936c 983 na = self.params.get('outtmpl_na_placeholder', 'NA')
984
6e84b215 985 def _dumpjson_default(obj):
986 if isinstance(obj, (set, LazyList)):
987 return list(obj)
988 raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')
989
752cda38 990 def create_key(outer_mobj):
991 if not outer_mobj.group('has_key'):
901130bb 992 return f'%{outer_mobj.group(0)}'
752cda38 993 key = outer_mobj.group('key')
752cda38 994 mobj = re.match(INTERNAL_FORMAT_RE, key)
995 if mobj is None:
9fea350f 996 value, default, mobj = None, na, {'fields': ''}
752cda38 997 else:
e625be0d 998 mobj = mobj.groupdict()
752cda38 999 default = mobj['default'] if mobj['default'] is not None else na
1000 value = get_value(mobj)
1001
b868936c 1002 fmt = outer_mobj.group('format')
752cda38 1003 if fmt == 's' and value is not None and key in field_size_compat_map.keys():
1004 fmt = '0{:d}d'.format(field_size_compat_map[key])
1005
1006 value = default if value is None else value
752cda38 1007
7d1eb38a 1008 str_fmt = f'{fmt[:-1]}s'
1009 if fmt[-1] == 'l':
1010 value, fmt = ', '.join(variadic(value)), str_fmt
1011 elif fmt[-1] == 'j':
6e84b215 1012 value, fmt = json.dumps(value, default=_dumpjson_default), str_fmt
7d1eb38a 1013 elif fmt[-1] == 'q':
1014 value, fmt = compat_shlex_quote(str(value)), str_fmt
1015 elif fmt[-1] == 'c':
1016 value = str(value)
76a264ac 1017 if value is None:
1018 value, fmt = default, 's'
1019 else:
1020 value = value[0]
1021 elif fmt[-1] not in 'rs': # numeric
a439a3a4 1022 value = float_or_none(value)
752cda38 1023 if value is None:
1024 value, fmt = default, 's'
901130bb 1025
752cda38 1026 if sanitize:
1027 if fmt[-1] == 'r':
1028 # If value is an object, sanitize might convert it to a string
1029 # So we convert it to repr first
7d1eb38a 1030 value, fmt = repr(value), str_fmt
639f1cea 1031 if fmt[-1] in 'csr':
9fea350f 1032 value = sanitize(mobj['fields'].split('.')[-1], value)
901130bb 1033
b868936c 1034 key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
385a27fa 1035 TMPL_DICT[key] = value
b868936c 1036 return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
752cda38 1037
385a27fa 1038 return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
143db31d 1039
de6000d9 1040 def _prepare_filename(self, info_dict, tmpl_type='default'):
8222d8de 1041 try:
586a91b6 1042 sanitize = lambda k, v: sanitize_filename(
45598aab 1043 compat_str(v),
1bb5c511 1044 restricted=self.params.get('restrictfilenames'),
40df485f 1045 is_id=(k == 'id' or k.endswith('_id')))
de6000d9 1046 outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
143db31d 1047 outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
901130bb 1048 outtmpl = self.escape_outtmpl(self._outtmpl_expandpath(outtmpl))
1049 filename = outtmpl % template_dict
15da37c7 1050
143db31d 1051 force_ext = OUTTMPL_TYPES.get(tmpl_type)
de6000d9 1052 if force_ext is not None:
752cda38 1053 filename = replace_extension(filename, force_ext, info_dict.get('ext'))
de6000d9 1054
bdc3fd2f
U
1055 # https://github.com/blackjack4494/youtube-dlc/issues/85
1056 trim_file_name = self.params.get('trim_file_name', False)
1057 if trim_file_name:
1058 fn_groups = filename.rsplit('.')
1059 ext = fn_groups[-1]
1060 sub_ext = ''
1061 if len(fn_groups) > 2:
1062 sub_ext = fn_groups[-2]
1063 filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
1064
0202b52a 1065 return filename
8222d8de 1066 except ValueError as err:
6febd1c1 1067 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
1068 return None
1069
de6000d9 1070 def prepare_filename(self, info_dict, dir_type='', warn=False):
1071 """Generate the output filename."""
21cd8fae 1072
de6000d9 1073 filename = self._prepare_filename(info_dict, dir_type or 'default')
1074
c84aeac6 1075 if warn:
21cd8fae 1076 if not self.params.get('paths'):
de6000d9 1077 pass
1078 elif filename == '-':
c84aeac6 1079 self.report_warning('--paths is ignored when an outputting to stdout', only_once=True)
de6000d9 1080 elif os.path.isabs(filename):
c84aeac6 1081 self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
de6000d9 1082 if filename == '-' or not filename:
1083 return filename
1084
21cd8fae 1085 return self.get_output_path(dir_type, filename)
0202b52a 1086
120fe513 1087 def _match_entry(self, info_dict, incomplete=False, silent=False):
ecdec191 1088 """ Returns None if the file should be downloaded """
8222d8de 1089
c77495e3 1090 video_title = info_dict.get('title', info_dict.get('id', 'video'))
1091
8b0d7497 1092 def check_filter():
8b0d7497 1093 if 'title' in info_dict:
1094 # This can happen when we're just evaluating the playlist
1095 title = info_dict['title']
1096 matchtitle = self.params.get('matchtitle', False)
1097 if matchtitle:
1098 if not re.search(matchtitle, title, re.IGNORECASE):
1099 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
1100 rejecttitle = self.params.get('rejecttitle', False)
1101 if rejecttitle:
1102 if re.search(rejecttitle, title, re.IGNORECASE):
1103 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
1104 date = info_dict.get('upload_date')
1105 if date is not None:
1106 dateRange = self.params.get('daterange', DateRange())
1107 if date not in dateRange:
1108 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
1109 view_count = info_dict.get('view_count')
1110 if view_count is not None:
1111 min_views = self.params.get('min_views')
1112 if min_views is not None and view_count < min_views:
1113 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
1114 max_views = self.params.get('max_views')
1115 if max_views is not None and view_count > max_views:
1116 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
1117 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
1118 return 'Skipping "%s" because it is age restricted' % video_title
8b0d7497 1119
8f18aca8 1120 match_filter = self.params.get('match_filter')
1121 if match_filter is not None:
1122 try:
1123 ret = match_filter(info_dict, incomplete=incomplete)
1124 except TypeError:
1125 # For backward compatibility
1126 ret = None if incomplete else match_filter(info_dict)
1127 if ret is not None:
1128 return ret
8b0d7497 1129 return None
1130
c77495e3 1131 if self.in_download_archive(info_dict):
1132 reason = '%s has already been recorded in the archive' % video_title
1133 break_opt, break_err = 'break_on_existing', ExistingVideoReached
1134 else:
1135 reason = check_filter()
1136 break_opt, break_err = 'break_on_reject', RejectedVideoReached
8b0d7497 1137 if reason is not None:
120fe513 1138 if not silent:
1139 self.to_screen('[download] ' + reason)
c77495e3 1140 if self.params.get(break_opt, False):
1141 raise break_err()
8b0d7497 1142 return reason
fe7e0c98 1143
b6c45014
JMF
1144 @staticmethod
1145 def add_extra_info(info_dict, extra_info):
1146 '''Set the keys from extra_info in info dict if they are missing'''
1147 for key, value in extra_info.items():
1148 info_dict.setdefault(key, value)
1149
58f197b7 1150 def extract_info(self, url, download=True, ie_key=None, extra_info={},
61aa5ba3 1151 process=True, force_generic_extractor=False):
41d1cca3 1152 """
1153 Return a list with a dictionary for each video extracted.
1154
1155 Arguments:
1156 url -- URL to extract
1157
1158 Keyword arguments:
1159 download -- whether to download videos during extraction
1160 ie_key -- extractor key hint
1161 extra_info -- dictionary containing the extra values to add to each result
1162 process -- whether to resolve all unresolved references (URLs, playlist items),
1163 must be True for download to work.
1164 force_generic_extractor -- force using the generic extractor
1165 """
fe7e0c98 1166
61aa5ba3 1167 if not ie_key and force_generic_extractor:
d22dec74
S
1168 ie_key = 'Generic'
1169
8222d8de 1170 if ie_key:
56c73665 1171 ies = [self.get_info_extractor(ie_key)]
8222d8de
JMF
1172 else:
1173 ies = self._ies
1174
1175 for ie in ies:
1176 if not ie.suitable(url):
1177 continue
1178
9a68de12 1179 ie_key = ie.ie_key()
1180 ie = self.get_info_extractor(ie_key)
8222d8de 1181 if not ie.working():
6febd1c1
PH
1182 self.report_warning('The program functionality for this site has been marked as broken, '
1183 'and will probably not work.')
8222d8de
JMF
1184
1185 try:
d0757229 1186 temp_id = str_or_none(
63be1aab 1187 ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
1188 else ie._match_id(url))
a0566bbf 1189 except (AssertionError, IndexError, AttributeError):
1190 temp_id = None
1191 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1192 self.to_screen("[%s] %s: has already been recorded in archive" % (
1193 ie_key, temp_id))
1194 break
58f197b7 1195 return self.__extract_info(url, ie, download, extra_info, process)
a0566bbf 1196 else:
1197 self.report_error('no suitable InfoExtractor for URL %s' % url)
1198
cc9d1493 1199 def __handle_extraction_exceptions(func, handle_all_errors=True):
a0566bbf 1200 def wrapper(self, *args, **kwargs):
1201 try:
1202 return func(self, *args, **kwargs)
773f291d
S
1203 except GeoRestrictedError as e:
1204 msg = e.msg
1205 if e.countries:
1206 msg += '\nThis video is available in %s.' % ', '.join(
1207 map(ISO3166Utils.short2full, e.countries))
1208 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1209 self.report_error(msg)
fb043a6e 1210 except ExtractorError as e: # An error we somewhat expected
2c74e6fa 1211 self.report_error(compat_str(e), e.format_traceback())
51d9739f 1212 except ThrottledDownload:
1213 self.to_stderr('\r')
1214 self.report_warning('The download speed is below throttle limit. Re-extracting data')
1215 return wrapper(self, *args, **kwargs)
8b0d7497 1216 except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
d3e5bbf4 1217 raise
8222d8de 1218 except Exception as e:
cc9d1493 1219 if handle_all_errors and self.params.get('ignoreerrors', False):
9b9c5355 1220 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
8222d8de
JMF
1221 else:
1222 raise
a0566bbf 1223 return wrapper
1224
1225 @__handle_extraction_exceptions
58f197b7 1226 def __extract_info(self, url, ie, download, extra_info, process):
a0566bbf 1227 ie_result = ie.extract(url)
1228 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1229 return
1230 if isinstance(ie_result, list):
1231 # Backwards compatibility: old IE result format
1232 ie_result = {
1233 '_type': 'compat_list',
1234 'entries': ie_result,
1235 }
e37d0efb 1236 if extra_info.get('original_url'):
1237 ie_result.setdefault('original_url', extra_info['original_url'])
a0566bbf 1238 self.add_default_extra_info(ie_result, ie, url)
1239 if process:
1240 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1241 else:
a0566bbf 1242 return ie_result
fe7e0c98 1243
ea38e55f 1244 def add_default_extra_info(self, ie_result, ie, url):
6033d980 1245 if url is not None:
1246 self.add_extra_info(ie_result, {
1247 'webpage_url': url,
1248 'original_url': url,
1249 'webpage_url_basename': url_basename(url),
1250 })
1251 if ie is not None:
1252 self.add_extra_info(ie_result, {
1253 'extractor': ie.IE_NAME,
1254 'extractor_key': ie.ie_key(),
1255 })
ea38e55f 1256
8222d8de
JMF
1257 def process_ie_result(self, ie_result, download=True, extra_info={}):
1258 """
1259 Take the result of the ie(may be modified) and resolve all unresolved
1260 references (URLs, playlist items).
1261
1262 It will also download the videos if 'download'.
1263 Returns the resolved ie_result.
1264 """
e8ee972c
PH
1265 result_type = ie_result.get('_type', 'video')
1266
057a5206 1267 if result_type in ('url', 'url_transparent'):
134c6ea8 1268 ie_result['url'] = sanitize_url(ie_result['url'])
e37d0efb 1269 if ie_result.get('original_url'):
1270 extra_info.setdefault('original_url', ie_result['original_url'])
1271
057a5206 1272 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1273 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1274 or extract_flat is True):
ecb54191 1275 info_copy = ie_result.copy()
1276 self.add_extra_info(info_copy, extra_info)
6033d980 1277 ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
1278 self.add_default_extra_info(info_copy, ie, ie_result['url'])
ecb54191 1279 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
e8ee972c
PH
1280 return ie_result
1281
8222d8de 1282 if result_type == 'video':
b6c45014 1283 self.add_extra_info(ie_result, extra_info)
9c2b75b5 1284 ie_result = self.process_video_result(ie_result, download=download)
28b0eb0f 1285 additional_urls = (ie_result or {}).get('additional_urls')
9c2b75b5 1286 if additional_urls:
e9f4ccd1 1287 # TODO: Improve MetadataParserPP to allow setting a list
9c2b75b5 1288 if isinstance(additional_urls, compat_str):
1289 additional_urls = [additional_urls]
1290 self.to_screen(
1291 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
1292 self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
1293 ie_result['additional_entries'] = [
1294 self.extract_info(
1295 url, download, extra_info,
1296 force_generic_extractor=self.params.get('force_generic_extractor'))
1297 for url in additional_urls
1298 ]
1299 return ie_result
8222d8de
JMF
1300 elif result_type == 'url':
1301 # We have to add extra_info to the results because it may be
1302 # contained in a playlist
07cce701 1303 return self.extract_info(
1304 ie_result['url'], download,
1305 ie_key=ie_result.get('ie_key'),
1306 extra_info=extra_info)
7fc3fa05
PH
1307 elif result_type == 'url_transparent':
1308 # Use the information from the embedding page
1309 info = self.extract_info(
1310 ie_result['url'], ie_key=ie_result.get('ie_key'),
1311 extra_info=extra_info, download=False, process=False)
1312
1640eb09
S
1313 # extract_info may return None when ignoreerrors is enabled and
1314 # extraction failed with an error, don't crash and return early
1315 # in this case
1316 if not info:
1317 return info
1318
412c617d
PH
1319 force_properties = dict(
1320 (k, v) for k, v in ie_result.items() if v is not None)
0396806f 1321 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
412c617d
PH
1322 if f in force_properties:
1323 del force_properties[f]
1324 new_result = info.copy()
1325 new_result.update(force_properties)
7fc3fa05 1326
0563f7ac
S
1327 # Extracted info may not be a video result (i.e.
1328 # info.get('_type', 'video') != 'video') but rather a URL or
1329 # url_transparent. In such cases outer metadata (from ie_result)
1330 # should be propagated to inner one (info). For this to happen
1331 # _type of info should be overridden with url_transparent. This
067aa17e 1332 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1333 if new_result.get('_type') == 'url':
1334 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1335
1336 return self.process_ie_result(
1337 new_result, download=download, extra_info=extra_info)
40fcba5e 1338 elif result_type in ('playlist', 'multi_video'):
30a074c2 1339 # Protect from infinite recursion due to recursively nested playlists
1340 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1341 webpage_url = ie_result['webpage_url']
1342 if webpage_url in self._playlist_urls:
7e85e872 1343 self.to_screen(
30a074c2 1344 '[download] Skipping already downloaded playlist: %s'
1345 % (ie_result.get('title') or ie_result.get('id')))
1346 return
7e85e872 1347
30a074c2 1348 self._playlist_level += 1
1349 self._playlist_urls.add(webpage_url)
bc516a3f 1350 self._sanitize_thumbnails(ie_result)
30a074c2 1351 try:
1352 return self.__process_playlist(ie_result, download)
1353 finally:
1354 self._playlist_level -= 1
1355 if not self._playlist_level:
1356 self._playlist_urls.clear()
8222d8de 1357 elif result_type == 'compat_list':
c9bf4114
PH
1358 self.report_warning(
1359 'Extractor %s returned a compat_list result. '
1360 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1361
8222d8de 1362 def _fixup(r):
b868936c 1363 self.add_extra_info(r, {
1364 'extractor': ie_result['extractor'],
1365 'webpage_url': ie_result['webpage_url'],
1366 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1367 'extractor_key': ie_result['extractor_key'],
1368 })
8222d8de
JMF
1369 return r
1370 ie_result['entries'] = [
b6c45014 1371 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1372 for r in ie_result['entries']
1373 ]
1374 return ie_result
1375 else:
1376 raise Exception('Invalid result type: %s' % result_type)
1377
e92caff5 1378 def _ensure_dir_exists(self, path):
1379 return make_dir(path, self.report_error)
1380
30a074c2 1381 def __process_playlist(self, ie_result, download):
1382 # We process each entry in the playlist
1383 playlist = ie_result.get('title') or ie_result.get('id')
1384 self.to_screen('[download] Downloading playlist: %s' % playlist)
1385
498f5606 1386 if 'entries' not in ie_result:
1387 raise EntryNotInPlaylist()
1388 incomplete_entries = bool(ie_result.get('requested_entries'))
1389 if incomplete_entries:
1390 def fill_missing_entries(entries, indexes):
1391 ret = [None] * max(indexes)
1392 for i, entry in zip(indexes, entries):
1393 ret[i - 1] = entry
1394 return ret
1395 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
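# Illustrative sketch (not part of the original source): with entries=[e1, e2] and
# requested_entries=[1, 4], fill_missing_entries returns [e1, None, None, e2], so the
# positions still line up with the originally requested playlist indexes.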
02fd60d3 1396
30a074c2 1397 playlist_results = []
1398
56a8fb4f 1399 playliststart = self.params.get('playliststart', 1)
30a074c2 1400 playlistend = self.params.get('playlistend')
1401 # For backwards compatibility, interpret -1 as whole list
1402 if playlistend == -1:
1403 playlistend = None
1404
1405 playlistitems_str = self.params.get('playlist_items')
1406 playlistitems = None
1407 if playlistitems_str is not None:
1408 def iter_playlistitems(format):
1409 for string_segment in format.split(','):
1410 if '-' in string_segment:
1411 start, end = string_segment.split('-')
1412 for item in range(int(start), int(end) + 1):
1413 yield int(item)
1414 else:
1415 yield int(string_segment)
1416 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
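# For example (illustration only): '--playlist-items 1-3,7' yields the spec string
# '1-3,7', which iter_playlistitems expands to 1, 2, 3, 7, and orderedSet then
# de-duplicates the values while preserving their order.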
1417
1418 ie_entries = ie_result['entries']
56a8fb4f 1419 msg = (
1420 'Downloading %d videos' if not isinstance(ie_entries, list)
1421 else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
1422 if not isinstance(ie_entries, (list, PagedList)):
1423 ie_entries = LazyList(ie_entries)
1424
50fed816 1425 def get_entry(i):
1426 return YoutubeDL.__handle_extraction_exceptions(
cc9d1493 1427 lambda self, i: ie_entries[i - 1],
1428 False
50fed816 1429 )(self, i)
1430
56a8fb4f 1431 entries = []
1432 for i in playlistitems or itertools.count(playliststart):
1433 if playlistitems is None and playlistend is not None and playlistend < i:
1434 break
1435 entry = None
1436 try:
50fed816 1437 entry = get_entry(i)
56a8fb4f 1438 if entry is None:
498f5606 1439 raise EntryNotInPlaylist()
56a8fb4f 1440 except (IndexError, EntryNotInPlaylist):
1441 if incomplete_entries:
1442 raise EntryNotInPlaylist()
1443 elif not playlistitems:
1444 break
1445 entries.append(entry)
120fe513 1446 try:
1447 if entry is not None:
1448 self._match_entry(entry, incomplete=True, silent=True)
1449 except (ExistingVideoReached, RejectedVideoReached):
1450 break
56a8fb4f 1451 ie_result['entries'] = entries
30a074c2 1452
56a8fb4f 1453 # Save playlist_index before re-ordering
1454 entries = [
1455 ((playlistitems[i - 1] if playlistitems else i), entry)
1456 for i, entry in enumerate(entries, 1)
1457 if entry is not None]
1458 n_entries = len(entries)
498f5606 1459
498f5606 1460 if not playlistitems and (playliststart or playlistend):
56a8fb4f 1461 playlistitems = list(range(playliststart, playliststart + n_entries))
498f5606 1462 ie_result['requested_entries'] = playlistitems
1463
1464 if self.params.get('allow_playlist_files', True):
1465 ie_copy = {
1466 'playlist': playlist,
1467 'playlist_id': ie_result.get('id'),
1468 'playlist_title': ie_result.get('title'),
1469 'playlist_uploader': ie_result.get('uploader'),
1470 'playlist_uploader_id': ie_result.get('uploader_id'),
71729754 1471 'playlist_index': 0,
498f5606 1472 }
1473 ie_copy.update(dict(ie_result))
1474
1475 if self.params.get('writeinfojson', False):
1476 infofn = self.prepare_filename(ie_copy, 'pl_infojson')
1477 if not self._ensure_dir_exists(encodeFilename(infofn)):
1478 return
1479 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
1480 self.to_screen('[info] Playlist metadata is already present')
1481 else:
1482 self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
1483 try:
8012d892 1484 write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
498f5606 1485 except (OSError, IOError):
1486 self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
1487
681de68e 1488 # TODO: This should be passed to ThumbnailsConvertor if necessary
1489 self._write_thumbnails(ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
1490
498f5606 1491 if self.params.get('writedescription', False):
1492 descfn = self.prepare_filename(ie_copy, 'pl_description')
1493 if not self._ensure_dir_exists(encodeFilename(descfn)):
1494 return
1495 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
1496 self.to_screen('[info] Playlist description is already present')
1497 elif ie_result.get('description') is None:
1498 self.report_warning('There\'s no playlist description to write.')
1499 else:
1500 try:
1501 self.to_screen('[info] Writing playlist description to: ' + descfn)
1502 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1503 descfile.write(ie_result['description'])
1504 except (OSError, IOError):
1505 self.report_error('Cannot write playlist description file ' + descfn)
1506 return
30a074c2 1507
1508 if self.params.get('playlistreverse', False):
1509 entries = entries[::-1]
30a074c2 1510 if self.params.get('playlistrandom', False):
1511 random.shuffle(entries)
1512
1513 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1514
56a8fb4f 1515 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
26e2805c 1516 failures = 0
1517 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
71729754 1518 for i, entry_tuple in enumerate(entries, 1):
1519 playlist_index, entry = entry_tuple
53ed7066 1520 if 'playlist_index' in self.params.get('compat_options', []):
1521 playlist_index = playlistitems[i - 1] if playlistitems else i
30a074c2 1522 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1523 # This __x_forwarded_for_ip thing is a bit ugly but requires
1524 # minimal changes
1525 if x_forwarded_for:
1526 entry['__x_forwarded_for_ip'] = x_forwarded_for
1527 extra = {
1528 'n_entries': n_entries,
f59ae581 1529 '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
71729754 1530 'playlist_index': playlist_index,
1531 'playlist_autonumber': i,
30a074c2 1532 'playlist': playlist,
1533 'playlist_id': ie_result.get('id'),
1534 'playlist_title': ie_result.get('title'),
1535 'playlist_uploader': ie_result.get('uploader'),
1536 'playlist_uploader_id': ie_result.get('uploader_id'),
30a074c2 1537 'extractor': ie_result['extractor'],
1538 'webpage_url': ie_result['webpage_url'],
1539 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1540 'extractor_key': ie_result['extractor_key'],
1541 }
1542
1543 if self._match_entry(entry, incomplete=True) is not None:
1544 continue
1545
1546 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1547 if not entry_result:
1548 failures += 1
1549 if failures >= max_failures:
1550 self.report_error(
1551 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
1552 break
30a074c2 1553 # TODO: skip failed (empty) entries?
1554 playlist_results.append(entry_result)
1555 ie_result['entries'] = playlist_results
1556 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1557 return ie_result
1558
a0566bbf 1559 @__handle_extraction_exceptions
1560 def __process_iterable_entry(self, entry, download, extra_info):
1561 return self.process_ie_result(
1562 entry, download=download, extra_info=extra_info)
1563
67134eab
JMF
1564 def _build_format_filter(self, filter_spec):
1565 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1566
1567 OPERATORS = {
1568 '<': operator.lt,
1569 '<=': operator.le,
1570 '>': operator.gt,
1571 '>=': operator.ge,
1572 '=': operator.eq,
1573 '!=': operator.ne,
1574 }
67134eab 1575 operator_rex = re.compile(r'''(?x)\s*
187986a8 1576 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
1577 (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1578 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
083c9df9 1579 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
187986a8 1580 m = operator_rex.fullmatch(filter_spec)
9ddb6925
S
1581 if m:
1582 try:
1583 comparison_value = int(m.group('value'))
1584 except ValueError:
1585 comparison_value = parse_filesize(m.group('value'))
1586 if comparison_value is None:
1587 comparison_value = parse_filesize(m.group('value') + 'B')
1588 if comparison_value is None:
1589 raise ValueError(
1590 'Invalid value %r in format specification %r' % (
67134eab 1591 m.group('value'), filter_spec))
9ddb6925
S
1592 op = OPERATORS[m.group('op')]
1593
083c9df9 1594 if not m:
9ddb6925
S
1595 STR_OPERATORS = {
1596 '=': operator.eq,
10d33b34
YCH
1597 '^=': lambda attr, value: attr.startswith(value),
1598 '$=': lambda attr, value: attr.endswith(value),
1599 '*=': lambda attr, value: value in attr,
9ddb6925 1600 }
187986a8 1601 str_operator_rex = re.compile(r'''(?x)\s*
1602 (?P<key>[a-zA-Z0-9._-]+)\s*
1603 (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1604 (?P<value>[a-zA-Z0-9._-]+)\s*
9ddb6925 1605 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
187986a8 1606 m = str_operator_rex.fullmatch(filter_spec)
9ddb6925
S
1607 if m:
1608 comparison_value = m.group('value')
2cc779f4
S
1609 str_op = STR_OPERATORS[m.group('op')]
1610 if m.group('negation'):
e118a879 1611 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1612 else:
1613 op = str_op
083c9df9 1614
9ddb6925 1615 if not m:
187986a8 1616 raise SyntaxError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1617
1618 def _filter(f):
1619 actual_value = f.get(m.group('key'))
1620 if actual_value is None:
1621 return m.group('none_inclusive')
1622 return op(actual_value, comparison_value)
67134eab
JMF
1623 return _filter
1624
0017d9ad 1625 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1626
af0f7428
S
1627 def can_merge():
1628 merger = FFmpegMergerPP(self)
1629 return merger.available and merger.can_merge()
1630
91ebc640 1631 prefer_best = (
b7b04c78 1632 not self.params.get('simulate')
91ebc640 1633 and download
1634 and (
1635 not can_merge()
19807826 1636 or info_dict.get('is_live', False)
de6000d9 1637 or self.outtmpl_dict['default'] == '-'))
53ed7066 1638 compat = (
1639 prefer_best
1640 or self.params.get('allow_multiple_audio_streams', False)
1641 or 'format-spec' in self.params.get('compat_opts', []))
91ebc640 1642
1643 return (
53ed7066 1644 'best/bestvideo+bestaudio' if prefer_best
1645 else 'bestvideo*+bestaudio/best' if not compat
91ebc640 1646 else 'bestvideo+bestaudio/best')
0017d9ad 1647
67134eab
JMF
1648 def build_format_selector(self, format_spec):
1649 def syntax_error(note, start):
1650 message = (
1651 'Invalid format specification: '
1652 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1653 return SyntaxError(message)
1654
1655 PICKFIRST = 'PICKFIRST'
1656 MERGE = 'MERGE'
1657 SINGLE = 'SINGLE'
0130afb7 1658 GROUP = 'GROUP'
67134eab
JMF
1659 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1660
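# Rough illustration (not part of the original code): a spec such as
# 'bestvideo+bestaudio/best' is parsed below into nested FormatSelectors along the
# lines of PICKFIRST(MERGE(SINGLE 'bestvideo', SINGLE 'bestaudio'), SINGLE 'best');
# '[...]' filters attach to the selector they follow and '(...)' creates a GROUP.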
91ebc640 1661 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1662 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1663
e8e73840 1664 check_formats = self.params.get('check_formats')
1665
67134eab
JMF
1666 def _parse_filter(tokens):
1667 filter_parts = []
1668 for type, string, start, _, _ in tokens:
1669 if type == tokenize.OP and string == ']':
1670 return ''.join(filter_parts)
1671 else:
1672 filter_parts.append(string)
1673
232541df 1674 def _remove_unused_ops(tokens):
17cc1534 1675 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1676 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1677 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1678 last_string, last_start, last_end, last_line = None, None, None, None
1679 for type, string, start, end, line in tokens:
1680 if type == tokenize.OP and string == '[':
1681 if last_string:
1682 yield tokenize.NAME, last_string, last_start, last_end, last_line
1683 last_string = None
1684 yield type, string, start, end, line
1685 # everything inside brackets will be handled by _parse_filter
1686 for type, string, start, end, line in tokens:
1687 yield type, string, start, end, line
1688 if type == tokenize.OP and string == ']':
1689 break
1690 elif type == tokenize.OP and string in ALLOWED_OPS:
1691 if last_string:
1692 yield tokenize.NAME, last_string, last_start, last_end, last_line
1693 last_string = None
1694 yield type, string, start, end, line
1695 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1696 if not last_string:
1697 last_string = string
1698 last_start = start
1699 last_end = end
1700 else:
1701 last_string += string
1702 if last_string:
1703 yield tokenize.NAME, last_string, last_start, last_end, last_line
1704
cf2ac6df 1705 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1706 selectors = []
1707 current_selector = None
1708 for type, string, start, _, _ in tokens:
1709 # ENCODING is only defined in Python 3.x
1710 if type == getattr(tokenize, 'ENCODING', None):
1711 continue
1712 elif type in [tokenize.NAME, tokenize.NUMBER]:
1713 current_selector = FormatSelector(SINGLE, string, [])
1714 elif type == tokenize.OP:
cf2ac6df
JMF
1715 if string == ')':
1716 if not inside_group:
1717 # ')' will be handled by the parentheses group
1718 tokens.restore_last_token()
67134eab 1719 break
cf2ac6df 1720 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1721 tokens.restore_last_token()
1722 break
cf2ac6df
JMF
1723 elif inside_choice and string == ',':
1724 tokens.restore_last_token()
1725 break
1726 elif string == ',':
0a31a350
JMF
1727 if not current_selector:
1728 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1729 selectors.append(current_selector)
1730 current_selector = None
1731 elif string == '/':
d96d604e
JMF
1732 if not current_selector:
1733 raise syntax_error('"/" must follow a format selector', start)
67134eab 1734 first_choice = current_selector
cf2ac6df 1735 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1736 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1737 elif string == '[':
1738 if not current_selector:
1739 current_selector = FormatSelector(SINGLE, 'best', [])
1740 format_filter = _parse_filter(tokens)
1741 current_selector.filters.append(format_filter)
0130afb7
JMF
1742 elif string == '(':
1743 if current_selector:
1744 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1745 group = _parse_format_selection(tokens, inside_group=True)
1746 current_selector = FormatSelector(GROUP, group, [])
67134eab 1747 elif string == '+':
d03cfdce 1748 if not current_selector:
1749 raise syntax_error('Unexpected "+"', start)
1750 selector_1 = current_selector
1751 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1752 if not selector_2:
1753 raise syntax_error('Expected a selector', start)
1754 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab
JMF
1755 else:
1756 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1757 elif type == tokenize.ENDMARKER:
1758 break
1759 if current_selector:
1760 selectors.append(current_selector)
1761 return selectors
1762
f8d4ad9a 1763 def _merge(formats_pair):
1764 format_1, format_2 = formats_pair
1765
1766 formats_info = []
1767 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1768 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1769
1770 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
551f9388 1771 get_no_more = {'video': False, 'audio': False}
f8d4ad9a 1772 for (i, fmt_info) in enumerate(formats_info):
551f9388 1773 if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
1774 formats_info.pop(i)
1775 continue
1776 for aud_vid in ['audio', 'video']:
f8d4ad9a 1777 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1778 if get_no_more[aud_vid]:
1779 formats_info.pop(i)
f5510afe 1780 break
f8d4ad9a 1781 get_no_more[aud_vid] = True
1782
1783 if len(formats_info) == 1:
1784 return formats_info[0]
1785
1786 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1787 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1788
1789 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1790 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1791
1792 output_ext = self.params.get('merge_output_format')
1793 if not output_ext:
1794 if the_only_video:
1795 output_ext = the_only_video['ext']
1796 elif the_only_audio and not video_fmts:
1797 output_ext = the_only_audio['ext']
1798 else:
1799 output_ext = 'mkv'
1800
1801 new_dict = {
1802 'requested_formats': formats_info,
1803 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1804 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
1805 'ext': output_ext,
1806 }
1807
1808 if the_only_video:
1809 new_dict.update({
1810 'width': the_only_video.get('width'),
1811 'height': the_only_video.get('height'),
1812 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1813 'fps': the_only_video.get('fps'),
1814 'vcodec': the_only_video.get('vcodec'),
1815 'vbr': the_only_video.get('vbr'),
1816 'stretched_ratio': the_only_video.get('stretched_ratio'),
1817 })
1818
1819 if the_only_audio:
1820 new_dict.update({
1821 'acodec': the_only_audio.get('acodec'),
1822 'abr': the_only_audio.get('abr'),
1823 })
1824
1825 return new_dict
1826
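# Illustration (assumed typical case, not in the original source): merging a
# video-only mp4 format with an audio-only m4a format produces one dict whose
# 'requested_formats' lists both, with format_id like 'video_id+audio_id' and ext
# taken from merge_output_format if set, otherwise from the only video format.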
e8e73840 1827 def _check_formats(formats):
981052c9 1828 if not check_formats:
1829 yield from formats
b5ac45b1 1830 return
e8e73840 1831 for f in formats:
1832 self.to_screen('[info] Testing format %s' % f['format_id'])
21cd8fae 1833 temp_file = tempfile.NamedTemporaryFile(
1834 suffix='.tmp', delete=False,
1835 dir=self.get_output_path('temp') or None)
1836 temp_file.close()
fe346461 1837 try:
981052c9 1838 success, _ = self.dl(temp_file.name, f, test=True)
1839 except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
1840 success = False
fe346461 1841 finally:
21cd8fae 1842 if os.path.exists(temp_file.name):
1843 try:
1844 os.remove(temp_file.name)
1845 except OSError:
1846 self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
981052c9 1847 if success:
e8e73840 1848 yield f
1849 else:
1850 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
1851
67134eab 1852 def _build_selector_function(selector):
909d24dd 1853 if isinstance(selector, list): # ,
67134eab
JMF
1854 fs = [_build_selector_function(s) for s in selector]
1855
317f7ab6 1856 def selector_function(ctx):
67134eab 1857 for f in fs:
981052c9 1858 yield from f(ctx)
67134eab 1859 return selector_function
909d24dd 1860
1861 elif selector.type == GROUP: # ()
0130afb7 1862 selector_function = _build_selector_function(selector.selector)
909d24dd 1863
1864 elif selector.type == PICKFIRST: # /
67134eab
JMF
1865 fs = [_build_selector_function(s) for s in selector.selector]
1866
317f7ab6 1867 def selector_function(ctx):
67134eab 1868 for f in fs:
317f7ab6 1869 picked_formats = list(f(ctx))
67134eab
JMF
1870 if picked_formats:
1871 return picked_formats
1872 return []
67134eab 1873
981052c9 1874 elif selector.type == MERGE: # +
1875 selector_1, selector_2 = map(_build_selector_function, selector.selector)
1876
1877 def selector_function(ctx):
1878 for pair in itertools.product(
1879 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
1880 yield _merge(pair)
1881
909d24dd 1882 elif selector.type == SINGLE: # atom
598d185d 1883 format_spec = selector.selector or 'best'
909d24dd 1884
f8d4ad9a 1885 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 1886 if format_spec == 'all':
1887 def selector_function(ctx):
981052c9 1888 yield from _check_formats(ctx['formats'])
f8d4ad9a 1889 elif format_spec == 'mergeall':
1890 def selector_function(ctx):
981052c9 1891 formats = list(_check_formats(ctx['formats']))
e01d6aa4 1892 if not formats:
1893 return
921b76ca 1894 merged_format = formats[-1]
1895 for f in formats[-2::-1]:
f8d4ad9a 1896 merged_format = _merge((merged_format, f))
1897 yield merged_format
909d24dd 1898
1899 else:
e8e73840 1900 format_fallback, format_reverse, format_idx = False, True, 1
eff63539 1901 mobj = re.match(
1902 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1903 format_spec)
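# Illustrative specs matched by this regex: 'b'/'best' (best combined format),
# 'w'/'worst', 'bv' (best video-only), 'ba*' (best format that contains audio),
# and 'b.2' (the second best combined format, via the optional '.n' suffix).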
1904 if mobj is not None:
1905 format_idx = int_or_none(mobj.group('n'), default=1)
e8e73840 1906 format_reverse = mobj.group('bw')[0] == 'b'
eff63539 1907 format_type = (mobj.group('type') or [None])[0]
1908 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1909 format_modified = mobj.group('mod') is not None
909d24dd 1910
1911 format_fallback = not format_type and not format_modified # for b, w
8326b00a 1912 _filter_f = (
eff63539 1913 (lambda f: f.get('%scodec' % format_type) != 'none')
1914 if format_type and format_modified # bv*, ba*, wv*, wa*
1915 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1916 if format_type # bv, ba, wv, wa
1917 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1918 if not format_modified # b, w
8326b00a 1919 else lambda f: True) # b*, w*
1920 filter_f = lambda f: _filter_f(f) and (
1921 f.get('vcodec') != 'none' or f.get('acodec') != 'none')
67134eab 1922 else:
909d24dd 1923 filter_f = ((lambda f: f.get('ext') == format_spec)
1924 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1925 else (lambda f: f.get('format_id') == format_spec)) # id
1926
1927 def selector_function(ctx):
1928 formats = list(ctx['formats'])
909d24dd 1929 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
e8e73840 1930 if format_fallback and ctx['incomplete_formats'] and not matches:
909d24dd 1931 # for extractors with incomplete formats (audio only (soundcloud)
1932 # or video only (imgur)) best/worst will fall back to
1933 # best/worst {video,audio}-only format
e8e73840 1934 matches = formats
981052c9 1935 matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
1936 try:
e8e73840 1937 yield matches[format_idx - 1]
981052c9 1938 except IndexError:
1939 return
083c9df9 1940
67134eab 1941 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1942
317f7ab6
S
1943 def final_selector(ctx):
1944 ctx_copy = copy.deepcopy(ctx)
67134eab 1945 for _filter in filters:
317f7ab6
S
1946 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1947 return selector_function(ctx_copy)
67134eab 1948 return final_selector
083c9df9 1949
67134eab 1950 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1951 try:
232541df 1952 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7
JMF
1953 except tokenize.TokenError:
1954 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1955
1956 class TokenIterator(object):
1957 def __init__(self, tokens):
1958 self.tokens = tokens
1959 self.counter = 0
1960
1961 def __iter__(self):
1962 return self
1963
1964 def __next__(self):
1965 if self.counter >= len(self.tokens):
1966 raise StopIteration()
1967 value = self.tokens[self.counter]
1968 self.counter += 1
1969 return value
1970
1971 next = __next__
1972
1973 def restore_last_token(self):
1974 self.counter -= 1
1975
1976 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1977 return _build_selector_function(parsed_selector)
a9c58ad9 1978
e5660ee6
JMF
1979 def _calc_headers(self, info_dict):
1980 res = std_headers.copy()
1981
1982 add_headers = info_dict.get('http_headers')
1983 if add_headers:
1984 res.update(add_headers)
1985
1986 cookies = self._calc_cookies(info_dict)
1987 if cookies:
1988 res['Cookie'] = cookies
1989
0016b84e
S
1990 if 'X-Forwarded-For' not in res:
1991 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1992 if x_forwarded_for_ip:
1993 res['X-Forwarded-For'] = x_forwarded_for_ip
1994
e5660ee6
JMF
1995 return res
1996
1997 def _calc_cookies(self, info_dict):
5c2266df 1998 pr = sanitized_Request(info_dict['url'])
e5660ee6 1999 self.cookiejar.add_cookie_header(pr)
662435f7 2000 return pr.get_header('Cookie')
e5660ee6 2001
b0249bca 2002 def _sanitize_thumbnails(self, info_dict):
bc516a3f 2003 thumbnails = info_dict.get('thumbnails')
2004 if thumbnails is None:
2005 thumbnail = info_dict.get('thumbnail')
2006 if thumbnail:
2007 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
2008 if thumbnails:
2009 thumbnails.sort(key=lambda t: (
2010 t.get('preference') if t.get('preference') is not None else -1,
2011 t.get('width') if t.get('width') is not None else -1,
2012 t.get('height') if t.get('height') is not None else -1,
2013 t.get('id') if t.get('id') is not None else '',
2014 t.get('url')))
b0249bca 2015
0ba692ac 2016 def thumbnail_tester():
2017 if self.params.get('check_formats'):
cca80fe6 2018 test_all = True
2019 to_screen = lambda msg: self.to_screen(f'[info] {msg}')
0ba692ac 2020 else:
cca80fe6 2021 test_all = False
0ba692ac 2022 to_screen = self.write_debug
2023
2024 def test_thumbnail(t):
cca80fe6 2025 if not test_all and not t.get('_test_url'):
2026 return True
0ba692ac 2027 to_screen('Testing thumbnail %s' % t['id'])
2028 try:
2029 self.urlopen(HEADRequest(t['url']))
2030 except network_exceptions as err:
2031 to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
2032 t['id'], t['url'], error_to_compat_str(err)))
2033 return False
2034 return True
2035
2036 return test_thumbnail
b0249bca 2037
bc516a3f 2038 for i, t in enumerate(thumbnails):
bc516a3f 2039 if t.get('id') is None:
2040 t['id'] = '%d' % i
b0249bca 2041 if t.get('width') and t.get('height'):
2042 t['resolution'] = '%dx%d' % (t['width'], t['height'])
2043 t['url'] = sanitize_url(t['url'])
0ba692ac 2044
2045 if self.params.get('check_formats') is not False:
2046 info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
2047 else:
2048 info_dict['thumbnails'] = thumbnails
bc516a3f 2049
dd82ffea
JMF
2050 def process_video_result(self, info_dict, download=True):
2051 assert info_dict.get('_type', 'video') == 'video'
2052
bec1fad2
PH
2053 if 'id' not in info_dict:
2054 raise ExtractorError('Missing "id" field in extractor result')
2055 if 'title' not in info_dict:
2056 raise ExtractorError('Missing "title" field in extractor result')
2057
c9969434
S
2058 def report_force_conversion(field, field_not, conversion):
2059 self.report_warning(
2060 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
2061 % (field, field_not, conversion))
2062
2063 def sanitize_string_field(info, string_field):
2064 field = info.get(string_field)
2065 if field is None or isinstance(field, compat_str):
2066 return
2067 report_force_conversion(string_field, 'a string', 'string')
2068 info[string_field] = compat_str(field)
2069
2070 def sanitize_numeric_fields(info):
2071 for numeric_field in self._NUMERIC_FIELDS:
2072 field = info.get(numeric_field)
2073 if field is None or isinstance(field, compat_numeric_types):
2074 continue
2075 report_force_conversion(numeric_field, 'numeric', 'int')
2076 info[numeric_field] = int_or_none(field)
2077
2078 sanitize_string_field(info_dict, 'id')
2079 sanitize_numeric_fields(info_dict)
be6217b2 2080
dd82ffea
JMF
2081 if 'playlist' not in info_dict:
2082 # It isn't part of a playlist
2083 info_dict['playlist'] = None
2084 info_dict['playlist_index'] = None
2085
bc516a3f 2086 self._sanitize_thumbnails(info_dict)
d5519808 2087
536a55da 2088 thumbnail = info_dict.get('thumbnail')
bc516a3f 2089 thumbnails = info_dict.get('thumbnails')
536a55da
S
2090 if thumbnail:
2091 info_dict['thumbnail'] = sanitize_url(thumbnail)
2092 elif thumbnails:
d5519808
PH
2093 info_dict['thumbnail'] = thumbnails[-1]['url']
2094
ae30b840 2095 if info_dict.get('display_id') is None and 'id' in info_dict:
0afef30b
PH
2096 info_dict['display_id'] = info_dict['id']
2097
10db0d2f 2098 for ts_key, date_key in (
2099 ('timestamp', 'upload_date'),
2100 ('release_timestamp', 'release_date'),
2101 ):
2102 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
2103 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
2104 # see http://bugs.python.org/issue1646728)
2105 try:
2106 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
2107 info_dict[date_key] = upload_date.strftime('%Y%m%d')
2108 except (ValueError, OverflowError, OSError):
2109 pass
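# For instance (illustrative): timestamp=1612345678 produces upload_date='20210203',
# i.e. the date part of the UTC timestamp.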
9d2ecdbc 2110
ae30b840 2111 live_keys = ('is_live', 'was_live')
2112 live_status = info_dict.get('live_status')
2113 if live_status is None:
2114 for key in live_keys:
2115 if info_dict.get(key) is False:
2116 continue
2117 if info_dict.get(key):
2118 live_status = key
2119 break
2120 if all(info_dict.get(key) is False for key in live_keys):
2121 live_status = 'not_live'
2122 if live_status:
2123 info_dict['live_status'] = live_status
2124 for key in live_keys:
2125 if info_dict.get(key) is None:
2126 info_dict[key] = (live_status == key)
2127
33d2fc2f
S
2128 # Auto-generate title fields corresponding to the *_number fields when missing
2129 # in order to always have clean titles. This is very common for TV series.
2130 for field in ('chapter', 'season', 'episode'):
2131 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
2132 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
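# e.g. (illustration) an entry with episode_number=3 and no 'episode' field gets
# episode='Episode 3'; the same applies to season and chapter.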
2133
05108a49
S
2134 for cc_kind in ('subtitles', 'automatic_captions'):
2135 cc = info_dict.get(cc_kind)
2136 if cc:
2137 for _, subtitle in cc.items():
2138 for subtitle_format in subtitle:
2139 if subtitle_format.get('url'):
2140 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
2141 if subtitle_format.get('ext') is None:
2142 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
2143
2144 automatic_captions = info_dict.get('automatic_captions')
4bba3716 2145 subtitles = info_dict.get('subtitles')
4bba3716 2146
360e1ca5 2147 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 2148 info_dict['id'], subtitles, automatic_captions)
a504ced0 2149
dd82ffea
JMF
2150 # We now pick which formats have to be downloaded
2151 if info_dict.get('formats') is None:
2152 # There's only one format available
2153 formats = [info_dict]
2154 else:
2155 formats = info_dict['formats']
2156
db95dc13 2157 if not formats:
b7da73eb 2158 if not self.params.get('ignore_no_formats_error'):
2159 raise ExtractorError('No video formats found!')
2160 else:
2161 self.report_warning('No video formats found!')
db95dc13 2162
73af5cc8
S
2163 def is_wellformed(f):
2164 url = f.get('url')
a5ac0c47 2165 if not url:
73af5cc8
S
2166 self.report_warning(
2167 '"url" field is missing or empty - skipping format, '
2168 'there is an error in extractor')
a5ac0c47
S
2169 return False
2170 if isinstance(url, bytes):
2171 sanitize_string_field(f, 'url')
2172 return True
73af5cc8
S
2173
2174 # Filter out malformed formats for better extraction robustness
2175 formats = list(filter(is_wellformed, formats))
2176
181c7053
S
2177 formats_dict = {}
2178
dd82ffea 2179 # We check that all the formats have the format and format_id fields
db95dc13 2180 for i, format in enumerate(formats):
c9969434
S
2181 sanitize_string_field(format, 'format_id')
2182 sanitize_numeric_fields(format)
dcf77cf1 2183 format['url'] = sanitize_url(format['url'])
e74e3b63 2184 if not format.get('format_id'):
8016c922 2185 format['format_id'] = compat_str(i)
e2effb08
S
2186 else:
2187 # Sanitize format_id from characters used in format selector expression
ec85ded8 2188 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
2189 format_id = format['format_id']
2190 if format_id not in formats_dict:
2191 formats_dict[format_id] = []
2192 formats_dict[format_id].append(format)
2193
2194 # Make sure all formats have unique format_id
2195 for format_id, ambiguous_formats in formats_dict.items():
2196 if len(ambiguous_formats) > 1:
2197 for i, format in enumerate(ambiguous_formats):
2198 format['format_id'] = '%s-%d' % (format_id, i)
2199
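# Illustration (not in the original source): a format_id like 'hls+low' is first
# sanitized to 'hls_low', and if two formats end up sharing the id 'hls_low' they
# are renamed to 'hls_low-0' and 'hls_low-1' so every format_id stays unique.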
2200 for i, format in enumerate(formats):
8c51aa65 2201 if format.get('format') is None:
6febd1c1 2202 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
2203 id=format['format_id'],
2204 res=self.format_resolution(format),
b868936c 2205 note=format_field(format, 'format_note', ' (%s)'),
8c51aa65 2206 )
c1002e96 2207 # Automatically determine file extension if missing
5b1d8575 2208 if format.get('ext') is None:
cce929ea 2209 format['ext'] = determine_ext(format['url']).lower()
b5559424
S
2210 # Automatically determine protocol if missing (useful for format
2211 # selection purposes)
6f0be937 2212 if format.get('protocol') is None:
b5559424 2213 format['protocol'] = determine_protocol(format)
e5660ee6
JMF
2214 # Add HTTP headers, so that external programs can use them from the
2215 # json output
2216 full_format_info = info_dict.copy()
2217 full_format_info.update(format)
2218 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
2219 # Remove private housekeeping stuff
2220 if '__x_forwarded_for_ip' in info_dict:
2221 del info_dict['__x_forwarded_for_ip']
dd82ffea 2222
4bcc7bd1 2223 # TODO Central sorting goes here
99e206d5 2224
b7da73eb 2225 if formats and formats[0] is not info_dict:
b3d9ef88
JMF
2226 # only set the 'formats' field if the original info_dict lists them;
2227 # otherwise we end up with a circular reference: the first (and only)
f89197d7 2228 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 2229 # which can't be exported to json
b3d9ef88 2230 info_dict['formats'] = formats
4ec82a72 2231
2232 info_dict, _ = self.pre_process(info_dict)
2233
b7b04c78 2234 if self.params.get('list_thumbnails'):
2235 self.list_thumbnails(info_dict)
2236 if self.params.get('listformats'):
86c66b2d 2237 if not info_dict.get('formats') and not info_dict.get('url'):
b7b04c78 2238 raise ExtractorError('No video formats found', expected=True)
2239 self.list_formats(info_dict)
2240 if self.params.get('listsubtitles'):
2241 if 'automatic_captions' in info_dict:
2242 self.list_subtitles(
2243 info_dict['id'], automatic_captions, 'automatic captions')
2244 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
2245 list_only = self.params.get('simulate') is None and (
2246 self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
169dbde9 2247 if list_only:
b7b04c78 2248 # Without this printing, -F --print-json will not work
169dbde9 2249 self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
bfaae0a7 2250 return
2251
187986a8 2252 format_selector = self.format_selector
2253 if format_selector is None:
0017d9ad 2254 req_format = self._default_format_spec(info_dict, download=download)
0760b0a7 2255 self.write_debug('Default format spec: %s' % req_format)
187986a8 2256 format_selector = self.build_format_selector(req_format)
317f7ab6
S
2257
2258 # While in format selection we may need to have access to the original
2259 # format set in order to calculate some metrics or do some processing.
2260 # For now we need to be able to guess whether original formats provided
2261 # by extractor are incomplete or not (i.e. whether extractor provides only
2262 # video-only or audio-only formats) for proper formats selection for
2263 # extractors with such incomplete formats (see
067aa17e 2264 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6
S
2265 # Since formats may be filtered during format selection and may not match
2266 # the original formats the results may be incorrect. Thus original formats
2267 # or pre-calculated metrics should be passed to format selection routines
2268 # as well.
2269 # We will pass a context object containing all necessary additional data
2270 # instead of just formats.
2271 # This fixes incorrect format selection issue (see
067aa17e 2272 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 2273 incomplete_formats = (
317f7ab6 2274 # All formats are video-only or
3089bc74 2275 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 2276 # all formats are audio-only
3089bc74 2277 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
317f7ab6
S
2278
2279 ctx = {
2280 'formats': formats,
2281 'incomplete_formats': incomplete_formats,
2282 }
2283
2284 formats_to_download = list(format_selector(ctx))
dd82ffea 2285 if not formats_to_download:
b7da73eb 2286 if not self.params.get('ignore_no_formats_error'):
2287 raise ExtractorError('Requested format is not available', expected=True)
2288 else:
2289 self.report_warning('Requested format is not available')
4513a41a
A
2290 # Process what we can, even without any available formats.
2291 self.process_info(dict(info_dict))
b7da73eb 2292 elif download:
2293 self.to_screen(
07cce701 2294 '[info] %s: Downloading %d format(s): %s' % (
2295 info_dict['id'], len(formats_to_download),
2296 ", ".join([f['format_id'] for f in formats_to_download])))
b7da73eb 2297 for fmt in formats_to_download:
dd82ffea 2298 new_info = dict(info_dict)
4ec82a72 2299 # Save a reference to the original info_dict so that it can be modified in process_info if needed
2300 new_info['__original_infodict'] = info_dict
b7da73eb 2301 new_info.update(fmt)
dd82ffea
JMF
2302 self.process_info(new_info)
2303 # We update the info dict with the best quality format (backwards compatibility)
b7da73eb 2304 if formats_to_download:
2305 info_dict.update(formats_to_download[-1])
dd82ffea
JMF
2306 return info_dict
2307
98c70d6f 2308 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2309 """Select the requested subtitles and their format"""
98c70d6f
JMF
2310 available_subs = {}
2311 if normal_subtitles and self.params.get('writesubtitles'):
2312 available_subs.update(normal_subtitles)
2313 if automatic_captions and self.params.get('writeautomaticsub'):
2314 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2315 if lang not in available_subs:
2316 available_subs[lang] = cap_info
2317
4d171848
JMF
2318 if (not self.params.get('writesubtitles') and not
2319 self.params.get('writeautomaticsub') or not
2320 available_subs):
2321 return None
a504ced0 2322
c32b0aab 2323 all_sub_langs = available_subs.keys()
a504ced0 2324 if self.params.get('allsubtitles', False):
c32b0aab 2325 requested_langs = all_sub_langs
2326 elif self.params.get('subtitleslangs', False):
2327 requested_langs = set()
2328 for lang in self.params.get('subtitleslangs'):
2329 if lang == 'all':
2330 requested_langs.update(all_sub_langs)
2331 continue
2332 discard = lang[0] == '-'
2333 if discard:
2334 lang = lang[1:]
2335 current_langs = filter(re.compile(lang + '$').match, all_sub_langs)
2336 if discard:
2337 for lang in current_langs:
2338 requested_langs.discard(lang)
2339 else:
2340 requested_langs.update(current_langs)
2341 elif 'en' in available_subs:
2342 requested_langs = ['en']
a504ced0 2343 else:
c32b0aab 2344 requested_langs = [list(all_sub_langs)[0]]
ad3dc496 2345 if requested_langs:
2346 self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
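# Example of the language selection above (illustrative): '--sub-langs all,-live_chat'
# requests every available language except live_chat, and a pattern like 'en.*'
# matches 'en' as well as regional variants such as 'en-US'.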
a504ced0
JMF
2347
2348 formats_query = self.params.get('subtitlesformat', 'best')
2349 formats_preference = formats_query.split('/') if formats_query else []
2350 subs = {}
2351 for lang in requested_langs:
2352 formats = available_subs.get(lang)
2353 if formats is None:
2354 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2355 continue
a504ced0
JMF
2356 for ext in formats_preference:
2357 if ext == 'best':
2358 f = formats[-1]
2359 break
2360 matches = list(filter(lambda f: f['ext'] == ext, formats))
2361 if matches:
2362 f = matches[-1]
2363 break
2364 else:
2365 f = formats[-1]
2366 self.report_warning(
2367 'No subtitle format found matching "%s" for language %s, '
2368 'using %s' % (formats_query, lang, f['ext']))
2369 subs[lang] = f
2370 return subs
2371
d06daf23 2372 def __forced_printings(self, info_dict, filename, incomplete):
53c18592 2373 def print_mandatory(field, actual_field=None):
2374 if actual_field is None:
2375 actual_field = field
d06daf23 2376 if (self.params.get('force%s' % field, False)
53c18592 2377 and (not incomplete or info_dict.get(actual_field) is not None)):
2378 self.to_stdout(info_dict[actual_field])
d06daf23
S
2379
2380 def print_optional(field):
2381 if (self.params.get('force%s' % field, False)
2382 and info_dict.get(field) is not None):
2383 self.to_stdout(info_dict[field])
2384
53c18592 2385 info_dict = info_dict.copy()
2386 if filename is not None:
2387 info_dict['filename'] = filename
2388 if info_dict.get('requested_formats') is not None:
2389 # For RTMP URLs, also include the playpath
2390 info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
2391 elif 'url' in info_dict:
2392 info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
2393
2b8a2973 2394 if self.params.get('forceprint') or self.params.get('forcejson'):
2395 self.post_extract(info_dict)
53c18592 2396 for tmpl in self.params.get('forceprint', []):
2397 if re.match(r'\w+$', tmpl):
2398 tmpl = '%({})s'.format(tmpl)
2399 tmpl, info_copy = self.prepare_outtmpl(tmpl, info_dict)
901130bb 2400 self.to_stdout(self.escape_outtmpl(tmpl) % info_copy)
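# Illustration: '--print id' is shorthand for the template '%(id)s'; anything that
# is not a bare field name is treated as a full output template.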
53c18592 2401
d06daf23
S
2402 print_mandatory('title')
2403 print_mandatory('id')
53c18592 2404 print_mandatory('url', 'urls')
d06daf23
S
2405 print_optional('thumbnail')
2406 print_optional('description')
53c18592 2407 print_optional('filename')
b868936c 2408 if self.params.get('forceduration') and info_dict.get('duration') is not None:
d06daf23
S
2409 self.to_stdout(formatSeconds(info_dict['duration']))
2410 print_mandatory('format')
53c18592 2411
2b8a2973 2412 if self.params.get('forcejson'):
6e84b215 2413 self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
d06daf23 2414
e8e73840 2415 def dl(self, name, info, subtitle=False, test=False):
2416
2417 if test:
2418 verbose = self.params.get('verbose')
2419 params = {
2420 'test': True,
2421 'quiet': not verbose,
2422 'verbose': verbose,
2423 'noprogress': not verbose,
2424 'nopart': True,
2425 'skip_unavailable_fragments': False,
2426 'keep_fragments': False,
2427 'overwrites': True,
2428 '_no_ytdl_file': True,
2429 }
2430 else:
2431 params = self.params
96fccc10 2432 fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
e8e73840 2433 if not test:
2434 for ph in self._progress_hooks:
2435 fd.add_progress_hook(ph)
18e674b4 2436 urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
2437 self.write_debug('Invoking downloader on "%s"' % urls)
e8e73840 2438 new_info = dict(info)
2439 if new_info.get('http_headers') is None:
2440 new_info['http_headers'] = self._calc_headers(new_info)
2441 return fd.download(name, new_info, subtitle)
2442
8222d8de
JMF
2443 def process_info(self, info_dict):
2444 """Process a single resolved IE result."""
2445
2446 assert info_dict.get('_type', 'video') == 'video'
fd288278
PH
2447
2448 max_downloads = self.params.get('max_downloads')
2449 if max_downloads is not None:
2450 if self._num_downloads >= int(max_downloads):
2451 raise MaxDownloadsReached()
8222d8de 2452
d06daf23 2453 # TODO: backward compatibility, to be removed
8222d8de 2454 info_dict['fulltitle'] = info_dict['title']
8222d8de 2455
4513a41a 2456 if 'format' not in info_dict and 'ext' in info_dict:
8222d8de
JMF
2457 info_dict['format'] = info_dict['ext']
2458
c77495e3 2459 if self._match_entry(info_dict) is not None:
8222d8de
JMF
2460 return
2461
277d6ff5 2462 self.post_extract(info_dict)
fd288278 2463 self._num_downloads += 1
8222d8de 2464
dcf64d43 2465 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2466 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2467 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2468 files_to_move = {}
8222d8de
JMF
2469
2470 # Forced printings
4513a41a 2471 self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
8222d8de 2472
b7b04c78 2473 if self.params.get('simulate'):
2d30509f 2474 if self.params.get('force_write_download_archive', False):
2475 self.record_download_archive(info_dict)
2476
2477 # Do nothing else if in simulate mode
8222d8de
JMF
2478 return
2479
de6000d9 2480 if full_filename is None:
8222d8de
JMF
2481 return
2482
e92caff5 2483 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2484 return
e92caff5 2485 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2486 return
2487
2488 if self.params.get('writedescription', False):
de6000d9 2489 descfn = self.prepare_filename(info_dict, 'description')
e92caff5 2490 if not self._ensure_dir_exists(encodeFilename(descfn)):
0202b52a 2491 return
0c3d0f51 2492 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2493 self.to_screen('[info] Video description is already present')
f00fd51d
JMF
2494 elif info_dict.get('description') is None:
2495 self.report_warning('There\'s no description to write.')
7b6fefc9
PH
2496 else:
2497 try:
6febd1c1 2498 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9
PH
2499 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2500 descfile.write(info_dict['description'])
7b6fefc9 2501 except (OSError, IOError):
6febd1c1 2502 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2503 return
8222d8de 2504
1fb07d10 2505 if self.params.get('writeannotations', False):
de6000d9 2506 annofn = self.prepare_filename(info_dict, 'annotation')
e92caff5 2507 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2508 return
0c3d0f51 2509 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2510 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2511 elif not info_dict.get('annotations'):
2512 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2513 else:
2514 try:
6febd1c1 2515 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9
PH
2516 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2517 annofile.write(info_dict['annotations'])
2518 except (KeyError, TypeError):
6febd1c1 2519 self.report_warning('There are no annotations to write.')
7b6fefc9 2520 except (OSError, IOError):
6febd1c1 2521 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2522 return
1fb07d10 2523
c4a91be7 2524 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2525 self.params.get('writeautomaticsub')])
c4a91be7 2526
c84dd8a9 2527 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de
JMF
2528 # subtitle download errors are already handled in the relevant IE,
2529 # so processing silently goes on when used with an IE that does not support them
c84dd8a9 2530 subtitles = info_dict['requested_subtitles']
fa57af1e 2531 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0
JMF
2532 for sub_lang, sub_info in subtitles.items():
2533 sub_format = sub_info['ext']
56d868db 2534 sub_filename = subtitles_filename(temp_filename, sub_lang, sub_format, info_dict.get('ext'))
2535 sub_filename_final = subtitles_filename(
2536 self.prepare_filename(info_dict, 'subtitle'), sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2537 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2538 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
dcf64d43 2539 sub_info['filepath'] = sub_filename
0202b52a 2540 files_to_move[sub_filename] = sub_filename_final
a504ced0 2541 else:
0c9df79e 2542 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c
RA
2543 if sub_info.get('data') is not None:
2544 try:
2545 # Use newline='' to prevent conversion of newline characters
067aa17e 2546 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c
RA
2547 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2548 subfile.write(sub_info['data'])
dcf64d43 2549 sub_info['filepath'] = sub_filename
0202b52a 2550 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c
RA
2551 except (OSError, IOError):
2552 self.report_error('Cannot write subtitles file ' + sub_filename)
2553 return
7b6fefc9 2554 else:
5ff1bc0c 2555 try:
e8e73840 2556 self.dl(sub_filename, sub_info.copy(), subtitle=True)
dcf64d43 2557 sub_info['filepath'] = sub_filename
0202b52a 2558 files_to_move[sub_filename] = sub_filename_final
fe346461 2559 except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
5ff1bc0c
RA
2560 self.report_warning('Unable to download subtitle for "%s": %s' %
2561 (sub_lang, error_to_compat_str(err)))
2562 continue
8222d8de 2563
8222d8de 2564 if self.params.get('writeinfojson', False):
de6000d9 2565 infofn = self.prepare_filename(info_dict, 'infojson')
e92caff5 2566 if not self._ensure_dir_exists(encodeFilename(infofn)):
0202b52a 2567 return
0c3d0f51 2568 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2569 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2570 else:
66c935fb 2571 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2572 try:
8012d892 2573 write_json_file(self.sanitize_info(info_dict, self.params.get('clean_infojson', True)), infofn)
7b6fefc9 2574 except (OSError, IOError):
66c935fb 2575 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2576 return
de6000d9 2577 info_dict['__infojson_filename'] = infofn
8222d8de 2578
56d868db 2579 for thumb_ext in self._write_thumbnails(info_dict, temp_filename):
2580 thumb_filename_temp = replace_extension(temp_filename, thumb_ext, info_dict.get('ext'))
2581 thumb_filename = replace_extension(
2582 self.prepare_filename(info_dict, 'thumbnail'), thumb_ext, info_dict.get('ext'))
dcf64d43 2583 files_to_move[thumb_filename_temp] = thumb_filename
8222d8de 2584
732044af 2585 # Write internet shortcut files
2586 url_link = webloc_link = desktop_link = False
2587 if self.params.get('writelink', False):
2588 if sys.platform == "darwin": # macOS.
2589 webloc_link = True
2590 elif sys.platform.startswith("linux"):
2591 desktop_link = True
2592 else: # if sys.platform in ['win32', 'cygwin']:
2593 url_link = True
2594 if self.params.get('writeurllink', False):
2595 url_link = True
2596 if self.params.get('writewebloclink', False):
2597 webloc_link = True
2598 if self.params.get('writedesktoplink', False):
2599 desktop_link = True
2600
2601 if url_link or webloc_link or desktop_link:
2602 if 'webpage_url' not in info_dict:
2603 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2604 return
2605 ascii_url = iri_to_uri(info_dict['webpage_url'])
2606
2607 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2608 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2609 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2610 self.to_screen('[info] Internet shortcut is already present')
2611 else:
2612 try:
2613 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2614 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2615 template_vars = {'url': ascii_url}
2616 if embed_filename:
2617 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2618 linkfile.write(template % template_vars)
2619 except (OSError, IOError):
2620 self.report_error('Cannot write internet shortcut ' + linkfn)
2621 return False
2622 return True
2623
2624 if url_link:
2625 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2626 return
2627 if webloc_link:
2628 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2629 return
2630 if desktop_link:
2631 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2632 return
2633
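# Note (not part of the original source): the three shortcut flavours map to
# Windows .url files (CRLF line endings), macOS .webloc plists and freedesktop
# .desktop entries. Each DOT_*_LINK_TEMPLATE is a %-style format string that is
# expected to take 'url' (and, for .desktop only, 'filename'), matching the
# template_vars dict built above.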
56d868db 2634 try:
2635 info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2636 except PostProcessingError as err:
2637 self.report_error('Preprocessing: %s' % str(err))
2638 return
2639
732044af 2640 must_record_download_archive = False
56d868db 2641 if self.params.get('skip_download', False):
2642 info_dict['filepath'] = temp_filename
2643 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2644 info_dict['__files_to_move'] = files_to_move
2645 info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
2646 else:
2647 # Download
b868936c 2648 info_dict.setdefault('__postprocessors', [])
4340deca 2649 try:
0202b52a 2650
6b591b29 2651 def existing_file(*filepaths):
2652 ext = info_dict.get('ext')
2653 final_ext = self.params.get('final_ext', ext)
2654 existing_files = []
2655 for file in orderedSet(filepaths):
2656 if final_ext != ext:
2657 converted = replace_extension(file, final_ext, ext)
2658 if os.path.exists(encodeFilename(converted)):
2659 existing_files.append(converted)
2660 if os.path.exists(encodeFilename(file)):
2661 existing_files.append(file)
2662
2663 if not existing_files or self.params.get('overwrites', False):
2664 for file in orderedSet(existing_files):
2665 self.report_file_delete(file)
2666 os.remove(encodeFilename(file))
2667 return None
2668
6b591b29 2669 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2670 return existing_files[0]
0202b52a 2671
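# Sketch of existing_file()'s contract (illustrative, not in the original source):
# given the final and temporary filenames it collects any matching files already on
# disk, including ones converted to --final-ext. With overwrites enabled (or nothing
# found) the matches are deleted and None is returned, forcing a fresh download;
# otherwise the first match is reused and info_dict['ext'] is updated accordingly.
# e.g. with ext 'webm' and final_ext 'mkv', an already-merged 'video.mkv' next to
# 'video.webm' is picked up and reused (hypothetical filenames).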
2672 success = True
4340deca 2673 if info_dict.get('requested_formats') is not None:
81cd954a 2674
2675 def compatible_formats(formats):
d03cfdce 2676 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2677 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2678 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2679 if len(video_formats) > 2 or len(audio_formats) > 2:
2680 return False
2681
81cd954a 2682 # Check extension
d03cfdce 2683 exts = set(format.get('ext') for format in formats)
2684 COMPATIBLE_EXTS = (
2685 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2686 set(('webm',)),
2687 )
2688 for ext_sets in COMPATIBLE_EXTS:
2689 if ext_sets.issuperset(exts):
2690 return True
81cd954a 2691 # TODO: Check acodec/vcodec
2692 return False
2693
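# Illustrative reading of compatible_formats() (not part of the original source):
# at most two video and two audio streams are allowed, and every requested
# extension must fall inside a single compatibility group. For example
# {'mp4', 'm4a'} fits the mp4 family and can be merged as-is, while
# {'mp4', 'webm'} does not and triggers the mkv fallback below.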
2694 requested_formats = info_dict['requested_formats']
0202b52a 2695 old_ext = info_dict['ext']
3b297919 2696 if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
2697 info_dict['ext'] = 'mkv'
2698 self.report_warning(
2699 'Requested formats are incompatible for merge and will be merged into mkv.')
124bc071 2700 new_ext = info_dict['ext']
0202b52a 2701
124bc071 2702 def correct_ext(filename, ext=new_ext):
96fccc10 2703 if filename == '-':
2704 return filename
0202b52a 2705 filename_real_ext = os.path.splitext(filename)[1][1:]
2706 filename_wo_ext = (
2707 os.path.splitext(filename)[0]
124bc071 2708 if filename_real_ext in (old_ext, new_ext)
0202b52a 2709 else filename)
124bc071 2710 return '%s.%s' % (filename_wo_ext, ext)
0202b52a 2711
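# correct_ext() ensures a filename ends in the merge container extension,
# e.g. 'clip.webm' -> 'clip.mkv' once new_ext has been forced to 'mkv' above;
# '-' (stdout) is passed through untouched.
# (Explanatory note with a hypothetical filename, not in the original source.)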
38c6902b 2712 # Ensure filename always has a correct extension for successful merge
0202b52a 2713 full_filename = correct_ext(full_filename)
2714 temp_filename = correct_ext(temp_filename)
2715 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2716 info_dict['__real_download'] = False
18e674b4 2717
2718 _protocols = set(determine_protocol(f) for f in requested_formats)
dbf5416a 2719 if len(_protocols) == 1: # All requested formats have same protocol
18e674b4 2720 info_dict['protocol'] = _protocols.pop()
dbf5416a 2721 directly_mergable = FFmpegFD.can_merge_formats(info_dict)
2722 if dl_filename is not None:
6c7274ec 2723 self.report_file_already_downloaded(dl_filename)
96fccc10 2724 elif (directly_mergable and get_suitable_downloader(
a46a815b 2725 info_dict, self.params, to_stdout=(temp_filename == '-')) == FFmpegFD):
dbf5416a 2726 info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
2727 success, real_download = self.dl(temp_filename, info_dict)
2728 info_dict['__real_download'] = real_download
18e674b4 2729 else:
2730 downloaded = []
2731 merger = FFmpegMergerPP(self)
2732 if self.params.get('allow_unplayable_formats'):
2733 self.report_warning(
2734 'You have requested merging of multiple formats '
2735 'while also allowing unplayable formats to be downloaded. '
2736 'The formats won\'t be merged to prevent data corruption.')
2737 elif not merger.available:
2738 self.report_warning(
2739 'You have requested merging of multiple formats but ffmpeg is not installed. '
2740 'The formats won\'t be merged.')
2741
96fccc10 2742 if temp_filename == '-':
2743 reason = ('using a downloader other than ffmpeg' if directly_mergable
2744 else 'but the formats are incompatible for simultaneous download' if merger.available
2745 else 'but ffmpeg is not installed')
2746 self.report_warning(
2747 f'You have requested downloading multiple formats to stdout {reason}. '
2748 'The formats will be streamed one after the other')
2749 fname = temp_filename
dbf5416a 2750 for f in requested_formats:
2751 new_info = dict(info_dict)
2752 del new_info['requested_formats']
2753 new_info.update(f)
96fccc10 2754 if temp_filename != '-':
124bc071 2755 fname = prepend_extension(
2756 correct_ext(temp_filename, new_info['ext']),
2757 'f%s' % f['format_id'], new_info['ext'])
96fccc10 2758 if not self._ensure_dir_exists(fname):
2759 return
2760 downloaded.append(fname)
dbf5416a 2761 partial_success, real_download = self.dl(fname, new_info)
2762 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2763 success = success and partial_success
2764 if merger.available and not self.params.get('allow_unplayable_formats'):
2765 info_dict['__postprocessors'].append(merger)
2766 info_dict['__files_to_merge'] = downloaded
2767 # Even if there were no downloads, it is being merged only now
2768 info_dict['__real_download'] = True
2769 else:
2770 for file in downloaded:
2771 files_to_move[file] = None
4340deca 2772 else:
2773 # Just a single file
0202b52a 2774 dl_filename = existing_file(full_filename, temp_filename)
6c7274ec 2775 if dl_filename is None or dl_filename == temp_filename:
2776 # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
2777 # So we should try to resume the download
e8e73840 2778 success, real_download = self.dl(temp_filename, info_dict)
0202b52a 2779 info_dict['__real_download'] = real_download
6c7274ec 2780 else:
2781 self.report_file_already_downloaded(dl_filename)
0202b52a 2782
0202b52a 2783 dl_filename = dl_filename or temp_filename
c571435f 2784 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2785
3158150c 2786 except network_exceptions as err:
7960b056 2787 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca 2788 return
2789 except (OSError, IOError) as err:
2790 raise UnavailableVideoError(err)
2791 except (ContentTooShortError, ) as err:
2792 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2793 return
8222d8de 2794
de6000d9 2795 if success and full_filename != '-':
f17f8651 2796
fd7cfb64 2797 def fixup():
2798 do_fixup = True
2799 fixup_policy = self.params.get('fixup')
2800 vid = info_dict['id']
2801
2802 if fixup_policy in ('ignore', 'never'):
2803 return
2804 elif fixup_policy == 'warn':
2805 do_fixup = False
f89b3e2d 2806 elif fixup_policy != 'force':
2807 assert fixup_policy in ('detect_or_warn', None)
2808 if not info_dict.get('__real_download'):
2809 do_fixup = False
fd7cfb64 2810
2811 def ffmpeg_fixup(cndn, msg, cls):
2812 if not cndn:
2813 return
2814 if not do_fixup:
2815 self.report_warning(f'{vid}: {msg}')
2816 return
2817 pp = cls(self)
2818 if pp.available:
2819 info_dict['__postprocessors'].append(pp)
2820 else:
2821 self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
2822
2823 stretched_ratio = info_dict.get('stretched_ratio')
2824 ffmpeg_fixup(
2825 stretched_ratio not in (1, None),
2826 f'Non-uniform pixel ratio {stretched_ratio}',
2827 FFmpegFixupStretchedPP)
2828
2829 ffmpeg_fixup(
2830 (info_dict.get('requested_formats') is None
2831 and info_dict.get('container') == 'm4a_dash'
2832 and info_dict.get('ext') == 'm4a'),
2833 'writing DASH m4a. Only some players support this container',
2834 FFmpegFixupM4aPP)
2835
2836 downloader = (get_suitable_downloader(info_dict, self.params).__name__
2837 if 'protocol' in info_dict else None)
2838 ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
e36d50c5 2839 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
2840 ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
fd7cfb64 2841
2842 fixup()
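# Summary of the fixup policy handled above (descriptive note, not original code):
# 'never'/'ignore' skip all fixups, 'warn' only reports the detected problems,
# 'detect_or_warn' (the default) attaches the relevant FFmpeg*PP postprocessors
# when the file was actually downloaded in this run, and 'force' attaches them
# unconditionally; without ffmpeg installed only a warning is emitted.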
8222d8de 2843 try:
23c1a667 2844 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2845 except PostProcessingError as err:
2846 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2847 return
ab8e5e51 2848 try:
2849 for ph in self._post_hooks:
23c1a667 2850 ph(info_dict['filepath'])
ab8e5e51 2851 except Exception as err:
2852 self.report_error('post hooks: %s' % str(err))
2853 return
2d30509f 2854 must_record_download_archive = True
2855
2856 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2857 self.record_download_archive(info_dict)
c3e6ffba 2858 max_downloads = self.params.get('max_downloads')
2859 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2860 raise MaxDownloadsReached()
8222d8de 2861
2862 def download(self, url_list):
2863 """Download a given list of URLs."""
de6000d9 2864 outtmpl = self.outtmpl_dict['default']
3089bc74 2865 if (len(url_list) > 1
2866 and outtmpl != '-'
2867 and '%' not in outtmpl
2868 and self.params.get('max_downloads') != 1):
acd69589 2869 raise SameFileError(outtmpl)
8222d8de 2870
2871 for url in url_list:
2872 try:
5f6a1245 2873 # It also downloads the videos
61aa5ba3 2874 res = self.extract_info(
2875 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2876 except UnavailableVideoError:
6febd1c1 2877 self.report_error('unable to download video')
8222d8de 2878 except MaxDownloadsReached:
8f18aca8 2879 self.to_screen('[info] Maximum number of downloads reached')
8b0d7497 2880 raise
2881 except ExistingVideoReached:
8f18aca8 2882 self.to_screen('[info] Encountered a video that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2883 raise
2884 except RejectedVideoReached:
8f18aca8 2885 self.to_screen('[info] Encountered a video that did not match filter, stopping due to --break-on-reject')
8222d8de 2886 raise
63e0be34 2887 else:
2888 if self.params.get('dump_single_json', False):
277d6ff5 2889 self.post_extract(res)
6e84b215 2890 self.to_stdout(json.dumps(self.sanitize_info(res)))
8222d8de 2891
2892 return self._download_retcode
2893
1dcc4c0c 2894 def download_with_info_file(self, info_filename):
31bd3925 2895 with contextlib.closing(fileinput.FileInput(
2896 [info_filename], mode='r',
2897 openhook=fileinput.hook_encoded('utf-8'))) as f:
2898 # FileInput doesn't have a read method, we can't call json.load
8012d892 2899 info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898 2900 try:
2901 self.process_ie_result(info, download=True)
d3f62c19 2902 except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
d4943898 2903 webpage_url = info.get('webpage_url')
2904 if webpage_url is not None:
6febd1c1 2905 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898 2906 return self.download([webpage_url])
2907 else:
2908 raise
2909 return self._download_retcode
1dcc4c0c 2910
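# Hedged usage sketch (not part of the original source), assuming a configured
# YoutubeDL instance `ydl` and a previously written info JSON:
#   ydl.download_with_info_file('NA - Some Video [abc123].info.json')
# If processing the stored dict fails, the method above falls back to a fresh
# download via the recorded 'webpage_url'. The filename here is hypothetical.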
cb202fd2 2911 @staticmethod
8012d892 2912 def sanitize_info(info_dict, remove_private_keys=False):
2913 ''' Sanitize the infodict for converting to json '''
3ad56b42 2914 if info_dict is None:
2915 return info_dict
6e84b215 2916 info_dict.setdefault('epoch', int(time.time()))
2917 remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
ae8f99e6 2918 keep_keys = {'_type'} # Always keep this to facilitate load-info-json
8012d892 2919 if remove_private_keys:
6e84b215 2920 remove_keys |= {
2921 'requested_formats', 'requested_subtitles', 'requested_entries',
2922 'filepath', 'entries', 'original_url', 'playlist_autonumber',
2923 }
ae8f99e6 2924 empty_values = (None, {}, [], set(), tuple())
2925 reject = lambda k, v: k not in keep_keys and (
2926 k.startswith('_') or k in remove_keys or v in empty_values)
2927 else:
ae8f99e6 2928 reject = lambda k, v: k in remove_keys
5226731e 2929 filter_fn = lambda obj: (
b0249bca 2930 list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
a515a78d 2931 else obj if not isinstance(obj, dict)
ae8f99e6 2932 else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
5226731e 2933 return filter_fn(info_dict)
cb202fd2 2934
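# Minimal usage sketch (not part of the original source), assuming an extracted
# `info` dict:
#   import json
#   json.dumps(YoutubeDL.sanitize_info(info, remove_private_keys=True))
# With remove_private_keys=True, underscore-prefixed keys, internal fields such as
# 'requested_formats'/'filepath'/'entries' and empty values are dropped, while
# '_type' is kept so the result can be fed back through --load-info-json.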
8012d892 2935 @staticmethod
2936 def filter_requested_info(info_dict, actually_filter=True):
2937 ''' Alias of sanitize_info for backward compatibility '''
2938 return YoutubeDL.sanitize_info(info_dict, actually_filter)
2939
dcf64d43 2940 def run_pp(self, pp, infodict):
5bfa4862 2941 files_to_delete = []
dcf64d43 2942 if '__files_to_move' not in infodict:
2943 infodict['__files_to_move'] = {}
af819c21 2944 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2945 if not files_to_delete:
dcf64d43 2946 return infodict
5bfa4862 2947
2948 if self.params.get('keepvideo', False):
2949 for f in files_to_delete:
dcf64d43 2950 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 2951 else:
2952 for old_filename in set(files_to_delete):
2953 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2954 try:
2955 os.remove(encodeFilename(old_filename))
2956 except (IOError, OSError):
2957 self.report_warning('Unable to remove downloaded original file')
dcf64d43 2958 if old_filename in infodict['__files_to_move']:
2959 del infodict['__files_to_move'][old_filename]
2960 return infodict
5bfa4862 2961
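# Contract of run_pp() in brief (descriptive note, not original code): each
# postprocessor's run() returns (files_to_delete, infodict); with --keepvideo the
# listed files are only registered in '__files_to_move' so they survive, otherwise
# they are removed from disk and dropped from the pending-move map.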
277d6ff5 2962 @staticmethod
2963 def post_extract(info_dict):
2964 def actual_post_extract(info_dict):
2965 if info_dict.get('_type') in ('playlist', 'multi_video'):
2966 for video_dict in info_dict.get('entries', {}):
b050d210 2967 actual_post_extract(video_dict or {})
277d6ff5 2968 return
2969
07cce701 2970 post_extractor = info_dict.get('__post_extractor') or (lambda: {})
4ec82a72 2971 extra = post_extractor().items()
2972 info_dict.update(extra)
07cce701 2973 info_dict.pop('__post_extractor', None)
277d6ff5 2974
4ec82a72 2975 original_infodict = info_dict.get('__original_infodict') or {}
2976 original_infodict.update(extra)
2977 original_infodict.pop('__post_extractor', None)
2978
b050d210 2979 actual_post_extract(info_dict or {})
277d6ff5 2980
56d868db 2981 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 2982 info = dict(ie_info)
56d868db 2983 info['__files_to_move'] = files_to_move or {}
2984 for pp in self._pps[key]:
dcf64d43 2985 info = self.run_pp(pp, info)
56d868db 2986 return info, info.pop('__files_to_move', None)
5bfa4862 2987
dcf64d43 2988 def post_process(self, filename, ie_info, files_to_move=None):
8222d8de 2989 """Run all the postprocessors on the given file."""
2990 info = dict(ie_info)
2991 info['filepath'] = filename
dcf64d43 2992 info['__files_to_move'] = files_to_move or {}
0202b52a 2993
56d868db 2994 for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
dcf64d43 2995 info = self.run_pp(pp, info)
2996 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2997 del info['__files_to_move']
56d868db 2998 for pp in self._pps['after_move']:
dcf64d43 2999 info = self.run_pp(pp, info)
23c1a667 3000 return info
c1c9a79c 3001
5db07df6 3002 def _make_archive_id(self, info_dict):
e9fef7ee 3003 video_id = info_dict.get('id')
3004 if not video_id:
3005 return
5db07df6 3006 # Future-proof against any change in case
3007 # and backwards compatibility with prior versions
e9fef7ee 3008 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 3009 if extractor is None:
1211bb6d 3010 url = str_or_none(info_dict.get('url'))
3011 if not url:
3012 return
e9fef7ee 3013 # Try to find matching extractor for the URL and take its ie_key
3014 for ie in self._ies:
1211bb6d 3015 if ie.suitable(url):
e9fef7ee 3016 extractor = ie.ie_key()
3017 break
3018 else:
3019 return
d0757229 3020 return '%s %s' % (extractor.lower(), video_id)
5db07df6 3021
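# Archive IDs produced above have the form '<extractor key, lowercased> <video id>',
# e.g. 'youtube dQw4w9WgXcQ'; this example line is illustrative and not part of the
# original source.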
3022 def in_download_archive(self, info_dict):
3023 fn = self.params.get('download_archive')
3024 if fn is None:
3025 return False
3026
3027 vid_id = self._make_archive_id(info_dict)
e9fef7ee 3028 if not vid_id:
7012b23c 3029 return False # Incomplete video information
5db07df6 3030
a45e8619 3031 return vid_id in self.archive
c1c9a79c 3032
3033 def record_download_archive(self, info_dict):
3034 fn = self.params.get('download_archive')
3035 if fn is None:
3036 return
5db07df6 3037 vid_id = self._make_archive_id(info_dict)
3038 assert vid_id
c1c9a79c 3039 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 3040 archive_file.write(vid_id + '\n')
a45e8619 3041 self.archive.add(vid_id)
dd82ffea 3042
8c51aa65 3043 @staticmethod
8abeeb94 3044 def format_resolution(format, default='unknown'):
fb04e403 3045 if format.get('vcodec') == 'none':
8326b00a 3046 if format.get('acodec') == 'none':
3047 return 'images'
fb04e403 3048 return 'audio only'
f49d89ee 3049 if format.get('resolution') is not None:
3050 return format['resolution']
35615307 3051 if format.get('width') and format.get('height'):
3052 res = '%dx%d' % (format['width'], format['height'])
3053 elif format.get('height'):
3054 res = '%sp' % format['height']
3055 elif format.get('width'):
388ae76b 3056 res = '%dx?' % format['width']
8c51aa65 3057 else:
8abeeb94 3058 res = default
8c51aa65 3059 return res
3060
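# format_resolution() examples (illustrative, not in the original source):
# width and height -> '1920x1080', height only -> '720p', audio-only formats ->
# 'audio only', and formats with neither audio nor video codec -> 'images'.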
c57f7757 3061 def _format_note(self, fdict):
3062 res = ''
3063 if fdict.get('ext') in ['f4f', 'f4m']:
3064 res += '(unsupported) '
32f90364 3065 if fdict.get('language'):
3066 if res:
3067 res += ' '
9016d76f 3068 res += '[%s] ' % fdict['language']
c57f7757 3069 if fdict.get('format_note') is not None:
3070 res += fdict['format_note'] + ' '
3071 if fdict.get('tbr') is not None:
3072 res += '%4dk ' % fdict['tbr']
3073 if fdict.get('container') is not None:
3074 if res:
3075 res += ', '
3076 res += '%s container' % fdict['container']
3089bc74 3077 if (fdict.get('vcodec') is not None
3078 and fdict.get('vcodec') != 'none'):
c57f7757 3079 if res:
3080 res += ', '
3081 res += fdict['vcodec']
91c7271a 3082 if fdict.get('vbr') is not None:
c57f7757 3083 res += '@'
3084 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
3085 res += 'video@'
3086 if fdict.get('vbr') is not None:
3087 res += '%4dk' % fdict['vbr']
fbb21cf5 3088 if fdict.get('fps') is not None:
5d583bdf 3089 if res:
3090 res += ', '
3091 res += '%sfps' % fdict['fps']
c57f7757 3092 if fdict.get('acodec') is not None:
3093 if res:
3094 res += ', '
3095 if fdict['acodec'] == 'none':
3096 res += 'video only'
3097 else:
3098 res += '%-5s' % fdict['acodec']
3099 elif fdict.get('abr') is not None:
3100 if res:
3101 res += ', '
3102 res += 'audio'
3103 if fdict.get('abr') is not None:
3104 res += '@%3dk' % fdict['abr']
3105 if fdict.get('asr') is not None:
3106 res += ' (%5dHz)' % fdict['asr']
3107 if fdict.get('filesize') is not None:
3108 if res:
3109 res += ', '
3110 res += format_bytes(fdict['filesize'])
9732d77e 3111 elif fdict.get('filesize_approx') is not None:
3112 if res:
3113 res += ', '
3114 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 3115 return res
91c7271a 3116
c57f7757 3117 def list_formats(self, info_dict):
94badb25 3118 formats = info_dict.get('formats', [info_dict])
53ed7066 3119 new_format = (
3120 'list-formats' not in self.params.get('compat_opts', [])
169dbde9 3121 and self.params.get('listformats_table', True) is not False)
76d321f6 3122 if new_format:
3123 table = [
3124 [
3125 format_field(f, 'format_id'),
3126 format_field(f, 'ext'),
3127 self.format_resolution(f),
3128 format_field(f, 'fps', '%d'),
3129 '|',
3130 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
3131 format_field(f, 'tbr', '%4dk'),
52a8a1e1 3132 shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
76d321f6 3133 '|',
3134 format_field(f, 'vcodec', default='unknown').replace('none', ''),
3135 format_field(f, 'vbr', '%4dk'),
3136 format_field(f, 'acodec', default='unknown').replace('none', ''),
3137 format_field(f, 'abr', '%3dk'),
3138 format_field(f, 'asr', '%5dHz'),
3f698246 3139 ', '.join(filter(None, (
3140 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
3141 format_field(f, 'language', '[%s]'),
3142 format_field(f, 'format_note'),
3143 format_field(f, 'container', ignore=(None, f.get('ext'))),
ea05b302 3144 ))),
3f698246 3145 ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
76d321f6 3146 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
3f698246 3147 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
76d321f6 3148 else:
3149 table = [
3150 [
3151 format_field(f, 'format_id'),
3152 format_field(f, 'ext'),
3153 self.format_resolution(f),
3154 self._format_note(f)]
3155 for f in formats
3156 if f.get('preference') is None or f['preference'] >= -1000]
3157 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 3158
cfb56d1a 3159 self.to_screen(
169dbde9 3160 '[info] Available formats for %s:' % info_dict['id'])
3161 self.to_stdout(render_table(
bc97cdae 3162 header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
cfb56d1a 3163
3164 def list_thumbnails(self, info_dict):
b0249bca 3165 thumbnails = list(info_dict.get('thumbnails'))
cfb56d1a 3166 if not thumbnails:
b7b72db9 3167 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
3168 return
cfb56d1a 3169
3170 self.to_screen(
3171 '[info] Thumbnails for %s:' % info_dict['id'])
169dbde9 3172 self.to_stdout(render_table(
cfb56d1a 3173 ['ID', 'width', 'height', 'URL'],
3174 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 3175
360e1ca5 3176 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 3177 if not subtitles:
360e1ca5 3178 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 3179 return
a504ced0 3180 self.to_screen(
edab9dbf 3181 'Available %s for %s:' % (name, video_id))
2412044c 3182
3183 def _row(lang, formats):
49c258e1 3184 exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
2412044c 3185 if len(set(names)) == 1:
7aee40c1 3186 names = [] if names[0] == 'unknown' else names[:1]
2412044c 3187 return [lang, ', '.join(names), ', '.join(exts)]
3188
169dbde9 3189 self.to_stdout(render_table(
2412044c 3190 ['Language', 'Name', 'Formats'],
3191 [_row(lang, formats) for lang, formats in subtitles.items()],
3192 hideEmpty=True))
a504ced0 3193
dca08720 3194 def urlopen(self, req):
3195 """ Start an HTTP download """
82d8a8b6 3196 if isinstance(req, compat_basestring):
67dda517 3197 req = sanitized_Request(req)
19a41fc6 3198 return self._opener.open(req, timeout=self._socket_timeout)
dca08720 3199
3200 def print_debug_header(self):
3201 if not self.params.get('verbose'):
3202 return
62fec3b2 3203
c6afed48 3204 stdout_encoding = getattr(
3205 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 3206 encoding_str = (
734f90bb 3207 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
3208 locale.getpreferredencoding(),
3209 sys.getfilesystemencoding(),
c6afed48 3210 stdout_encoding,
b0472057 3211 self.get_encoding()))
4192b51c 3212 write_string(encoding_str, encoding=None)
734f90bb 3213
e5813e53 3214 source = (
3215 '(exe)' if hasattr(sys, 'frozen')
3216 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
3217 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
3218 else '')
3219 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 3220 if _LAZY_LOADER:
f74980cb 3221 self._write_string('[debug] Lazy loading extractors enabled\n')
3222 if _PLUGIN_CLASSES:
3223 self._write_string(
3224 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
53ed7066 3225 if self.params.get('compat_opts'):
3226 self._write_string(
3227 '[debug] Compatibility options: %s\n' % ', '.join(self.params.get('compat_opts')))
dca08720 3228 try:
3229 sp = subprocess.Popen(
3230 ['git', 'rev-parse', '--short', 'HEAD'],
3231 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3232 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 3233 out, err = process_communicate_or_kill(sp)
dca08720 3234 out = out.decode().strip()
3235 if re.match('[0-9a-f]+', out):
f74980cb 3236 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 3237 except Exception:
dca08720 3238 try:
3239 sys.exc_clear()
70a1165b 3240 except Exception:
dca08720 3241 pass
b300cda4 3242
3243 def python_implementation():
3244 impl_name = platform.python_implementation()
3245 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
3246 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
3247 return impl_name
3248
e5813e53 3249 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
3250 platform.python_version(),
3251 python_implementation(),
3252 platform.architecture()[0],
b300cda4 3253 platform_name()))
d28b5171 3254
73fac4e9 3255 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 3256 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 3257 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171 3258 exe_str = ', '.join(
2831b468 3259 f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
3260 ) or 'none'
d28b5171 3261 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720 3262
2831b468 3263 from .downloader.fragment import can_decrypt_frag
3264 from .downloader.websocket import has_websockets
3265 from .postprocessor.embedthumbnail import has_mutagen
3266 from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE
3267
ad3dc496 3268 lib_str = ', '.join(sorted(filter(None, (
2831b468 3269 can_decrypt_frag and 'pycryptodome',
3270 has_websockets and 'websockets',
3271 has_mutagen and 'mutagen',
3272 SQLITE_AVAILABLE and 'sqlite',
3273 KEYRING_AVAILABLE and 'keyring',
ad3dc496 3274 )))) or 'none'
2831b468 3275 self._write_string('[debug] Optional libraries: %s\n' % lib_str)
3276
dca08720 3277 proxy_map = {}
3278 for handler in self._opener.handlers:
3279 if hasattr(handler, 'proxies'):
3280 proxy_map.update(handler.proxies)
734f90bb 3281 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 3282
58b1f00d 3283 if self.params.get('call_home', False):
3284 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
3285 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 3286 return
58b1f00d 3287 latest_version = self.urlopen(
3288 'https://yt-dl.org/latest/version').read().decode('utf-8')
3289 if version_tuple(latest_version) > version_tuple(__version__):
3290 self.report_warning(
3291 'You are using an outdated version (newest version: %s)! '
3292 'See https://yt-dl.org/update if you need help updating.' %
3293 latest_version)
3294
e344693b 3295 def _setup_opener(self):
6ad14cab 3296 timeout_val = self.params.get('socket_timeout')
19a41fc6 3297 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 3298
982ee69a 3299 opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
dca08720 3300 opts_cookiefile = self.params.get('cookiefile')
3301 opts_proxy = self.params.get('proxy')
3302
982ee69a 3303 self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
dca08720 3304
6a3f4c3f 3305 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720 3306 if opts_proxy is not None:
3307 if opts_proxy == '':
3308 proxies = {}
3309 else:
3310 proxies = {'http': opts_proxy, 'https': opts_proxy}
3311 else:
3312 proxies = compat_urllib_request.getproxies()
067aa17e 3313 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720 3314 if 'http' in proxies and 'https' not in proxies:
3315 proxies['https'] = proxies['http']
91410c9b 3316 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2 3317
3318 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d 3319 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3320 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3321 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 3322 data_handler = compat_urllib_request_DataHandler()
6240b0a2 3323
3324 # When passing our own FileHandler instance, build_opener won't add the
3325 # default FileHandler and allows us to disable the file protocol, which
3326 # can be used for malicious purposes (see
067aa17e 3327 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2 3328 file_handler = compat_urllib_request.FileHandler()
3329
3330 def file_open(*args, **kwargs):
7a5c1cfe 3331 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2 3332 file_handler.file_open = file_open
3333
3334 opener = compat_urllib_request.build_opener(
fca6dba8 3335 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3336
dca08720 3337 # Delete the default user-agent header, which would otherwise apply in
3338 # cases where our custom HTTP handler doesn't come into play
067aa17e 3339 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720 3340 opener.addheaders = []
3341 self._opener = opener
62fec3b2 3342
3343 def encode(self, s):
3344 if isinstance(s, bytes):
3345 return s # Already encoded
3346
3347 try:
3348 return s.encode(self.get_encoding())
3349 except UnicodeEncodeError as err:
3350 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3351 raise
3352
3353 def get_encoding(self):
3354 encoding = self.params.get('encoding')
3355 if encoding is None:
3356 encoding = preferredencoding()
3357 return encoding
ec82d85a 3358
de6000d9 3359 def _write_thumbnails(self, info_dict, filename): # return the extensions
6c4fd172 3360 write_all = self.params.get('write_all_thumbnails', False)
3361 thumbnails = []
3362 if write_all or self.params.get('writethumbnail', False):
0202b52a 3363 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3364 multiple = write_all and len(thumbnails) > 1
ec82d85a 3365
0202b52a 3366 ret = []
981052c9 3367 for t in thumbnails[::-1]:
ec82d85a 3368 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 3369 suffix = '%s.' % t['id'] if multiple else ''
3370 thumb_display_id = '%s ' % t['id'] if multiple else ''
885cc0b7 3371 thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 3372
0c3d0f51 3373 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 3374 ret.append(suffix + thumb_ext)
8ba87148 3375 t['filepath'] = thumb_filename
ec82d85a 3376 self.to_screen('[%s] %s: Thumbnail %sis already present' %
3377 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3378 else:
5ef7d9bd 3379 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
ec82d85a 3380 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3381 try:
3382 uf = self.urlopen(t['url'])
d3d89c32 3383 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3384 shutil.copyfileobj(uf, thumbf)
de6000d9 3385 ret.append(suffix + thumb_ext)
ec82d85a 3386 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3387 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
885cc0b7 3388 t['filepath'] = thumb_filename
3158150c 3389 except network_exceptions as err:
ec82d85a 3390 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 3391 (t['url'], error_to_compat_str(err)))
6c4fd172 3392 if ret and not write_all:
3393 break
0202b52a 3394 return ret