]> jfr.im git - yt-dlp.git/blame - yt_dlp/YoutubeDL.py
[version] update
[yt-dlp.git] / yt_dlp / YoutubeDL.py
CommitLineData
8222d8de 1#!/usr/bin/env python
dcdb292f 2# coding: utf-8
8222d8de 3
6febd1c1 4from __future__ import absolute_import, unicode_literals
8222d8de 5
26e63931 6import collections
31bd3925 7import contextlib
317f7ab6 8import copy
9d2ecdbc 9import datetime
c1c9a79c 10import errno
31bd3925 11import fileinput
8222d8de 12import io
b82f815f 13import itertools
8694c600 14import json
62fec3b2 15import locale
083c9df9 16import operator
8222d8de 17import os
dca08720 18import platform
8222d8de
JMF
19import re
20import shutil
dca08720 21import subprocess
8222d8de
JMF
22import socket
23import sys
24import time
67134eab 25import tokenize
8222d8de 26import traceback
75822ca7 27import random
8222d8de 28
961ea474 29from string import ascii_letters
e5813e53 30from zipimport import zipimporter
961ea474 31
8c25f81b 32from .compat import (
82d8a8b6 33 compat_basestring,
dca08720 34 compat_cookiejar,
003c69a8 35 compat_get_terminal_size,
ce02ed60 36 compat_http_client,
4f026faf 37 compat_kwargs,
d0d9ade4 38 compat_numeric_types,
e9c0cdd3 39 compat_os_name,
ce02ed60 40 compat_str,
67134eab 41 compat_tokenize_tokenize,
ce02ed60
PH
42 compat_urllib_error,
43 compat_urllib_request,
8b172c2e 44 compat_urllib_request_DataHandler,
8c25f81b
PH
45)
46from .utils import (
eedb7ba5
S
47 age_restricted,
48 args_to_str,
ce02ed60
PH
49 ContentTooShortError,
50 date_from_str,
51 DateRange,
acd69589 52 DEFAULT_OUTTMPL,
de6000d9 53 OUTTMPL_TYPES,
ce02ed60 54 determine_ext,
b5559424 55 determine_protocol,
732044af 56 DOT_DESKTOP_LINK_TEMPLATE,
57 DOT_URL_LINK_TEMPLATE,
58 DOT_WEBLOC_LINK_TEMPLATE,
ce02ed60 59 DownloadError,
c0384f22 60 encode_compat_str,
ce02ed60 61 encodeFilename,
9b9c5355 62 error_to_compat_str,
498f5606 63 EntryNotInPlaylist,
8b0d7497 64 ExistingVideoReached,
590bc6f6 65 expand_path,
ce02ed60 66 ExtractorError,
e29663c6 67 float_or_none,
02dbf93f 68 format_bytes,
76d321f6 69 format_field,
143db31d 70 FORMAT_RE,
525ef922 71 formatSeconds,
773f291d 72 GeoRestrictedError,
c9969434 73 int_or_none,
732044af 74 iri_to_uri,
773f291d 75 ISO3166Utils,
ce02ed60 76 locked_file,
0202b52a 77 make_dir,
dca08720 78 make_HTTPS_handler,
ce02ed60 79 MaxDownloadsReached,
cd6fc19e 80 orderedSet,
b7ab0590 81 PagedList,
083c9df9 82 parse_filesize,
91410c9b 83 PerRequestProxyHandler,
dca08720 84 platform_name,
eedb7ba5 85 PostProcessingError,
ce02ed60 86 preferredencoding,
eedb7ba5 87 prepend_extension,
51fb4995 88 register_socks_protocols,
cfb56d1a 89 render_table,
eedb7ba5 90 replace_extension,
8b0d7497 91 RejectedVideoReached,
ce02ed60
PH
92 SameFileError,
93 sanitize_filename,
1bb5c511 94 sanitize_path,
dcf77cf1 95 sanitize_url,
67dda517 96 sanitized_Request,
e5660ee6 97 std_headers,
1211bb6d 98 str_or_none,
e29663c6 99 strftime_or_none,
ce02ed60 100 subtitles_filename,
732044af 101 to_high_limit_path,
ce02ed60 102 UnavailableVideoError,
29eb5174 103 url_basename,
58b1f00d 104 version_tuple,
ce02ed60
PH
105 write_json_file,
106 write_string,
1bab3437 107 YoutubeDLCookieJar,
6a3f4c3f 108 YoutubeDLCookieProcessor,
dca08720 109 YoutubeDLHandler,
fca6dba8 110 YoutubeDLRedirectHandler,
f5b1bca9 111 process_communicate_or_kill,
ce02ed60 112)
a0e07d31 113from .cache import Cache
f74980cb 114from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER, _PLUGIN_CLASSES
4c54b89e 115from .extractor.openload import PhantomJSwrapper
3bc2ddcc 116from .downloader import get_suitable_downloader
4c83c967 117from .downloader.rtmp import rtmpdump_version
4f026faf 118from .postprocessor import (
f17f8651 119 FFmpegFixupM3u8PP,
62cd676c 120 FFmpegFixupM4aPP,
6271f1ca 121 FFmpegFixupStretchedPP,
4f026faf
PH
122 FFmpegMergerPP,
123 FFmpegPostProcessor,
0202b52a 124 # FFmpegSubtitlesConvertorPP,
4f026faf 125 get_postprocessor,
0202b52a 126 MoveFilesAfterDownloadPP,
4f026faf 127)
dca08720 128from .version import __version__
8222d8de 129
e9c0cdd3
YCH
130if compat_os_name == 'nt':
131 import ctypes
132
2459b6e1 133
8222d8de
JMF
134class YoutubeDL(object):
135 """YoutubeDL class.
136
137 YoutubeDL objects are the ones responsible of downloading the
138 actual video file and writing it to disk if the user has requested
139 it, among some other tasks. In most cases there should be one per
140 program. As, given a video URL, the downloader doesn't know how to
141 extract all the needed information, task that InfoExtractors do, it
142 has to pass the URL to one of them.
143
144 For this, YoutubeDL objects have a method that allows
145 InfoExtractors to be registered in a given order. When it is passed
146 a URL, the YoutubeDL object handles it to the first InfoExtractor it
147 finds that reports being able to handle it. The InfoExtractor extracts
148 all the information about the video or videos the URL refers to, and
149 YoutubeDL process the extracted information, possibly using a File
150 Downloader to download the video.
151
152 YoutubeDL objects accept a lot of parameters. In order not to saturate
153 the object constructor with arguments, it receives a dictionary of
154 options instead. These options are available through the params
155 attribute for the InfoExtractors to use. The YoutubeDL also
156 registers itself as the downloader in charge for the InfoExtractors
157 that are added to it, so this is a "mutual registration".
158
159 Available options:
160
161 username: Username for authentication purposes.
162 password: Password for authentication purposes.
180940e0 163 videopassword: Password for accessing a video.
1da50aa3
S
164 ap_mso: Adobe Pass multiple-system operator identifier.
165 ap_username: Multiple-system operator account username.
166 ap_password: Multiple-system operator account password.
8222d8de
JMF
167 usenetrc: Use netrc for authentication instead.
168 verbose: Print additional info to stdout.
169 quiet: Do not print messages to stdout.
ad8915b7 170 no_warnings: Do not print out anything for warnings.
8222d8de
JMF
171 forceurl: Force printing final URL.
172 forcetitle: Force printing title.
173 forceid: Force printing ID.
174 forcethumbnail: Force printing thumbnail URL.
175 forcedescription: Force printing description.
176 forcefilename: Force printing final filename.
525ef922 177 forceduration: Force printing duration.
8694c600 178 forcejson: Force printing info_dict as JSON.
63e0be34
PH
179 dump_single_json: Force printing the info_dict of the whole playlist
180 (or video) as a single JSON line.
c25228e5 181 force_write_download_archive: Force writing download archive regardless
182 of 'skip_download' or 'simulate'.
8222d8de 183 simulate: Do not download the video files.
eb8a4433 184 format: Video format code. see "FORMAT SELECTION" for more details.
63ad4d43 185 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
c25228e5 186 format_sort: How to sort the video formats. see "Sorting Formats"
187 for more details.
188 format_sort_force: Force the given format_sort. see "Sorting Formats"
189 for more details.
190 allow_multiple_video_streams: Allow multiple video streams to be merged
191 into a single file
192 allow_multiple_audio_streams: Allow multiple audio streams to be merged
193 into a single file
4524baf0 194 paths: Dictionary of output paths. The allowed keys are 'home'
195 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 196 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 197 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
198 A string a also accepted for backward compatibility
a820dc72
RA
199 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
200 restrictfilenames: Do not allow "&" and spaces in file names
201 trim_file_name: Limit length of filename (extension excluded)
4524baf0 202 windowsfilenames: Force the filenames to be windows compatible
a820dc72 203 ignoreerrors: Do not stop on download errors
7a5c1cfe 204 (Default True when running yt-dlp,
a820dc72 205 but False when directly accessing YoutubeDL class)
d22dec74 206 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 207 overwrites: Overwrite all video and metadata files if True,
208 overwrite only non-video files if None
209 and don't overwrite any file if False
8222d8de
JMF
210 playliststart: Playlist item to start at.
211 playlistend: Playlist item to end at.
c14e88f0 212 playlist_items: Specific indices of playlist to download.
ff815fe6 213 playlistreverse: Download playlist items in reverse order.
75822ca7 214 playlistrandom: Download playlist items in random order.
8222d8de
JMF
215 matchtitle: Download only matching titles.
216 rejecttitle: Reject downloads for matching titles.
8bf9319e 217 logger: Log messages to a logging.Logger instance.
8222d8de
JMF
218 logtostderr: Log messages to stderr instead of stdout.
219 writedescription: Write the video description to a .description file
220 writeinfojson: Write the video description to a .info.json file
75d43ca0 221 clean_infojson: Remove private fields from the infojson
06167fbb 222 writecomments: Extract video comments. This will not be written to disk
223 unless writeinfojson is also given
1fb07d10 224 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 225 writethumbnail: Write the thumbnail image to a file
c25228e5 226 allow_playlist_files: Whether to write playlists' description, infojson etc
227 also to disk when using the 'write*' options
ec82d85a 228 write_all_thumbnails: Write all thumbnail formats to files
732044af 229 writelink: Write an internet shortcut file, depending on the
230 current platform (.url/.webloc/.desktop)
231 writeurllink: Write a Windows internet shortcut file (.url)
232 writewebloclink: Write a macOS internet shortcut file (.webloc)
233 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 234 writesubtitles: Write the video subtitles to a file
741dd8ea 235 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 236 allsubtitles: Downloads all the subtitles of the video
0b7f3118 237 (requires writesubtitles or writeautomaticsub)
8222d8de 238 listsubtitles: Lists all available subtitles for the video
a504ced0 239 subtitlesformat: The format code for subtitles
aa6a10c4 240 subtitleslangs: List of languages of the subtitles to download
8222d8de
JMF
241 keepvideo: Keep the video file after post-processing
242 daterange: A DateRange object, download only if the upload_date is in the range.
243 skip_download: Skip the actual download of the video file
c35f9e72 244 cachedir: Location of the cache files in the filesystem.
a0e07d31 245 False to disable filesystem cache.
47192f92 246 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
247 age_limit: An integer representing the user's age in years.
248 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
249 min_views: An integer representing the minimum view count the video
250 must have in order to not be skipped.
251 Videos without view count information are always
252 downloaded. None for no limit.
253 max_views: An integer representing the maximum view count.
254 Videos that are more popular than that are not
255 downloaded.
256 Videos without view count information are always
257 downloaded. None for no limit.
258 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
259 Videos already present in the file are not downloaded
260 again.
8a51f564 261 break_on_existing: Stop the download process after attempting to download a
262 file that is in the archive.
263 break_on_reject: Stop the download process when encountering a video that
264 has been filtered out.
265 cookiefile: File name where cookies should be read from and dumped to
a1ee09e8 266 nocheckcertificate:Do not verify SSL certificates
7e8c0af0
PH
267 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
268 At the moment, this is only supported by YouTube.
a1ee09e8 269 proxy: URL of the proxy server to use
38cce791 270 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 271 on geo-restricted sites.
e344693b 272 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
273 bidi_workaround: Work around buggy terminals without bidirectional text
274 support, using fridibi
a0ddb8a2 275 debug_printtraffic:Print out sent and received HTTP traffic
7b0817e8 276 include_ads: Download ads as well
04b4d394
PH
277 default_search: Prepend this string if an input url is not valid.
278 'auto' for elaborate guessing
62fec3b2 279 encoding: Use this encoding instead of the system-specified.
e8ee972c 280 extract_flat: Do not resolve URLs, return the immediate result.
057a5206
PH
281 Pass in 'in_playlist' to only show this behavior for
282 playlist items.
4f026faf 283 postprocessors: A list of dictionaries, each with an entry
71b640cc 284 * key: The name of the postprocessor. See
7a5c1cfe 285 yt_dlp/postprocessor/__init__.py for a list.
0202b52a 286 * _after_move: Optional. If True, run this post_processor
287 after 'MoveFilesAfterDownload'
4f026faf
PH
288 as well as any further keyword arguments for the
289 postprocessor.
ab8e5e51
AM
290 post_hooks: A list of functions that get called as the final step
291 for each video file, after all postprocessors have been
292 called. The filename will be passed as the only argument.
71b640cc
PH
293 progress_hooks: A list of functions that get called on download
294 progress, with a dictionary with the entries
5cda4eda 295 * status: One of "downloading", "error", or "finished".
ee69b99a 296 Check this first and ignore unknown values.
71b640cc 297
5cda4eda 298 If status is one of "downloading", or "finished", the
ee69b99a
PH
299 following properties may also be present:
300 * filename: The final filename (always present)
5cda4eda 301 * tmpfilename: The filename we're currently writing to
71b640cc
PH
302 * downloaded_bytes: Bytes on disk
303 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
304 * total_bytes_estimate: Guess of the eventual file size,
305 None if unavailable.
306 * elapsed: The number of seconds since download started.
71b640cc
PH
307 * eta: The estimated time in seconds, None if unknown
308 * speed: The download speed in bytes/second, None if
309 unknown
5cda4eda
PH
310 * fragment_index: The counter of the currently
311 downloaded video fragment.
312 * fragment_count: The number of fragments (= individual
313 files that will be merged)
71b640cc
PH
314
315 Progress hooks are guaranteed to be called at least once
316 (with status "finished") if the download is successful.
45598f15 317 merge_output_format: Extension to use when merging formats.
6b591b29 318 final_ext: Expected final extension; used to detect when the file was
319 already downloaded and converted. "merge_output_format" is
320 replaced by this extension when given
6271f1ca
PH
321 fixup: Automatically correct known faults of the file.
322 One of:
323 - "never": do nothing
324 - "warn": only emit a warning
325 - "detect_or_warn": check whether we can do anything
62cd676c 326 about it, warn otherwise (default)
504f20dd 327 source_address: Client-side IP address to bind to.
6ec6cb4e 328 call_home: Boolean, true iff we are allowed to contact the
7a5c1cfe 329 yt-dlp servers for debugging. (BROKEN)
1cf376f5 330 sleep_interval_requests: Number of seconds to sleep between requests
331 during extraction
7aa589a5
S
332 sleep_interval: Number of seconds to sleep before each download when
333 used alone or a lower bound of a range for randomized
334 sleep before each download (minimum possible number
335 of seconds to sleep) when used along with
336 max_sleep_interval.
337 max_sleep_interval:Upper bound of a range for randomized sleep before each
338 download (maximum possible number of seconds to sleep).
339 Must only be used along with sleep_interval.
340 Actual sleep time will be a random float from range
341 [sleep_interval; max_sleep_interval].
1cf376f5 342 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
343 listformats: Print an overview of available video formats and exit.
344 list_thumbnails: Print a table of all thumbnails and exit.
347de493
PH
345 match_filter: A function that gets called with the info_dict of
346 every video.
347 If it returns a message, the video is ignored.
348 If it returns None, the video is downloaded.
349 match_filter_func in utils.py is one example for this.
7e5db8c9 350 no_color: Do not emit color codes in output.
0a840f58 351 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 352 HTTP header
0a840f58 353 geo_bypass_country:
773f291d
S
354 Two-letter ISO 3166-2 country code that will be used for
355 explicit geographic restriction bypassing via faking
504f20dd 356 X-Forwarded-For HTTP header
5f95927a
S
357 geo_bypass_ip_block:
358 IP range in CIDR notation that will be used similarly to
504f20dd 359 geo_bypass_country
71b640cc 360
85729c51
PH
361 The following options determine which downloader is picked:
362 external_downloader: Executable of the external downloader to call.
363 None or unset for standard (built-in) downloader.
bf09af3a
S
364 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
365 if True, otherwise use ffmpeg/avconv if False, otherwise
366 use downloader suggested by extractor if None.
fe7e0c98 367
8222d8de 368 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 369 the downloader (see yt_dlp/downloader/common.py):
8222d8de 370 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
881e6a1f 371 noresizebuffer, retries, continuedl, noprogress, consoletitle,
b54d4a5c 372 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
e409895f 373 http_chunk_size.
76b1bd67
JMF
374
375 The following options are used by the post processors:
d4a24f40 376 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
e4172ac9 377 otherwise prefer ffmpeg. (avconv support is deprecated)
c0b7d117
S
378 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
379 to the binary or its containing directory.
43820c03 380 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
381 and a list of additional command-line arguments for the
382 postprocessor/executable. The dict can also have "PP+EXE" keys
383 which are used when the given exe is used by the given PP.
384 Use 'default' as the name for arguments to passed to all PP
e409895f 385
386 The following options are used by the extractors:
62bff2c1 387 extractor_retries: Number of times to retry for known errors
388 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 389 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 390 discontinuities such as ad breaks (default: False)
3600fd59 391 youtube_include_dash_manifest: If True (default), DASH manifests and related
62bff2c1 392 data will be downloaded and processed by extractor.
393 You can reduce network I/O by disabling it if you don't
394 care about DASH. (only for youtube)
e409895f 395 youtube_include_hls_manifest: If True (default), HLS manifests and related
62bff2c1 396 data will be downloaded and processed by extractor.
397 You can reduce network I/O by disabling it if you don't
398 care about HLS. (only for youtube)
8222d8de
JMF
399 """
400
c9969434
S
401 _NUMERIC_FIELDS = set((
402 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
403 'timestamp', 'upload_year', 'upload_month', 'upload_day',
404 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
405 'average_rating', 'comment_count', 'age_limit',
406 'start_time', 'end_time',
407 'chapter_number', 'season_number', 'episode_number',
408 'track_number', 'disc_number', 'release_year',
409 'playlist_index',
410 ))
411
8222d8de
JMF
412 params = None
413 _ies = []
5bfa4862 414 _pps = {'beforedl': [], 'aftermove': [], 'normal': []}
0202b52a 415 __prepare_filename_warned = False
1cf376f5 416 _first_webpage_request = True
8222d8de
JMF
417 _download_retcode = None
418 _num_downloads = None
30a074c2 419 _playlist_level = 0
420 _playlist_urls = set()
8222d8de
JMF
421 _screen_file = None
422
3511266b 423 def __init__(self, params=None, auto_init=True):
8222d8de 424 """Create a FileDownloader object with the given options."""
e9f9a10f
JMF
425 if params is None:
426 params = {}
8222d8de 427 self._ies = []
56c73665 428 self._ies_instances = {}
5bfa4862 429 self._pps = {'beforedl': [], 'aftermove': [], 'normal': []}
0202b52a 430 self.__prepare_filename_warned = False
1cf376f5 431 self._first_webpage_request = True
ab8e5e51 432 self._post_hooks = []
933605d7 433 self._progress_hooks = []
8222d8de
JMF
434 self._download_retcode = 0
435 self._num_downloads = 0
436 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
0783b09b 437 self._err_file = sys.stderr
4abf617b
S
438 self.params = {
439 # Default parameters
440 'nocheckcertificate': False,
441 }
442 self.params.update(params)
a0e07d31 443 self.cache = Cache(self)
a45e8619 444 self.archive = set()
ecdec191
JB
445
446 """Preload the archive, if any is specified"""
447 def preload_download_archive(self):
448 fn = self.params.get('download_archive')
449 if fn is None:
450 return False
451 try:
452 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
453 for line in archive_file:
a45e8619 454 self.archive.add(line.strip())
ecdec191
JB
455 except IOError as ioe:
456 if ioe.errno != errno.ENOENT:
457 raise
1d74d8d9 458 return False
ecdec191 459 return True
34308b30 460
be5df5ee
S
461 def check_deprecated(param, option, suggestion):
462 if self.params.get(param) is not None:
463 self.report_warning(
464 '%s is deprecated. Use %s instead.' % (option, suggestion))
465 return True
466 return False
467
1de7ea76
JB
468 if self.params.get('verbose'):
469 self.to_stdout('[debug] Loading archive file %r' % self.params.get('download_archive'))
470
ecdec191
JB
471 preload_download_archive(self)
472
be5df5ee 473 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
474 if self.params.get('geo_verification_proxy') is None:
475 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
476
6b591b29 477 if self.params.get('final_ext'):
478 if self.params.get('merge_output_format'):
479 self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
480 self.params['merge_output_format'] = self.params['final_ext']
481
b9d973be 482 if 'overwrites' in self.params and self.params['overwrites'] is None:
483 del self.params['overwrites']
484
be5df5ee
S
485 check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
486 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
487 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
488
0783b09b 489 if params.get('bidi_workaround', False):
1c088fa8
PH
490 try:
491 import pty
492 master, slave = pty.openpty()
003c69a8 493 width = compat_get_terminal_size().columns
1c088fa8
PH
494 if width is None:
495 width_args = []
496 else:
497 width_args = ['-w', str(width)]
5d681e96 498 sp_kwargs = dict(
1c088fa8
PH
499 stdin=subprocess.PIPE,
500 stdout=slave,
501 stderr=self._err_file)
5d681e96
PH
502 try:
503 self._output_process = subprocess.Popen(
504 ['bidiv'] + width_args, **sp_kwargs
505 )
506 except OSError:
5d681e96
PH
507 self._output_process = subprocess.Popen(
508 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
509 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 510 except OSError as ose:
66e7ace1 511 if ose.errno == errno.ENOENT:
6febd1c1 512 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
513 else:
514 raise
0783b09b 515
3089bc74
S
516 if (sys.platform != 'win32'
517 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
518 and not params.get('restrictfilenames', False)):
e9137224 519 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 520 self.report_warning(
6febd1c1 521 'Assuming --restrict-filenames since file system encoding '
1b725173 522 'cannot encode all characters. '
6febd1c1 523 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 524 self.params['restrictfilenames'] = True
34308b30 525
de6000d9 526 self.outtmpl_dict = self.parse_outtmpl()
486dd09e 527
dca08720
PH
528 self._setup_opener()
529
3511266b
PH
530 if auto_init:
531 self.print_debug_header()
532 self.add_default_info_extractors()
533
4f026faf
PH
534 for pp_def_raw in self.params.get('postprocessors', []):
535 pp_class = get_postprocessor(pp_def_raw['key'])
536 pp_def = dict(pp_def_raw)
537 del pp_def['key']
5bfa4862 538 if 'when' in pp_def:
539 when = pp_def['when']
540 del pp_def['when']
541 else:
542 when = 'normal'
4f026faf 543 pp = pp_class(self, **compat_kwargs(pp_def))
5bfa4862 544 self.add_post_processor(pp, when=when)
4f026faf 545
ab8e5e51
AM
546 for ph in self.params.get('post_hooks', []):
547 self.add_post_hook(ph)
548
71b640cc
PH
549 for ph in self.params.get('progress_hooks', []):
550 self.add_progress_hook(ph)
551
51fb4995
YCH
552 register_socks_protocols()
553
7d4111ed
PH
554 def warn_if_short_id(self, argv):
555 # short YouTube ID starting with dash?
556 idxs = [
557 i for i, a in enumerate(argv)
558 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
559 if idxs:
560 correct_argv = (
7a5c1cfe 561 ['yt-dlp']
3089bc74
S
562 + [a for i, a in enumerate(argv) if i not in idxs]
563 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
564 )
565 self.report_warning(
566 'Long argument string detected. '
567 'Use -- to separate parameters and URLs, like this:\n%s\n' %
568 args_to_str(correct_argv))
569
8222d8de
JMF
570 def add_info_extractor(self, ie):
571 """Add an InfoExtractor object to the end of the list."""
572 self._ies.append(ie)
e52d7f85
JMF
573 if not isinstance(ie, type):
574 self._ies_instances[ie.ie_key()] = ie
575 ie.set_downloader(self)
8222d8de 576
56c73665
JMF
577 def get_info_extractor(self, ie_key):
578 """
579 Get an instance of an IE with name ie_key, it will try to get one from
580 the _ies list, if there's no instance it will create a new one and add
581 it to the extractor list.
582 """
583 ie = self._ies_instances.get(ie_key)
584 if ie is None:
585 ie = get_info_extractor(ie_key)()
586 self.add_info_extractor(ie)
587 return ie
588
023fa8c4
JMF
589 def add_default_info_extractors(self):
590 """
591 Add the InfoExtractors returned by gen_extractors to the end of the list
592 """
e52d7f85 593 for ie in gen_extractor_classes():
023fa8c4
JMF
594 self.add_info_extractor(ie)
595
5bfa4862 596 def add_post_processor(self, pp, when='normal'):
8222d8de 597 """Add a PostProcessor object to the end of the chain."""
5bfa4862 598 self._pps[when].append(pp)
8222d8de
JMF
599 pp.set_downloader(self)
600
ab8e5e51
AM
601 def add_post_hook(self, ph):
602 """Add the post hook"""
603 self._post_hooks.append(ph)
604
933605d7
JMF
605 def add_progress_hook(self, ph):
606 """Add the progress hook (currently only for the file downloader)"""
607 self._progress_hooks.append(ph)
8ab470f1 608
1c088fa8 609 def _bidi_workaround(self, message):
5d681e96 610 if not hasattr(self, '_output_channel'):
1c088fa8
PH
611 return message
612
5d681e96 613 assert hasattr(self, '_output_process')
11b85ce6 614 assert isinstance(message, compat_str)
6febd1c1
PH
615 line_count = message.count('\n') + 1
616 self._output_process.stdin.write((message + '\n').encode('utf-8'))
5d681e96 617 self._output_process.stdin.flush()
6febd1c1 618 res = ''.join(self._output_channel.readline().decode('utf-8')
9e1a5b84 619 for _ in range(line_count))
6febd1c1 620 return res[:-len('\n')]
1c088fa8 621
8222d8de 622 def to_screen(self, message, skip_eol=False):
0783b09b
PH
623 """Print message to stdout if not in quiet mode."""
624 return self.to_stdout(message, skip_eol, check_quiet=True)
625
734f90bb 626 def _write_string(self, s, out=None):
b58ddb32 627 write_string(s, out=out, encoding=self.params.get('encoding'))
734f90bb 628
0783b09b 629 def to_stdout(self, message, skip_eol=False, check_quiet=False):
8222d8de 630 """Print message to stdout if not in quiet mode."""
8bf9319e 631 if self.params.get('logger'):
43afe285 632 self.params['logger'].debug(message)
0783b09b 633 elif not check_quiet or not self.params.get('quiet', False):
1c088fa8 634 message = self._bidi_workaround(message)
6febd1c1 635 terminator = ['\n', ''][skip_eol]
8222d8de 636 output = message + terminator
1c088fa8 637
734f90bb 638 self._write_string(output, self._screen_file)
8222d8de
JMF
639
640 def to_stderr(self, message):
641 """Print message to stderr."""
11b85ce6 642 assert isinstance(message, compat_str)
8bf9319e 643 if self.params.get('logger'):
43afe285
IB
644 self.params['logger'].error(message)
645 else:
1c088fa8 646 message = self._bidi_workaround(message)
6febd1c1 647 output = message + '\n'
734f90bb 648 self._write_string(output, self._err_file)
8222d8de 649
1e5b9a95
PH
650 def to_console_title(self, message):
651 if not self.params.get('consoletitle', False):
652 return
4bede0d8
C
653 if compat_os_name == 'nt':
654 if ctypes.windll.kernel32.GetConsoleWindow():
655 # c_wchar_p() might not be necessary if `message` is
656 # already of type unicode()
657 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
1e5b9a95 658 elif 'TERM' in os.environ:
b46696bd 659 self._write_string('\033]0;%s\007' % message, self._screen_file)
1e5b9a95 660
bdde425c
PH
661 def save_console_title(self):
662 if not self.params.get('consoletitle', False):
663 return
94c3442e
S
664 if self.params.get('simulate', False):
665 return
4bede0d8 666 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 667 # Save the title on stack
734f90bb 668 self._write_string('\033[22;0t', self._screen_file)
bdde425c
PH
669
670 def restore_console_title(self):
671 if not self.params.get('consoletitle', False):
672 return
94c3442e
S
673 if self.params.get('simulate', False):
674 return
4bede0d8 675 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 676 # Restore the title from stack
734f90bb 677 self._write_string('\033[23;0t', self._screen_file)
bdde425c
PH
678
679 def __enter__(self):
680 self.save_console_title()
681 return self
682
683 def __exit__(self, *args):
684 self.restore_console_title()
f89197d7 685
dca08720 686 if self.params.get('cookiefile') is not None:
1bab3437 687 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 688
8222d8de
JMF
689 def trouble(self, message=None, tb=None):
690 """Determine action to take when a download problem appears.
691
692 Depending on if the downloader has been configured to ignore
693 download errors or not, this method may throw an exception or
694 not when errors are found, after printing the message.
695
696 tb, if given, is additional traceback information.
697 """
698 if message is not None:
699 self.to_stderr(message)
700 if self.params.get('verbose'):
701 if tb is None:
702 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 703 tb = ''
8222d8de 704 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 705 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 706 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
707 else:
708 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 709 tb = ''.join(tb_data)
8222d8de
JMF
710 self.to_stderr(tb)
711 if not self.params.get('ignoreerrors', False):
712 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
713 exc_info = sys.exc_info()[1].exc_info
714 else:
715 exc_info = sys.exc_info()
716 raise DownloadError(message, exc_info)
717 self._download_retcode = 1
718
719 def report_warning(self, message):
720 '''
721 Print the message to stderr, it will be prefixed with 'WARNING:'
722 If stderr is a tty file the 'WARNING:' will be colored
723 '''
6d07ce01
JMF
724 if self.params.get('logger') is not None:
725 self.params['logger'].warning(message)
8222d8de 726 else:
ad8915b7
PH
727 if self.params.get('no_warnings'):
728 return
e9c0cdd3 729 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6d07ce01
JMF
730 _msg_header = '\033[0;33mWARNING:\033[0m'
731 else:
732 _msg_header = 'WARNING:'
733 warning_message = '%s %s' % (_msg_header, message)
734 self.to_stderr(warning_message)
8222d8de
JMF
735
736 def report_error(self, message, tb=None):
737 '''
738 Do the same as trouble, but prefixes the message with 'ERROR:', colored
739 in red if stderr is a tty file.
740 '''
e9c0cdd3 741 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6febd1c1 742 _msg_header = '\033[0;31mERROR:\033[0m'
8222d8de 743 else:
6febd1c1
PH
744 _msg_header = 'ERROR:'
745 error_message = '%s %s' % (_msg_header, message)
8222d8de
JMF
746 self.trouble(error_message, tb)
747
8222d8de
JMF
748 def report_file_already_downloaded(self, file_name):
749 """Report file has already been fully downloaded."""
750 try:
6febd1c1 751 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 752 except UnicodeEncodeError:
6febd1c1 753 self.to_screen('[download] The file has already been downloaded')
8222d8de 754
0c3d0f51 755 def report_file_delete(self, file_name):
756 """Report that existing file will be deleted."""
757 try:
c25228e5 758 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 759 except UnicodeEncodeError:
c25228e5 760 self.to_screen('Deleting existing file')
0c3d0f51 761
de6000d9 762 def parse_outtmpl(self):
763 outtmpl_dict = self.params.get('outtmpl', {})
764 if not isinstance(outtmpl_dict, dict):
765 outtmpl_dict = {'default': outtmpl_dict}
766 outtmpl_dict.update({
767 k: v for k, v in DEFAULT_OUTTMPL.items()
768 if not outtmpl_dict.get(k)})
769 for key, val in outtmpl_dict.items():
770 if isinstance(val, bytes):
771 self.report_warning(
772 'Parameter outtmpl is bytes, but should be a unicode string. '
773 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
774 return outtmpl_dict
775
143db31d 776 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
777 """ Make the template and info_dict suitable for substitution (outtmpl % info_dict)"""
778 template_dict = dict(info_dict)
779
780 # duration_string
781 template_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
782 formatSeconds(info_dict['duration'], '-')
783 if info_dict.get('duration', None) is not None
784 else None)
785
786 # epoch
787 template_dict['epoch'] = int(time.time())
788
789 # autonumber
790 autonumber_size = self.params.get('autonumber_size')
791 if autonumber_size is None:
792 autonumber_size = 5
793 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
794
795 # resolution if not defined
796 if template_dict.get('resolution') is None:
797 if template_dict.get('width') and template_dict.get('height'):
798 template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
799 elif template_dict.get('height'):
800 template_dict['resolution'] = '%sp' % template_dict['height']
801 elif template_dict.get('width'):
802 template_dict['resolution'] = '%dx?' % template_dict['width']
803
804 if sanitize is None:
805 sanitize = lambda k, v: v
806 template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
807 for k, v in template_dict.items()
808 if v is not None and not isinstance(v, (list, tuple, dict)))
809 na = self.params.get('outtmpl_na_placeholder', 'NA')
810 template_dict = collections.defaultdict(lambda: na, template_dict)
811
812 # For fields playlist_index and autonumber convert all occurrences
813 # of %(field)s to %(field)0Nd for backward compatibility
814 field_size_compat_map = {
815 'playlist_index': len(str(template_dict['n_entries'])),
816 'autonumber': autonumber_size,
817 }
818 FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
819 mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
820 if mobj:
821 outtmpl = re.sub(
822 FIELD_SIZE_COMPAT_RE,
823 r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
824 outtmpl)
825
826 numeric_fields = list(self._NUMERIC_FIELDS)
827
828 # Format date
829 FORMAT_DATE_RE = FORMAT_RE.format(r'(?P<key>(?P<field>\w+)>(?P<format>.+?))')
830 for mobj in re.finditer(FORMAT_DATE_RE, outtmpl):
831 conv_type, field, frmt, key = mobj.group('type', 'field', 'format', 'key')
832 if key in template_dict:
833 continue
834 value = strftime_or_none(template_dict.get(field), frmt, na)
835 if conv_type in 'crs': # string
836 value = sanitize(field, value)
837 else: # number
838 numeric_fields.append(key)
839 value = float_or_none(value, default=None)
840 if value is not None:
841 template_dict[key] = value
842
843 # Missing numeric fields used together with integer presentation types
844 # in format specification will break the argument substitution since
845 # string NA placeholder is returned for missing fields. We will patch
846 # output template for missing fields to meet string presentation type.
847 for numeric_field in numeric_fields:
848 if numeric_field not in template_dict:
849 outtmpl = re.sub(
850 FORMAT_RE.format(re.escape(numeric_field)),
851 r'%({0})s'.format(numeric_field), outtmpl)
852
853 return outtmpl, template_dict
854
de6000d9 855 def _prepare_filename(self, info_dict, tmpl_type='default'):
8222d8de 856 try:
586a91b6 857 sanitize = lambda k, v: sanitize_filename(
45598aab 858 compat_str(v),
1bb5c511 859 restricted=self.params.get('restrictfilenames'),
40df485f 860 is_id=(k == 'id' or k.endswith('_id')))
de6000d9 861 outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
143db31d 862 outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
d0d9ade4 863
15da37c7
S
864 # expand_path translates '%%' into '%' and '$$' into '$'
865 # correspondingly that is not what we want since we need to keep
866 # '%%' intact for template dict substitution step. Working around
867 # with boundary-alike separator hack.
961ea474 868 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
15da37c7
S
869 outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
870
871 # outtmpl should be expand_path'ed before template dict substitution
872 # because meta fields may contain env variables we don't want to
873 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
874 # title "Hello $PATH", we don't want `$PATH` to be expanded.
875 filename = expand_path(outtmpl).replace(sep, '') % template_dict
876
143db31d 877 force_ext = OUTTMPL_TYPES.get(tmpl_type)
de6000d9 878 if force_ext is not None:
879 filename = replace_extension(filename, force_ext, template_dict.get('ext'))
880
bdc3fd2f
U
881 # https://github.com/blackjack4494/youtube-dlc/issues/85
882 trim_file_name = self.params.get('trim_file_name', False)
883 if trim_file_name:
884 fn_groups = filename.rsplit('.')
885 ext = fn_groups[-1]
886 sub_ext = ''
887 if len(fn_groups) > 2:
888 sub_ext = fn_groups[-2]
889 filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
890
0202b52a 891 return filename
8222d8de 892 except ValueError as err:
6febd1c1 893 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
894 return None
895
de6000d9 896 def prepare_filename(self, info_dict, dir_type='', warn=False):
897 """Generate the output filename."""
0202b52a 898 paths = self.params.get('paths', {})
899 assert isinstance(paths, dict)
de6000d9 900 filename = self._prepare_filename(info_dict, dir_type or 'default')
901
902 if warn and not self.__prepare_filename_warned:
903 if not paths:
904 pass
905 elif filename == '-':
906 self.report_warning('--paths is ignored when an outputting to stdout')
907 elif os.path.isabs(filename):
908 self.report_warning('--paths is ignored since an absolute path is given in output template')
909 self.__prepare_filename_warned = True
910 if filename == '-' or not filename:
911 return filename
912
0202b52a 913 homepath = expand_path(paths.get('home', '').strip())
914 assert isinstance(homepath, compat_str)
915 subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
916 assert isinstance(subdir, compat_str)
c2934512 917 path = os.path.join(homepath, subdir, filename)
918
919 # Temporary fix for #4787
920 # 'Treat' all problem characters by passing filename through preferredencoding
921 # to workaround encoding issues with subprocess on python2 @ Windows
922 if sys.version_info < (3, 0) and sys.platform == 'win32':
923 path = encodeFilename(path, True).decode(preferredencoding())
924 return sanitize_path(path, force=self.params.get('windowsfilenames'))
0202b52a 925
442c37b7 926 def _match_entry(self, info_dict, incomplete):
ecdec191 927 """ Returns None if the file should be downloaded """
8222d8de 928
8b0d7497 929 def check_filter():
930 video_title = info_dict.get('title', info_dict.get('id', 'video'))
931 if 'title' in info_dict:
932 # This can happen when we're just evaluating the playlist
933 title = info_dict['title']
934 matchtitle = self.params.get('matchtitle', False)
935 if matchtitle:
936 if not re.search(matchtitle, title, re.IGNORECASE):
937 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
938 rejecttitle = self.params.get('rejecttitle', False)
939 if rejecttitle:
940 if re.search(rejecttitle, title, re.IGNORECASE):
941 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
942 date = info_dict.get('upload_date')
943 if date is not None:
944 dateRange = self.params.get('daterange', DateRange())
945 if date not in dateRange:
946 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
947 view_count = info_dict.get('view_count')
948 if view_count is not None:
949 min_views = self.params.get('min_views')
950 if min_views is not None and view_count < min_views:
951 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
952 max_views = self.params.get('max_views')
953 if max_views is not None and view_count > max_views:
954 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
955 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
956 return 'Skipping "%s" because it is age restricted' % video_title
957 if self.in_download_archive(info_dict):
958 return '%s has already been recorded in archive' % video_title
959
960 if not incomplete:
961 match_filter = self.params.get('match_filter')
962 if match_filter is not None:
963 ret = match_filter(info_dict)
964 if ret is not None:
965 return ret
966 return None
967
968 reason = check_filter()
969 if reason is not None:
970 self.to_screen('[download] ' + reason)
d83cb531 971 if reason.endswith('has already been recorded in the archive') and self.params.get('break_on_existing', False):
8b0d7497 972 raise ExistingVideoReached()
d83cb531 973 elif self.params.get('break_on_reject', False):
8b0d7497 974 raise RejectedVideoReached()
975 return reason
fe7e0c98 976
b6c45014
JMF
977 @staticmethod
978 def add_extra_info(info_dict, extra_info):
979 '''Set the keys from extra_info in info dict if they are missing'''
980 for key, value in extra_info.items():
981 info_dict.setdefault(key, value)
982
0704d222 983 def extract_info(self, url, download=True, ie_key=None, info_dict=None, extra_info={},
61aa5ba3 984 process=True, force_generic_extractor=False):
8222d8de
JMF
985 '''
986 Returns a list with a dictionary for each video we find.
987 If 'download', also downloads the videos.
988 extra_info is a dict containing the extra values to add to each result
613b2d9d 989 '''
fe7e0c98 990
61aa5ba3 991 if not ie_key and force_generic_extractor:
d22dec74
S
992 ie_key = 'Generic'
993
8222d8de 994 if ie_key:
56c73665 995 ies = [self.get_info_extractor(ie_key)]
8222d8de
JMF
996 else:
997 ies = self._ies
998
999 for ie in ies:
1000 if not ie.suitable(url):
1001 continue
1002
9a68de12 1003 ie_key = ie.ie_key()
1004 ie = self.get_info_extractor(ie_key)
8222d8de 1005 if not ie.working():
6febd1c1
PH
1006 self.report_warning('The program functionality for this site has been marked as broken, '
1007 'and will probably not work.')
8222d8de
JMF
1008
1009 try:
d0757229 1010 temp_id = str_or_none(
63be1aab 1011 ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
1012 else ie._match_id(url))
a0566bbf 1013 except (AssertionError, IndexError, AttributeError):
1014 temp_id = None
1015 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1016 self.to_screen("[%s] %s: has already been recorded in archive" % (
1017 ie_key, temp_id))
1018 break
a0566bbf 1019 return self.__extract_info(url, ie, download, extra_info, process, info_dict)
a0566bbf 1020 else:
1021 self.report_error('no suitable InfoExtractor for URL %s' % url)
1022
1023 def __handle_extraction_exceptions(func):
1024 def wrapper(self, *args, **kwargs):
1025 try:
1026 return func(self, *args, **kwargs)
773f291d
S
1027 except GeoRestrictedError as e:
1028 msg = e.msg
1029 if e.countries:
1030 msg += '\nThis video is available in %s.' % ', '.join(
1031 map(ISO3166Utils.short2full, e.countries))
1032 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1033 self.report_error(msg)
fb043a6e 1034 except ExtractorError as e: # An error we somewhat expected
2c74e6fa 1035 self.report_error(compat_str(e), e.format_traceback())
8b0d7497 1036 except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
d3e5bbf4 1037 raise
8222d8de
JMF
1038 except Exception as e:
1039 if self.params.get('ignoreerrors', False):
9b9c5355 1040 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
8222d8de
JMF
1041 else:
1042 raise
a0566bbf 1043 return wrapper
1044
1045 @__handle_extraction_exceptions
1046 def __extract_info(self, url, ie, download, extra_info, process, info_dict):
1047 ie_result = ie.extract(url)
1048 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1049 return
1050 if isinstance(ie_result, list):
1051 # Backwards compatibility: old IE result format
1052 ie_result = {
1053 '_type': 'compat_list',
1054 'entries': ie_result,
1055 }
1056 if info_dict:
1057 if info_dict.get('id'):
1058 ie_result['id'] = info_dict['id']
1059 if info_dict.get('title'):
1060 ie_result['title'] = info_dict['title']
1061 self.add_default_extra_info(ie_result, ie, url)
1062 if process:
1063 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1064 else:
a0566bbf 1065 return ie_result
fe7e0c98 1066
ea38e55f
PH
1067 def add_default_extra_info(self, ie_result, ie, url):
1068 self.add_extra_info(ie_result, {
1069 'extractor': ie.IE_NAME,
1070 'webpage_url': url,
1071 'webpage_url_basename': url_basename(url),
1072 'extractor_key': ie.ie_key(),
1073 })
1074
8222d8de
JMF
1075 def process_ie_result(self, ie_result, download=True, extra_info={}):
1076 """
1077 Take the result of the ie(may be modified) and resolve all unresolved
1078 references (URLs, playlist items).
1079
1080 It will also download the videos if 'download'.
1081 Returns the resolved ie_result.
1082 """
e8ee972c
PH
1083 result_type = ie_result.get('_type', 'video')
1084
057a5206 1085 if result_type in ('url', 'url_transparent'):
134c6ea8 1086 ie_result['url'] = sanitize_url(ie_result['url'])
057a5206 1087 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1088 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1089 or extract_flat is True):
de6000d9 1090 self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
e8ee972c
PH
1091 return ie_result
1092
8222d8de 1093 if result_type == 'video':
b6c45014 1094 self.add_extra_info(ie_result, extra_info)
feee2ecf 1095 return self.process_video_result(ie_result, download=download)
8222d8de
JMF
1096 elif result_type == 'url':
1097 # We have to add extra_info to the results because it may be
1098 # contained in a playlist
1099 return self.extract_info(ie_result['url'],
0704d222 1100 download, info_dict=ie_result,
8222d8de
JMF
1101 ie_key=ie_result.get('ie_key'),
1102 extra_info=extra_info)
7fc3fa05
PH
1103 elif result_type == 'url_transparent':
1104 # Use the information from the embedding page
1105 info = self.extract_info(
1106 ie_result['url'], ie_key=ie_result.get('ie_key'),
1107 extra_info=extra_info, download=False, process=False)
1108
1640eb09
S
1109 # extract_info may return None when ignoreerrors is enabled and
1110 # extraction failed with an error, don't crash and return early
1111 # in this case
1112 if not info:
1113 return info
1114
412c617d
PH
1115 force_properties = dict(
1116 (k, v) for k, v in ie_result.items() if v is not None)
0396806f 1117 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
412c617d
PH
1118 if f in force_properties:
1119 del force_properties[f]
1120 new_result = info.copy()
1121 new_result.update(force_properties)
7fc3fa05 1122
0563f7ac
S
1123 # Extracted info may not be a video result (i.e.
1124 # info.get('_type', 'video') != video) but rather an url or
1125 # url_transparent. In such cases outer metadata (from ie_result)
1126 # should be propagated to inner one (info). For this to happen
1127 # _type of info should be overridden with url_transparent. This
067aa17e 1128 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1129 if new_result.get('_type') == 'url':
1130 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1131
1132 return self.process_ie_result(
1133 new_result, download=download, extra_info=extra_info)
40fcba5e 1134 elif result_type in ('playlist', 'multi_video'):
30a074c2 1135 # Protect from infinite recursion due to recursively nested playlists
1136 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1137 webpage_url = ie_result['webpage_url']
1138 if webpage_url in self._playlist_urls:
7e85e872 1139 self.to_screen(
30a074c2 1140 '[download] Skipping already downloaded playlist: %s'
1141 % ie_result.get('title') or ie_result.get('id'))
1142 return
7e85e872 1143
30a074c2 1144 self._playlist_level += 1
1145 self._playlist_urls.add(webpage_url)
1146 try:
1147 return self.__process_playlist(ie_result, download)
1148 finally:
1149 self._playlist_level -= 1
1150 if not self._playlist_level:
1151 self._playlist_urls.clear()
8222d8de 1152 elif result_type == 'compat_list':
c9bf4114
PH
1153 self.report_warning(
1154 'Extractor %s returned a compat_list result. '
1155 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1156
8222d8de 1157 def _fixup(r):
9e1a5b84
JW
1158 self.add_extra_info(
1159 r,
9103bbc5
JMF
1160 {
1161 'extractor': ie_result['extractor'],
1162 'webpage_url': ie_result['webpage_url'],
29eb5174 1163 'webpage_url_basename': url_basename(ie_result['webpage_url']),
be97abc2 1164 'extractor_key': ie_result['extractor_key'],
9e1a5b84
JW
1165 }
1166 )
8222d8de
JMF
1167 return r
1168 ie_result['entries'] = [
b6c45014 1169 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1170 for r in ie_result['entries']
1171 ]
1172 return ie_result
1173 else:
1174 raise Exception('Invalid result type: %s' % result_type)
1175
e92caff5 1176 def _ensure_dir_exists(self, path):
1177 return make_dir(path, self.report_error)
1178
30a074c2 1179 def __process_playlist(self, ie_result, download):
1180 # We process each entry in the playlist
1181 playlist = ie_result.get('title') or ie_result.get('id')
1182 self.to_screen('[download] Downloading playlist: %s' % playlist)
1183
498f5606 1184 if 'entries' not in ie_result:
1185 raise EntryNotInPlaylist()
1186 incomplete_entries = bool(ie_result.get('requested_entries'))
1187 if incomplete_entries:
1188 def fill_missing_entries(entries, indexes):
1189 ret = [None] * max(*indexes)
1190 for i, entry in zip(indexes, entries):
1191 ret[i - 1] = entry
1192 return ret
1193 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
02fd60d3 1194
30a074c2 1195 playlist_results = []
1196
1197 playliststart = self.params.get('playliststart', 1) - 1
1198 playlistend = self.params.get('playlistend')
1199 # For backwards compatibility, interpret -1 as whole list
1200 if playlistend == -1:
1201 playlistend = None
1202
1203 playlistitems_str = self.params.get('playlist_items')
1204 playlistitems = None
1205 if playlistitems_str is not None:
1206 def iter_playlistitems(format):
1207 for string_segment in format.split(','):
1208 if '-' in string_segment:
1209 start, end = string_segment.split('-')
1210 for item in range(int(start), int(end) + 1):
1211 yield int(item)
1212 else:
1213 yield int(string_segment)
1214 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
1215
1216 ie_entries = ie_result['entries']
1217
1218 def make_playlistitems_entries(list_ie_entries):
1219 num_entries = len(list_ie_entries)
498f5606 1220 for i in playlistitems:
1221 if -num_entries < i <= num_entries:
1222 yield list_ie_entries[i - 1]
1223 elif incomplete_entries:
1224 raise EntryNotInPlaylist()
30a074c2 1225
1226 if isinstance(ie_entries, list):
1227 n_all_entries = len(ie_entries)
1228 if playlistitems:
498f5606 1229 entries = list(make_playlistitems_entries(ie_entries))
30a074c2 1230 else:
1231 entries = ie_entries[playliststart:playlistend]
1232 n_entries = len(entries)
498f5606 1233 msg = 'Collected %d videos; downloading %d of them' % (n_all_entries, n_entries)
30a074c2 1234 elif isinstance(ie_entries, PagedList):
1235 if playlistitems:
1236 entries = []
1237 for item in playlistitems:
1238 entries.extend(ie_entries.getslice(
1239 item - 1, item
1240 ))
1241 else:
1242 entries = ie_entries.getslice(
1243 playliststart, playlistend)
1244 n_entries = len(entries)
498f5606 1245 msg = 'Downloading %d videos' % n_entries
30a074c2 1246 else: # iterable
1247 if playlistitems:
498f5606 1248 entries = list(make_playlistitems_entries(list(itertools.islice(
1249 ie_entries, 0, max(playlistitems)))))
30a074c2 1250 else:
1251 entries = list(itertools.islice(
1252 ie_entries, playliststart, playlistend))
1253 n_entries = len(entries)
498f5606 1254 msg = 'Downloading %d videos' % n_entries
1255
1256 if any((entry is None for entry in entries)):
1257 raise EntryNotInPlaylist()
1258 if not playlistitems and (playliststart or playlistend):
1259 playlistitems = list(range(1 + playliststart, 1 + playliststart + len(entries)))
1260 ie_result['entries'] = entries
1261 ie_result['requested_entries'] = playlistitems
1262
1263 if self.params.get('allow_playlist_files', True):
1264 ie_copy = {
1265 'playlist': playlist,
1266 'playlist_id': ie_result.get('id'),
1267 'playlist_title': ie_result.get('title'),
1268 'playlist_uploader': ie_result.get('uploader'),
1269 'playlist_uploader_id': ie_result.get('uploader_id'),
1270 'playlist_index': 0
1271 }
1272 ie_copy.update(dict(ie_result))
1273
1274 if self.params.get('writeinfojson', False):
1275 infofn = self.prepare_filename(ie_copy, 'pl_infojson')
1276 if not self._ensure_dir_exists(encodeFilename(infofn)):
1277 return
1278 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
1279 self.to_screen('[info] Playlist metadata is already present')
1280 else:
1281 self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
1282 try:
1283 write_json_file(self.filter_requested_info(ie_result, self.params.get('clean_infojson', True)), infofn)
1284 except (OSError, IOError):
1285 self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
1286
1287 if self.params.get('writedescription', False):
1288 descfn = self.prepare_filename(ie_copy, 'pl_description')
1289 if not self._ensure_dir_exists(encodeFilename(descfn)):
1290 return
1291 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
1292 self.to_screen('[info] Playlist description is already present')
1293 elif ie_result.get('description') is None:
1294 self.report_warning('There\'s no playlist description to write.')
1295 else:
1296 try:
1297 self.to_screen('[info] Writing playlist description to: ' + descfn)
1298 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1299 descfile.write(ie_result['description'])
1300 except (OSError, IOError):
1301 self.report_error('Cannot write playlist description file ' + descfn)
1302 return
30a074c2 1303
1304 if self.params.get('playlistreverse', False):
1305 entries = entries[::-1]
30a074c2 1306 if self.params.get('playlistrandom', False):
1307 random.shuffle(entries)
1308
1309 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1310
498f5606 1311 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg))
30a074c2 1312 for i, entry in enumerate(entries, 1):
1313 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1314 # This __x_forwarded_for_ip thing is a bit ugly but requires
1315 # minimal changes
1316 if x_forwarded_for:
1317 entry['__x_forwarded_for_ip'] = x_forwarded_for
1318 extra = {
1319 'n_entries': n_entries,
1320 'playlist': playlist,
1321 'playlist_id': ie_result.get('id'),
1322 'playlist_title': ie_result.get('title'),
1323 'playlist_uploader': ie_result.get('uploader'),
1324 'playlist_uploader_id': ie_result.get('uploader_id'),
498f5606 1325 'playlist_index': playlistitems[i - 1] if playlistitems else i,
30a074c2 1326 'extractor': ie_result['extractor'],
1327 'webpage_url': ie_result['webpage_url'],
1328 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1329 'extractor_key': ie_result['extractor_key'],
1330 }
1331
1332 if self._match_entry(entry, incomplete=True) is not None:
1333 continue
1334
1335 entry_result = self.__process_iterable_entry(entry, download, extra)
1336 # TODO: skip failed (empty) entries?
1337 playlist_results.append(entry_result)
1338 ie_result['entries'] = playlist_results
1339 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1340 return ie_result
1341
a0566bbf 1342 @__handle_extraction_exceptions
1343 def __process_iterable_entry(self, entry, download, extra_info):
1344 return self.process_ie_result(
1345 entry, download=download, extra_info=extra_info)
1346
67134eab
JMF
1347 def _build_format_filter(self, filter_spec):
1348 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1349
1350 OPERATORS = {
1351 '<': operator.lt,
1352 '<=': operator.le,
1353 '>': operator.gt,
1354 '>=': operator.ge,
1355 '=': operator.eq,
1356 '!=': operator.ne,
1357 }
67134eab 1358 operator_rex = re.compile(r'''(?x)\s*
a03a3c80 1359 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
083c9df9
PH
1360 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1361 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
67134eab 1362 $
083c9df9 1363 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
67134eab 1364 m = operator_rex.search(filter_spec)
9ddb6925
S
1365 if m:
1366 try:
1367 comparison_value = int(m.group('value'))
1368 except ValueError:
1369 comparison_value = parse_filesize(m.group('value'))
1370 if comparison_value is None:
1371 comparison_value = parse_filesize(m.group('value') + 'B')
1372 if comparison_value is None:
1373 raise ValueError(
1374 'Invalid value %r in format specification %r' % (
67134eab 1375 m.group('value'), filter_spec))
9ddb6925
S
1376 op = OPERATORS[m.group('op')]
1377
083c9df9 1378 if not m:
9ddb6925
S
1379 STR_OPERATORS = {
1380 '=': operator.eq,
10d33b34
YCH
1381 '^=': lambda attr, value: attr.startswith(value),
1382 '$=': lambda attr, value: attr.endswith(value),
1383 '*=': lambda attr, value: value in attr,
9ddb6925 1384 }
67134eab 1385 str_operator_rex = re.compile(r'''(?x)
f96bff99 1386 \s*(?P<key>[a-zA-Z0-9._-]+)
2cc779f4 1387 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
b0df5223 1388 \s*(?P<value>[a-zA-Z0-9._-]+)
67134eab 1389 \s*$
9ddb6925 1390 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
67134eab 1391 m = str_operator_rex.search(filter_spec)
9ddb6925
S
1392 if m:
1393 comparison_value = m.group('value')
2cc779f4
S
1394 str_op = STR_OPERATORS[m.group('op')]
1395 if m.group('negation'):
e118a879 1396 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1397 else:
1398 op = str_op
083c9df9 1399
9ddb6925 1400 if not m:
67134eab 1401 raise ValueError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1402
1403 def _filter(f):
1404 actual_value = f.get(m.group('key'))
1405 if actual_value is None:
1406 return m.group('none_inclusive')
1407 return op(actual_value, comparison_value)
67134eab
JMF
1408 return _filter
1409
0017d9ad 1410 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1411
af0f7428
S
1412 def can_merge():
1413 merger = FFmpegMergerPP(self)
1414 return merger.available and merger.can_merge()
1415
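# Prefer a single pre-merged file when not simulating and actually downloading, if merging is impossible (no working ffmpeg), for live streams, or when writing to stdout ('-')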
91ebc640 1416 prefer_best = (
1417 not self.params.get('simulate', False)
1418 and download
1419 and (
1420 not can_merge()
19807826 1421 or info_dict.get('is_live', False)
de6000d9 1422 or self.outtmpl_dict['default'] == '-'))
91ebc640 1423
1424 return (
1425 'best/bestvideo+bestaudio'
1426 if prefer_best
1427 else 'bestvideo*+bestaudio/best'
19807826 1428 if not self.params.get('allow_multiple_audio_streams', False)
91ebc640 1429 else 'bestvideo+bestaudio/best')
0017d9ad 1430
67134eab
JMF
1431 def build_format_selector(self, format_spec):
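# Turns a format selection expression such as 'bestvideo[height<=1080]+bestaudio/best' into a selector function; the spec is tokenized with the tokenize module and combined with ',' (download all), '/' (first available), '+' (merge), '(...)' (grouping) and '[...]' (filters)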
1432 def syntax_error(note, start):
1433 message = (
1434 'Invalid format specification: '
1435 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1436 return SyntaxError(message)
1437
1438 PICKFIRST = 'PICKFIRST'
1439 MERGE = 'MERGE'
1440 SINGLE = 'SINGLE'
0130afb7 1441 GROUP = 'GROUP'
67134eab
JMF
1442 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1443
91ebc640 1444 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1445 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1446
67134eab
JMF
1447 def _parse_filter(tokens):
1448 filter_parts = []
1449 for type, string, start, _, _ in tokens:
1450 if type == tokenize.OP and string == ']':
1451 return ''.join(filter_parts)
1452 else:
1453 filter_parts.append(string)
1454
232541df 1455 def _remove_unused_ops(tokens):
17cc1534 1456 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1457 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1458 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1459 last_string, last_start, last_end, last_line = None, None, None, None
1460 for type, string, start, end, line in tokens:
1461 if type == tokenize.OP and string == '[':
1462 if last_string:
1463 yield tokenize.NAME, last_string, last_start, last_end, last_line
1464 last_string = None
1465 yield type, string, start, end, line
1466 # everything inside brackets will be handled by _parse_filter
1467 for type, string, start, end, line in tokens:
1468 yield type, string, start, end, line
1469 if type == tokenize.OP and string == ']':
1470 break
1471 elif type == tokenize.OP and string in ALLOWED_OPS:
1472 if last_string:
1473 yield tokenize.NAME, last_string, last_start, last_end, last_line
1474 last_string = None
1475 yield type, string, start, end, line
1476 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1477 if not last_string:
1478 last_string = string
1479 last_start = start
1480 last_end = end
1481 else:
1482 last_string += string
1483 if last_string:
1484 yield tokenize.NAME, last_string, last_start, last_end, last_line
1485
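# Recursive-descent parser: builds FormatSelector tuples from the token stream, stopping at the delimiters of the enclosing merge/choice/group context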
cf2ac6df 1486 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1487 selectors = []
1488 current_selector = None
1489 for type, string, start, _, _ in tokens:
1490 # ENCODING is only defined in python 3.x
1491 if type == getattr(tokenize, 'ENCODING', None):
1492 continue
1493 elif type in [tokenize.NAME, tokenize.NUMBER]:
1494 current_selector = FormatSelector(SINGLE, string, [])
1495 elif type == tokenize.OP:
cf2ac6df
JMF
1496 if string == ')':
1497 if not inside_group:
1498 # ')' will be handled by the parentheses group
1499 tokens.restore_last_token()
67134eab 1500 break
cf2ac6df 1501 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1502 tokens.restore_last_token()
1503 break
cf2ac6df
JMF
1504 elif inside_choice and string == ',':
1505 tokens.restore_last_token()
1506 break
1507 elif string == ',':
0a31a350
JMF
1508 if not current_selector:
1509 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1510 selectors.append(current_selector)
1511 current_selector = None
1512 elif string == '/':
d96d604e
JMF
1513 if not current_selector:
1514 raise syntax_error('"/" must follow a format selector', start)
67134eab 1515 first_choice = current_selector
cf2ac6df 1516 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1517 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1518 elif string == '[':
1519 if not current_selector:
1520 current_selector = FormatSelector(SINGLE, 'best', [])
1521 format_filter = _parse_filter(tokens)
1522 current_selector.filters.append(format_filter)
0130afb7
JMF
1523 elif string == '(':
1524 if current_selector:
1525 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1526 group = _parse_format_selection(tokens, inside_group=True)
1527 current_selector = FormatSelector(GROUP, group, [])
67134eab 1528 elif string == '+':
d03cfdce 1529 if not current_selector:
1530 raise syntax_error('Unexpected "+"', start)
1531 selector_1 = current_selector
1532 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1533 if not selector_2:
1534 raise syntax_error('Expected a selector', start)
1535 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab
JMF
1536 else:
1537 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1538 elif type == tokenize.ENDMARKER:
1539 break
1540 if current_selector:
1541 selectors.append(current_selector)
1542 return selectors
1543
1544 def _build_selector_function(selector):
909d24dd 1545 if isinstance(selector, list): # ,
67134eab
JMF
1546 fs = [_build_selector_function(s) for s in selector]
1547
317f7ab6 1548 def selector_function(ctx):
67134eab 1549 for f in fs:
317f7ab6 1550 for format in f(ctx):
67134eab
JMF
1551 yield format
1552 return selector_function
909d24dd 1553
1554 elif selector.type == GROUP: # ()
0130afb7 1555 selector_function = _build_selector_function(selector.selector)
909d24dd 1556
1557 elif selector.type == PICKFIRST: # /
67134eab
JMF
1558 fs = [_build_selector_function(s) for s in selector.selector]
1559
317f7ab6 1560 def selector_function(ctx):
67134eab 1561 for f in fs:
317f7ab6 1562 picked_formats = list(f(ctx))
67134eab
JMF
1563 if picked_formats:
1564 return picked_formats
1565 return []
67134eab 1566
909d24dd 1567 elif selector.type == SINGLE: # atom
1568 format_spec = selector.selector if selector.selector is not None else 'best'
1569
1570 if format_spec == 'all':
1571 def selector_function(ctx):
1572 formats = list(ctx['formats'])
1573 if formats:
1574 for f in formats:
1575 yield f
1576
1577 else:
1578 format_fallback = False
eff63539 1579 mobj = re.match(
1580 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1581 format_spec)
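# Atoms like 'b'/'best', 'w'/'worst', 'bv', 'ba', 'wv.2' or 'bv*' select the N-th best/worst format: 'v'/'a' restrict to video-only/audio-only, '*' also allows formats carrying both streams, and '.N' picks the N-th match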
1582 if mobj is not None:
1583 format_idx = int_or_none(mobj.group('n'), default=1)
1584 format_idx = format_idx - 1 if mobj.group('bw')[0] == 'w' else -format_idx
1585 format_type = (mobj.group('type') or [None])[0]
1586 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1587 format_modified = mobj.group('mod') is not None
909d24dd 1588
1589 format_fallback = not format_type and not format_modified # for b, w
eff63539 1590 filter_f = (
1591 (lambda f: f.get('%scodec' % format_type) != 'none')
1592 if format_type and format_modified # bv*, ba*, wv*, wa*
1593 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1594 if format_type # bv, ba, wv, wa
1595 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1596 if not format_modified # b, w
1597 else None) # b*, w*
67134eab 1598 else:
909d24dd 1599 format_idx = -1
1600 filter_f = ((lambda f: f.get('ext') == format_spec)
1601 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1602 else (lambda f: f.get('format_id') == format_spec)) # id
1603
1604 def selector_function(ctx):
1605 formats = list(ctx['formats'])
1606 if not formats:
1607 return
1608 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
eff63539 1609 n = len(matches)
1610 if -n <= format_idx < n:
909d24dd 1611 yield matches[format_idx]
eff63539 1612 elif format_fallback and ctx['incomplete_formats']:
909d24dd 1613 # for extractors with incomplete formats (audio only (soundcloud)
1614 # or video only (imgur)) best/worst will fallback to
1615 # best/worst {video,audio}-only format
eff63539 1616 n = len(formats)
1617 if -n <= format_idx < n:
1618 yield formats[format_idx]
909d24dd 1619
1620 elif selector.type == MERGE: # +
d03cfdce 1621 def _merge(formats_pair):
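# Merge the selected video and audio formats into one synthetic format dict: the originals go into 'requested_formats', and the container ext comes from --merge-output-format, from the single video/audio format, or defaults to mkv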
1622 format_1, format_2 = formats_pair
1623
1624 formats_info = []
1625 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1626 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1627
909d24dd 1628 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1629 get_no_more = {"video": False, "audio": False}
1630 for (i, fmt_info) in enumerate(formats_info):
1631 for aud_vid in ["audio", "video"]:
1632 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1633 if get_no_more[aud_vid]:
1634 formats_info.pop(i)
1635 get_no_more[aud_vid] = True
1636
1637 if len(formats_info) == 1:
1638 return formats_info[0]
1639
d03cfdce 1640 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1641 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1642
1643 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1644 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1645
1646 output_ext = self.params.get('merge_output_format')
1647 if not output_ext:
1648 if the_only_video:
1649 output_ext = the_only_video['ext']
1650 elif the_only_audio and not video_fmts:
1651 output_ext = the_only_audio['ext']
1652 else:
1653 output_ext = 'mkv'
1654
1655 new_dict = {
67134eab 1656 'requested_formats': formats_info,
d03cfdce 1657 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1658 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
67134eab
JMF
1659 'ext': output_ext,
1660 }
d03cfdce 1661
1662 if the_only_video:
1663 new_dict.update({
1664 'width': the_only_video.get('width'),
1665 'height': the_only_video.get('height'),
35615307 1666 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
d03cfdce 1667 'fps': the_only_video.get('fps'),
1668 'vcodec': the_only_video.get('vcodec'),
1669 'vbr': the_only_video.get('vbr'),
1670 'stretched_ratio': the_only_video.get('stretched_ratio'),
1671 })
1672
1673 if the_only_audio:
1674 new_dict.update({
1675 'acodec': the_only_audio.get('acodec'),
1676 'abr': the_only_audio.get('abr'),
1677 })
1678
1679 return new_dict
1680
1681 selector_1, selector_2 = map(_build_selector_function, selector.selector)
083c9df9 1682
317f7ab6
S
1683 def selector_function(ctx):
1684 for pair in itertools.product(
d03cfdce 1685 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
67134eab 1686 yield _merge(pair)
083c9df9 1687
67134eab 1688 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1689
317f7ab6
S
1690 def final_selector(ctx):
1691 ctx_copy = copy.deepcopy(ctx)
67134eab 1692 for _filter in filters:
317f7ab6
S
1693 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1694 return selector_function(ctx_copy)
67134eab 1695 return final_selector
083c9df9 1696
67134eab 1697 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1698 try:
232541df 1699 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7
JMF
1700 except tokenize.TokenError:
1701 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1702
1703 class TokenIterator(object):
1704 def __init__(self, tokens):
1705 self.tokens = tokens
1706 self.counter = 0
1707
1708 def __iter__(self):
1709 return self
1710
1711 def __next__(self):
1712 if self.counter >= len(self.tokens):
1713 raise StopIteration()
1714 value = self.tokens[self.counter]
1715 self.counter += 1
1716 return value
1717
1718 next = __next__
1719
1720 def restore_last_token(self):
1721 self.counter -= 1
1722
1723 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1724 return _build_selector_function(parsed_selector)
a9c58ad9 1725
e5660ee6
JMF
1726 def _calc_headers(self, info_dict):
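# Combine the global std_headers with the format's own 'http_headers', plus the matching cookies and any per-request X-Forwarded-For IP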
1727 res = std_headers.copy()
1728
1729 add_headers = info_dict.get('http_headers')
1730 if add_headers:
1731 res.update(add_headers)
1732
1733 cookies = self._calc_cookies(info_dict)
1734 if cookies:
1735 res['Cookie'] = cookies
1736
0016b84e
S
1737 if 'X-Forwarded-For' not in res:
1738 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1739 if x_forwarded_for_ip:
1740 res['X-Forwarded-For'] = x_forwarded_for_ip
1741
e5660ee6
JMF
1742 return res
1743
1744 def _calc_cookies(self, info_dict):
5c2266df 1745 pr = sanitized_Request(info_dict['url'])
e5660ee6 1746 self.cookiejar.add_cookie_header(pr)
662435f7 1747 return pr.get_header('Cookie')
e5660ee6 1748
dd82ffea
JMF
1749 def process_video_result(self, info_dict, download=True):
1750 assert info_dict.get('_type', 'video') == 'video'
1751
bec1fad2
PH
1752 if 'id' not in info_dict:
1753 raise ExtractorError('Missing "id" field in extractor result')
1754 if 'title' not in info_dict:
1755 raise ExtractorError('Missing "title" field in extractor result')
1756
c9969434
S
1757 def report_force_conversion(field, field_not, conversion):
1758 self.report_warning(
1759 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1760 % (field, field_not, conversion))
1761
1762 def sanitize_string_field(info, string_field):
1763 field = info.get(string_field)
1764 if field is None or isinstance(field, compat_str):
1765 return
1766 report_force_conversion(string_field, 'a string', 'string')
1767 info[string_field] = compat_str(field)
1768
1769 def sanitize_numeric_fields(info):
1770 for numeric_field in self._NUMERIC_FIELDS:
1771 field = info.get(numeric_field)
1772 if field is None or isinstance(field, compat_numeric_types):
1773 continue
1774 report_force_conversion(numeric_field, 'numeric', 'int')
1775 info[numeric_field] = int_or_none(field)
1776
1777 sanitize_string_field(info_dict, 'id')
1778 sanitize_numeric_fields(info_dict)
be6217b2 1779
dd82ffea
JMF
1780 if 'playlist' not in info_dict:
1781 # It isn't part of a playlist
1782 info_dict['playlist'] = None
1783 info_dict['playlist_index'] = None
1784
d5519808 1785 thumbnails = info_dict.get('thumbnails')
cfb56d1a
PH
1786 if thumbnails is None:
1787 thumbnail = info_dict.get('thumbnail')
1788 if thumbnail:
a7a14d95 1789 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
d5519808 1790 if thumbnails:
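# Sort thumbnails worst-to-best (preference, then width/height), so the last entry can be used as the main thumbnail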
be6d7229 1791 thumbnails.sort(key=lambda t: (
d37708fc
RA
1792 t.get('preference') if t.get('preference') is not None else -1,
1793 t.get('width') if t.get('width') is not None else -1,
1794 t.get('height') if t.get('height') is not None else -1,
1795 t.get('id') if t.get('id') is not None else '', t.get('url')))
f6c24009 1796 for i, t in enumerate(thumbnails):
dcf77cf1 1797 t['url'] = sanitize_url(t['url'])
9603e8a7 1798 if t.get('width') and t.get('height'):
d5519808 1799 t['resolution'] = '%dx%d' % (t['width'], t['height'])
f6c24009
PH
1800 if t.get('id') is None:
1801 t['id'] = '%d' % i
d5519808 1802
b7b72db9 1803 if self.params.get('list_thumbnails'):
1804 self.list_thumbnails(info_dict)
1805 return
1806
536a55da
S
1807 thumbnail = info_dict.get('thumbnail')
1808 if thumbnail:
1809 info_dict['thumbnail'] = sanitize_url(thumbnail)
1810 elif thumbnails:
d5519808
PH
1811 info_dict['thumbnail'] = thumbnails[-1]['url']
1812
c9ae7b95 1813 if 'display_id' not in info_dict and 'id' in info_dict:
0afef30b
PH
1814 info_dict['display_id'] = info_dict['id']
1815
10db0d2f 1816 for ts_key, date_key in (
1817 ('timestamp', 'upload_date'),
1818 ('release_timestamp', 'release_date'),
1819 ):
1820 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
1821 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1822 # see http://bugs.python.org/issue1646728)
1823 try:
1824 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
1825 info_dict[date_key] = upload_date.strftime('%Y%m%d')
1826 except (ValueError, OverflowError, OSError):
1827 pass
9d2ecdbc 1828
33d2fc2f
S
1829 # Auto generate title fields corresponding to the *_number fields when missing
1830 # in order to always have clean titles. This is very common for TV series.
1831 for field in ('chapter', 'season', 'episode'):
1832 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1833 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1834
05108a49
S
1835 for cc_kind in ('subtitles', 'automatic_captions'):
1836 cc = info_dict.get(cc_kind)
1837 if cc:
1838 for _, subtitle in cc.items():
1839 for subtitle_format in subtitle:
1840 if subtitle_format.get('url'):
1841 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1842 if subtitle_format.get('ext') is None:
1843 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1844
1845 automatic_captions = info_dict.get('automatic_captions')
4bba3716 1846 subtitles = info_dict.get('subtitles')
4bba3716 1847
a504ced0 1848 if self.params.get('listsubtitles', False):
360e1ca5 1849 if 'automatic_captions' in info_dict:
05108a49
S
1850 self.list_subtitles(
1851 info_dict['id'], automatic_captions, 'automatic captions')
4bba3716 1852 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
a504ced0 1853 return
05108a49 1854
360e1ca5 1855 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 1856 info_dict['id'], subtitles, automatic_captions)
a504ced0 1857
dd82ffea
JMF
1858 # We now pick which formats have to be downloaded
1859 if info_dict.get('formats') is None:
1860 # There's only one format available
1861 formats = [info_dict]
1862 else:
1863 formats = info_dict['formats']
1864
db95dc13
PH
1865 if not formats:
1866 raise ExtractorError('No video formats found!')
1867
73af5cc8
S
1868 def is_wellformed(f):
1869 url = f.get('url')
a5ac0c47 1870 if not url:
73af5cc8
S
1871 self.report_warning(
1872 '"url" field is missing or empty - skipping format, '
1873 'there is an error in extractor')
a5ac0c47
S
1874 return False
1875 if isinstance(url, bytes):
1876 sanitize_string_field(f, 'url')
1877 return True
73af5cc8
S
1878
1879 # Filter out malformed formats for better extraction robustness
1880 formats = list(filter(is_wellformed, formats))
1881
181c7053
S
1882 formats_dict = {}
1883
dd82ffea 1884 # We check that all the formats have the format and format_id fields
db95dc13 1885 for i, format in enumerate(formats):
c9969434
S
1886 sanitize_string_field(format, 'format_id')
1887 sanitize_numeric_fields(format)
dcf77cf1 1888 format['url'] = sanitize_url(format['url'])
e74e3b63 1889 if not format.get('format_id'):
8016c922 1890 format['format_id'] = compat_str(i)
e2effb08
S
1891 else:
1892 # Sanitize format_id from characters used in format selector expression
ec85ded8 1893 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
1894 format_id = format['format_id']
1895 if format_id not in formats_dict:
1896 formats_dict[format_id] = []
1897 formats_dict[format_id].append(format)
1898
1899 # Make sure all formats have unique format_id
1900 for format_id, ambiguous_formats in formats_dict.items():
1901 if len(ambiguous_formats) > 1:
1902 for i, format in enumerate(ambiguous_formats):
1903 format['format_id'] = '%s-%d' % (format_id, i)
1904
1905 for i, format in enumerate(formats):
8c51aa65 1906 if format.get('format') is None:
6febd1c1 1907 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
1908 id=format['format_id'],
1909 res=self.format_resolution(format),
6febd1c1 1910 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
8c51aa65 1911 )
c1002e96 1912 # Automatically determine file extension if missing
5b1d8575 1913 if format.get('ext') is None:
cce929ea 1914 format['ext'] = determine_ext(format['url']).lower()
b5559424
S
1915 # Automatically determine protocol if missing (useful for format
1916 # selection purposes)
6f0be937 1917 if format.get('protocol') is None:
b5559424 1918 format['protocol'] = determine_protocol(format)
e5660ee6
JMF
1919 # Add HTTP headers, so that external programs can use them from the
1920 # json output
1921 full_format_info = info_dict.copy()
1922 full_format_info.update(format)
1923 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
1924 # Remove private housekeeping stuff
1925 if '__x_forwarded_for_ip' in info_dict:
1926 del info_dict['__x_forwarded_for_ip']
dd82ffea 1927
4bcc7bd1 1928 # TODO Central sorting goes here
99e206d5 1929
f89197d7 1930 if formats[0] is not info_dict:
b3d9ef88
JMF
1931 # Only set the 'formats' field if the original info_dict lists them;
1932 # otherwise we end up with a circular reference: the first (and only)
f89197d7 1933 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 1934 # which can't be exported to JSON
b3d9ef88 1935 info_dict['formats'] = formats
cfb56d1a 1936 if self.params.get('listformats'):
bfaae0a7 1937 self.list_formats(info_dict)
1938 return
1939
de3ef3ed 1940 req_format = self.params.get('format')
a9c58ad9 1941 if req_format is None:
0017d9ad
S
1942 req_format = self._default_format_spec(info_dict, download=download)
1943 if self.params.get('verbose'):
e8be92f9 1944 self.to_screen('[debug] Default format spec: %s' % req_format)
0017d9ad 1945
5acfa126 1946 format_selector = self.build_format_selector(req_format)
317f7ab6
S
1947
1948 # While in format selection we may need to have an access to the original
1949 # format set in order to calculate some metrics or do some processing.
1950 # For now we need to be able to guess whether original formats provided
1951 # by extractor are incomplete or not (i.e. whether extractor provides only
1952 # video-only or audio-only formats) for proper format selection for
1953 # extractors with such incomplete formats (see
067aa17e 1954 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6
S
1955 # Since formats may be filtered during format selection and may not match
1956 # the original formats the results may be incorrect. Thus original formats
1957 # or pre-calculated metrics should be passed to format selection routines
1958 # as well.
1959 # We will pass a context object containing all necessary additional data
1960 # instead of just formats.
1961 # This fixes incorrect format selection issue (see
067aa17e 1962 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 1963 incomplete_formats = (
317f7ab6 1964 # All formats are video-only or
3089bc74 1965 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 1966 # all formats are audio-only
3089bc74 1967 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
317f7ab6
S
1968
1969 ctx = {
1970 'formats': formats,
1971 'incomplete_formats': incomplete_formats,
1972 }
1973
1974 formats_to_download = list(format_selector(ctx))
dd82ffea 1975 if not formats_to_download:
6febd1c1 1976 raise ExtractorError('requested format not available',
78a3a9f8 1977 expected=True)
dd82ffea
JMF
1978
1979 if download:
909d24dd 1980 self.to_screen('[info] Downloading format(s) %s' % ", ".join([f['format_id'] for f in formats_to_download]))
dd82ffea 1981 if len(formats_to_download) > 1:
6febd1c1 1982 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
dd82ffea
JMF
1983 for format in formats_to_download:
1984 new_info = dict(info_dict)
1985 new_info.update(format)
1986 self.process_info(new_info)
1987 # We update the info dict with the best quality format (backwards compatibility)
1988 info_dict.update(formats_to_download[-1])
1989 return info_dict
1990
98c70d6f 1991 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 1992 """Select the requested subtitles and their format"""
98c70d6f
JMF
1993 available_subs = {}
1994 if normal_subtitles and self.params.get('writesubtitles'):
1995 available_subs.update(normal_subtitles)
1996 if automatic_captions and self.params.get('writeautomaticsub'):
1997 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
1998 if lang not in available_subs:
1999 available_subs[lang] = cap_info
2000
4d171848
JMF
2001 if (not self.params.get('writesubtitles')
2002 and not self.params.get('writeautomaticsub')
2003 or not available_subs):
2004 return None
a504ced0
JMF
2005
2006 if self.params.get('allsubtitles', False):
2007 requested_langs = available_subs.keys()
2008 else:
2009 if self.params.get('subtitleslangs', False):
2010 requested_langs = self.params.get('subtitleslangs')
2011 elif 'en' in available_subs:
2012 requested_langs = ['en']
2013 else:
2014 requested_langs = [list(available_subs.keys())[0]]
2015
2016 formats_query = self.params.get('subtitlesformat', 'best')
2017 formats_preference = formats_query.split('/') if formats_query else []
2018 subs = {}
2019 for lang in requested_langs:
2020 formats = available_subs.get(lang)
2021 if formats is None:
2022 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2023 continue
a504ced0
JMF
2024 for ext in formats_preference:
2025 if ext == 'best':
2026 f = formats[-1]
2027 break
2028 matches = list(filter(lambda f: f['ext'] == ext, formats))
2029 if matches:
2030 f = matches[-1]
2031 break
2032 else:
2033 f = formats[-1]
2034 self.report_warning(
2035 'No subtitle format found matching "%s" for language %s, '
2036 'using %s' % (formats_query, lang, f['ext']))
2037 subs[lang] = f
2038 return subs
2039
d06daf23
S
2040 def __forced_printings(self, info_dict, filename, incomplete):
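# Implements the --get-* style forced printing and --dump-json: write the requested fields (title, id, url, thumbnail, description, filename, duration, format) or the full info JSON straight to stdout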
2041 def print_mandatory(field):
2042 if (self.params.get('force%s' % field, False)
2043 and (not incomplete or info_dict.get(field) is not None)):
2044 self.to_stdout(info_dict[field])
2045
2046 def print_optional(field):
2047 if (self.params.get('force%s' % field, False)
2048 and info_dict.get(field) is not None):
2049 self.to_stdout(info_dict[field])
2050
2051 print_mandatory('title')
2052 print_mandatory('id')
2053 if self.params.get('forceurl', False) and not incomplete:
2054 if info_dict.get('requested_formats') is not None:
2055 for f in info_dict['requested_formats']:
2056 self.to_stdout(f['url'] + f.get('play_path', ''))
2057 else:
2058 # For RTMP URLs, also include the playpath
2059 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2060 print_optional('thumbnail')
2061 print_optional('description')
2062 if self.params.get('forcefilename', False) and filename is not None:
2063 self.to_stdout(filename)
2064 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2065 self.to_stdout(formatSeconds(info_dict['duration']))
2066 print_mandatory('format')
2067 if self.params.get('forcejson', False):
277d6ff5 2068 self.post_extract(info_dict)
75d43ca0 2069 self.to_stdout(json.dumps(info_dict, default=repr))
d06daf23 2070
8222d8de
JMF
2071 def process_info(self, info_dict):
2072 """Process a single resolved IE result."""
2073
2074 assert info_dict.get('_type', 'video') == 'video'
fd288278 2075
0202b52a 2076 info_dict.setdefault('__postprocessors', [])
2077
fd288278
PH
2078 max_downloads = self.params.get('max_downloads')
2079 if max_downloads is not None:
2080 if self._num_downloads >= int(max_downloads):
2081 raise MaxDownloadsReached()
8222d8de 2082
d06daf23 2083 # TODO: backward compatibility, to be removed
8222d8de 2084 info_dict['fulltitle'] = info_dict['title']
8222d8de 2085
11b85ce6 2086 if 'format' not in info_dict:
8222d8de
JMF
2087 info_dict['format'] = info_dict['ext']
2088
8b0d7497 2089 if self._match_entry(info_dict, incomplete=False) is not None:
8222d8de
JMF
2090 return
2091
277d6ff5 2092 self.post_extract(info_dict)
fd288278 2093 self._num_downloads += 1
8222d8de 2094
5bfa4862 2095 info_dict = self.pre_process(info_dict)
2096
dcf64d43 2097 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2098 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2099 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2100 files_to_move = {}
de6000d9 2101 skip_dl = self.params.get('skip_download', False)
8222d8de
JMF
2102
2103 # Forced printings
0202b52a 2104 self.__forced_printings(info_dict, full_filename, incomplete=False)
8222d8de 2105
8222d8de 2106 if self.params.get('simulate', False):
2d30509f 2107 if self.params.get('force_write_download_archive', False):
2108 self.record_download_archive(info_dict)
2109
2110 # Do nothing else if in simulate mode
8222d8de
JMF
2111 return
2112
de6000d9 2113 if full_filename is None:
8222d8de
JMF
2114 return
2115
e92caff5 2116 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2117 return
e92caff5 2118 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2119 return
2120
2121 if self.params.get('writedescription', False):
de6000d9 2122 descfn = self.prepare_filename(info_dict, 'description')
e92caff5 2123 if not self._ensure_dir_exists(encodeFilename(descfn)):
0202b52a 2124 return
0c3d0f51 2125 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2126 self.to_screen('[info] Video description is already present')
f00fd51d
JMF
2127 elif info_dict.get('description') is None:
2128 self.report_warning('There\'s no description to write.')
7b6fefc9
PH
2129 else:
2130 try:
6febd1c1 2131 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9
PH
2132 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2133 descfile.write(info_dict['description'])
7b6fefc9 2134 except (OSError, IOError):
6febd1c1 2135 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2136 return
8222d8de 2137
1fb07d10 2138 if self.params.get('writeannotations', False):
de6000d9 2139 annofn = self.prepare_filename(info_dict, 'annotation')
e92caff5 2140 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2141 return
0c3d0f51 2142 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2143 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2144 elif not info_dict.get('annotations'):
2145 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2146 else:
2147 try:
6febd1c1 2148 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9
PH
2149 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2150 annofile.write(info_dict['annotations'])
2151 except (KeyError, TypeError):
6febd1c1 2152 self.report_warning('There are no annotations to write.')
7b6fefc9 2153 except (OSError, IOError):
6febd1c1 2154 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2155 return
1fb07d10 2156
9f448fcb 2157 def dl(name, info, subtitle=False):
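# Select a suitable downloader for this format (native HTTP, HLS, DASH, external, ...), attach progress hooks, fill in http_headers if missing and run it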
98b69821 2158 fd = get_suitable_downloader(info, self.params)(self, self.params)
2159 for ph in self._progress_hooks:
2160 fd.add_progress_hook(ph)
2161 if self.params.get('verbose'):
29f7c58a 2162 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
46906886
DA
2163 new_info = dict(info)
2164 if new_info.get('http_headers') is None:
2165 new_info['http_headers'] = self._calc_headers(new_info)
2166 return fd.download(name, new_info, subtitle)
98b69821 2167
c4a91be7 2168 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2169 self.params.get('writeautomaticsub')])
c4a91be7 2170
c84dd8a9 2171 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de
JMF
2172 # Subtitle download errors are already handled in the relevant IE,
2173 # so it silently continues when used with an IE that doesn't support subtitles
c84dd8a9 2174 subtitles = info_dict['requested_subtitles']
fa57af1e 2175 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0
JMF
2176 for sub_lang, sub_info in subtitles.items():
2177 sub_format = sub_info['ext']
de6000d9 2178 sub_fn = self.prepare_filename(info_dict, 'subtitle')
2179 sub_filename = subtitles_filename(
0fd1a2b0 2180 temp_filename if not skip_dl else sub_fn,
0202b52a 2181 sub_lang, sub_format, info_dict.get('ext'))
de6000d9 2182 sub_filename_final = subtitles_filename(sub_fn, sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2183 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2184 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
dcf64d43 2185 sub_info['filepath'] = sub_filename
0202b52a 2186 files_to_move[sub_filename] = sub_filename_final
a504ced0 2187 else:
0c9df79e 2188 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c
RA
2189 if sub_info.get('data') is not None:
2190 try:
2191 # Use newline='' to prevent conversion of newline characters
067aa17e 2192 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c
RA
2193 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2194 subfile.write(sub_info['data'])
dcf64d43 2195 sub_info['filepath'] = sub_filename
0202b52a 2196 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c
RA
2197 except (OSError, IOError):
2198 self.report_error('Cannot write subtitles file ' + sub_filename)
2199 return
7b6fefc9 2200 else:
5ff1bc0c 2201 try:
dcf64d43 2202 dl(sub_filename, sub_info.copy(), subtitle=True)
2203 sub_info['filepath'] = sub_filename
0202b52a 2204 files_to_move[sub_filename] = sub_filename_final
0c9df79e 2205 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
5ff1bc0c
RA
2206 self.report_warning('Unable to download subtitle for "%s": %s' %
2207 (sub_lang, error_to_compat_str(err)))
2208 continue
8222d8de 2209
de6000d9 2210 if skip_dl:
57df9f53 2211 if self.params.get('convertsubtitles', False):
0202b52a 2212 # subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
de6000d9 2213 filename_real_ext = os.path.splitext(full_filename)[1][1:]
57df9f53 2214 filename_wo_ext = (
0202b52a 2215 os.path.splitext(full_filename)[0]
57df9f53 2216 if filename_real_ext == info_dict['ext']
0202b52a 2217 else full_filename)
57df9f53 2218 afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
0202b52a 2219 # if subconv.available:
2220 # info_dict['__postprocessors'].append(subconv)
57df9f53 2221 if os.path.exists(encodeFilename(afilename)):
f791b419
U
2222 self.to_screen(
2223 '[download] %s has already been downloaded and '
2224 'converted' % afilename)
57df9f53
U
2225 else:
2226 try:
0202b52a 2227 self.post_process(full_filename, info_dict, files_to_move)
af819c21 2228 except PostProcessingError as err:
2229 self.report_error('Postprocessing: %s' % str(err))
57df9f53
U
2230 return
2231
8222d8de 2232 if self.params.get('writeinfojson', False):
de6000d9 2233 infofn = self.prepare_filename(info_dict, 'infojson')
e92caff5 2234 if not self._ensure_dir_exists(encodeFilename(infofn)):
0202b52a 2235 return
0c3d0f51 2236 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2237 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2238 else:
66c935fb 2239 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2240 try:
75d43ca0 2241 write_json_file(self.filter_requested_info(info_dict, self.params.get('clean_infojson', True)), infofn)
7b6fefc9 2242 except (OSError, IOError):
66c935fb 2243 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2244 return
de6000d9 2245 info_dict['__infojson_filename'] = infofn
8222d8de 2246
de6000d9 2247 thumbfn = self.prepare_filename(info_dict, 'thumbnail')
2248 thumb_fn_temp = temp_filename if not skip_dl else thumbfn
2249 for thumb_ext in self._write_thumbnails(info_dict, thumb_fn_temp):
2250 thumb_filename_temp = replace_extension(thumb_fn_temp, thumb_ext, info_dict.get('ext'))
2251 thumb_filename = replace_extension(thumbfn, thumb_ext, info_dict.get('ext'))
dcf64d43 2252 files_to_move[thumb_filename_temp] = thumb_filename
8222d8de 2253
732044af 2254 # Write internet shortcut files
2255 url_link = webloc_link = desktop_link = False
2256 if self.params.get('writelink', False):
2257 if sys.platform == "darwin": # macOS.
2258 webloc_link = True
2259 elif sys.platform.startswith("linux"):
2260 desktop_link = True
2261 else: # if sys.platform in ['win32', 'cygwin']:
2262 url_link = True
2263 if self.params.get('writeurllink', False):
2264 url_link = True
2265 if self.params.get('writewebloclink', False):
2266 webloc_link = True
2267 if self.params.get('writedesktoplink', False):
2268 desktop_link = True
2269
2270 if url_link or webloc_link or desktop_link:
2271 if 'webpage_url' not in info_dict:
2272 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2273 return
2274 ascii_url = iri_to_uri(info_dict['webpage_url'])
2275
2276 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2277 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2278 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2279 self.to_screen('[info] Internet shortcut is already present')
2280 else:
2281 try:
2282 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2283 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2284 template_vars = {'url': ascii_url}
2285 if embed_filename:
2286 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2287 linkfile.write(template % template_vars)
2288 except (OSError, IOError):
2289 self.report_error('Cannot write internet shortcut ' + linkfn)
2290 return False
2291 return True
2292
2293 if url_link:
2294 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2295 return
2296 if webloc_link:
2297 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2298 return
2299 if desktop_link:
2300 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2301 return
2302
2303 # Download
2304 must_record_download_archive = False
de6000d9 2305 if not skip_dl:
4340deca 2306 try:
0202b52a 2307
6b591b29 2308 def existing_file(*filepaths):
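# Return a matching already-downloaded file, also checking for the post-processed 'final_ext' variant; if overwrites are enabled, delete the existing copies and return None so the download proceeds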
2309 ext = info_dict.get('ext')
2310 final_ext = self.params.get('final_ext', ext)
2311 existing_files = []
2312 for file in orderedSet(filepaths):
2313 if final_ext != ext:
2314 converted = replace_extension(file, final_ext, ext)
2315 if os.path.exists(encodeFilename(converted)):
2316 existing_files.append(converted)
2317 if os.path.exists(encodeFilename(file)):
2318 existing_files.append(file)
2319
2320 if not existing_files or self.params.get('overwrites', False):
2321 for file in orderedSet(existing_files):
2322 self.report_file_delete(file)
2323 os.remove(encodeFilename(file))
2324 return None
2325
2326 self.report_file_already_downloaded(existing_files[0])
2327 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2328 return existing_files[0]
0202b52a 2329
2330 success = True
4340deca
P
2331 if info_dict.get('requested_formats') is not None:
2332 downloaded = []
d47aeb22 2333 merger = FFmpegMergerPP(self)
63ad4d43 2334 if self.params.get('allow_unplayable_formats'):
2335 self.report_warning(
2336 'You have requested merging of multiple formats '
2337 'while also allowing unplayable formats to be downloaded. '
2338 'The formats won\'t be merged to prevent data corruption.')
2339 elif not merger.available:
2340 self.report_warning(
2341 'You have requested merging of multiple formats but ffmpeg is not installed. '
2342 'The formats won\'t be merged.')
81cd954a
S
2343
2344 def compatible_formats(formats):
d03cfdce 2345 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2346 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2347 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2348 if len(video_formats) > 2 or len(audio_formats) > 2:
2349 return False
2350
81cd954a 2351 # Check extension
d03cfdce 2352 exts = set(format.get('ext') for format in formats)
2353 COMPATIBLE_EXTS = (
2354 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2355 set(('webm',)),
2356 )
2357 for ext_sets in COMPATIBLE_EXTS:
2358 if ext_sets.issuperset(exts):
2359 return True
81cd954a
S
2360 # TODO: Check acodec/vcodec
2361 return False
2362
2363 requested_formats = info_dict['requested_formats']
0202b52a 2364 old_ext = info_dict['ext']
4d971a16 2365 if self.params.get('merge_output_format') is None:
2366 if not compatible_formats(requested_formats):
2367 info_dict['ext'] = 'mkv'
2368 self.report_warning(
2369 'Requested formats are incompatible for merge and will be merged into mkv.')
2370 if (info_dict['ext'] == 'webm'
2371 and self.params.get('writethumbnail', False)
2372 and info_dict.get('thumbnails')):
2373 info_dict['ext'] = 'mkv'
2374 self.report_warning(
2375 'webm doesn\'t support embedding a thumbnail, mkv will be used.')
0202b52a 2376
2377 def correct_ext(filename):
2378 filename_real_ext = os.path.splitext(filename)[1][1:]
2379 filename_wo_ext = (
2380 os.path.splitext(filename)[0]
2381 if filename_real_ext == old_ext
2382 else filename)
2383 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2384
38c6902b 2385 # Ensure filename always has a correct extension for successful merge
0202b52a 2386 full_filename = correct_ext(full_filename)
2387 temp_filename = correct_ext(temp_filename)
2388 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2389 info_dict['__real_download'] = False
0202b52a 2390 if dl_filename is None:
81cd954a 2391 for f in requested_formats:
5b5fbc08
JMF
2392 new_info = dict(info_dict)
2393 new_info.update(f)
c5c9bf0c 2394 fname = prepend_extension(
de6000d9 2395 self.prepare_filename(new_info, 'temp'),
c5c9bf0c 2396 'f%s' % f['format_id'], new_info['ext'])
e92caff5 2397 if not self._ensure_dir_exists(fname):
c5c9bf0c 2398 return
5b5fbc08 2399 downloaded.append(fname)
a9e7f546 2400 partial_success, real_download = dl(fname, new_info)
1ea24129 2401 info_dict['__real_download'] = info_dict['__real_download'] or real_download
5b5fbc08 2402 success = success and partial_success
63ad4d43 2403 if merger.available and not self.params.get('allow_unplayable_formats'):
efabc161 2404 info_dict['__postprocessors'].append(merger)
1ea24129 2405 info_dict['__files_to_merge'] = downloaded
2406 # Even if there were no downloads, it is being merged only now
2407 info_dict['__real_download'] = True
42bb0c59 2408 else:
2409 for file in downloaded:
2410 files_to_move[file] = None
4340deca
P
2411 else:
2412 # Just a single file
0202b52a 2413 dl_filename = existing_file(full_filename, temp_filename)
2414 if dl_filename is None:
2415 success, real_download = dl(temp_filename, info_dict)
2416 info_dict['__real_download'] = real_download
2417
0202b52a 2418 dl_filename = dl_filename or temp_filename
c571435f 2419 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2420
4340deca 2421 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
7960b056 2422 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca
P
2423 return
2424 except (OSError, IOError) as err:
2425 raise UnavailableVideoError(err)
2426 except (ContentTooShortError, ) as err:
2427 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2428 return
8222d8de 2429
de6000d9 2430 if success and full_filename != '-':
6271f1ca 2431 # Fixup content
62cd676c
PH
2432 fixup_policy = self.params.get('fixup')
2433 if fixup_policy is None:
2434 fixup_policy = 'detect_or_warn'
2435
e4172ac9 2436 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
d1e4a464 2437
6271f1ca
PH
2438 stretched_ratio = info_dict.get('stretched_ratio')
2439 if stretched_ratio is not None and stretched_ratio != 1:
6271f1ca
PH
2440 if fixup_policy == 'warn':
2441 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2442 info_dict['id'], stretched_ratio))
2443 elif fixup_policy == 'detect_or_warn':
2444 stretched_pp = FFmpegFixupStretchedPP(self)
2445 if stretched_pp.available:
6271f1ca
PH
2446 info_dict['__postprocessors'].append(stretched_pp)
2447 else:
2448 self.report_warning(
d1e4a464
S
2449 '%s: Non-uniform pixel ratio (%s). %s'
2450 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
6271f1ca 2451 else:
62cd676c
PH
2452 assert fixup_policy in ('ignore', 'never')
2453
3089bc74 2454 if (info_dict.get('requested_formats') is None
6b591b29 2455 and info_dict.get('container') == 'm4a_dash'
2456 and info_dict.get('ext') == 'm4a'):
62cd676c 2457 if fixup_policy == 'warn':
d1e4a464
S
2458 self.report_warning(
2459 '%s: writing DASH m4a. '
2460 'Only some players support this container.'
2461 % info_dict['id'])
62cd676c
PH
2462 elif fixup_policy == 'detect_or_warn':
2463 fixup_pp = FFmpegFixupM4aPP(self)
2464 if fixup_pp.available:
62cd676c
PH
2465 info_dict['__postprocessors'].append(fixup_pp)
2466 else:
2467 self.report_warning(
d1e4a464
S
2468 '%s: writing DASH m4a. '
2469 'Only some players support this container. %s'
2470 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
62cd676c
PH
2471 else:
2472 assert fixup_policy in ('ignore', 'never')
6271f1ca 2473
0a473f2f 2474 if ('protocol' in info_dict
2475 and get_suitable_downloader(info_dict, self.params).__name__ == 'HlsFD'):
f17f8651 2476 if fixup_policy == 'warn':
a02682fd 2477 self.report_warning('%s: malformed AAC bitstream detected.' % (
f17f8651 2478 info_dict['id']))
2479 elif fixup_policy == 'detect_or_warn':
2480 fixup_pp = FFmpegFixupM3u8PP(self)
2481 if fixup_pp.available:
f17f8651 2482 info_dict['__postprocessors'].append(fixup_pp)
2483 else:
2484 self.report_warning(
a02682fd 2485 '%s: malformed AAC bitstream detected. %s'
d1e4a464 2486 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
f17f8651 2487 else:
2488 assert fixup_policy in ('ignore', 'never')
2489
8222d8de 2490 try:
23c1a667 2491 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2492 except PostProcessingError as err:
2493 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2494 return
ab8e5e51
AM
2495 try:
2496 for ph in self._post_hooks:
23c1a667 2497 ph(info_dict['filepath'])
ab8e5e51
AM
2498 except Exception as err:
2499 self.report_error('post hooks: %s' % str(err))
2500 return
2d30509f 2501 must_record_download_archive = True
2502
2503 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2504 self.record_download_archive(info_dict)
c3e6ffba 2505 max_downloads = self.params.get('max_downloads')
2506 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2507 raise MaxDownloadsReached()
8222d8de
JMF
2508
2509 def download(self, url_list):
2510 """Download a given list of URLs."""
de6000d9 2511 outtmpl = self.outtmpl_dict['default']
3089bc74
S
2512 if (len(url_list) > 1
2513 and outtmpl != '-'
2514 and '%' not in outtmpl
2515 and self.params.get('max_downloads') != 1):
acd69589 2516 raise SameFileError(outtmpl)
8222d8de
JMF
2517
2518 for url in url_list:
2519 try:
5f6a1245 2520 # It also downloads the videos
61aa5ba3
S
2521 res = self.extract_info(
2522 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2523 except UnavailableVideoError:
6febd1c1 2524 self.report_error('unable to download video')
8222d8de 2525 except MaxDownloadsReached:
8b0d7497 2526 self.to_screen('[info] Maximum number of downloaded files reached')
2527 raise
2528 except ExistingVideoReached:
d83cb531 2529 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2530 raise
2531 except RejectedVideoReached:
d83cb531 2532 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
8222d8de 2533 raise
63e0be34
PH
2534 else:
2535 if self.params.get('dump_single_json', False):
277d6ff5 2536 self.post_extract(res)
75d43ca0 2537 self.to_stdout(json.dumps(res, default=repr))
8222d8de
JMF
2538
2539 return self._download_retcode
2540
1dcc4c0c 2541 def download_with_info_file(self, info_filename):
31bd3925
JMF
2542 with contextlib.closing(fileinput.FileInput(
2543 [info_filename], mode='r',
2544 openhook=fileinput.hook_encoded('utf-8'))) as f:
2545 # FileInput doesn't have a read method, so we can't call json.load
498f5606 2546 info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898
JMF
2547 try:
2548 self.process_ie_result(info, download=True)
498f5606 2549 except (DownloadError, EntryNotInPlaylist):
d4943898
JMF
2550 webpage_url = info.get('webpage_url')
2551 if webpage_url is not None:
6febd1c1 2552 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898
JMF
2553 return self.download([webpage_url])
2554 else:
2555 raise
2556 return self._download_retcode
1dcc4c0c 2557
cb202fd2 2558 @staticmethod
75d43ca0 2559 def filter_requested_info(info_dict, actually_filter=True):
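# Prepare the info dict for writing to the info JSON: drop internal keys (anything starting with '_' except '_type', plus requested_formats/requested_subtitles/requested_entries, filepath and entries); with actually_filter=False only an 'epoch' timestamp is added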
2560 if not actually_filter:
394dcd44 2561 info_dict['epoch'] = int(time.time())
75d43ca0 2562 return info_dict
5226731e 2563 exceptions = {
498f5606 2564 'remove': ['requested_formats', 'requested_subtitles', 'requested_entries', 'filepath', 'entries'],
5226731e 2565 'keep': ['_type'],
2566 }
2567 keep_key = lambda k: k in exceptions['keep'] or not (k.startswith('_') or k in exceptions['remove'])
2568 filter_fn = lambda obj: (
a515a78d 2569 list(map(filter_fn, obj)) if isinstance(obj, (list, tuple))
2570 else obj if not isinstance(obj, dict)
2571 else dict((k, filter_fn(v)) for k, v in obj.items() if keep_key(k)))
5226731e 2572 return filter_fn(info_dict)
cb202fd2 2573
dcf64d43 2574 def run_pp(self, pp, infodict):
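# Run a single postprocessor; files it marks for deletion are removed immediately unless --keep-video is given, in which case they are left for MoveFilesAfterDownloadPP to handle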
5bfa4862 2575 files_to_delete = []
dcf64d43 2576 if '__files_to_move' not in infodict:
2577 infodict['__files_to_move'] = {}
af819c21 2578 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2579 if not files_to_delete:
dcf64d43 2580 return infodict
5bfa4862 2581
2582 if self.params.get('keepvideo', False):
2583 for f in files_to_delete:
dcf64d43 2584 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 2585 else:
2586 for old_filename in set(files_to_delete):
2587 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2588 try:
2589 os.remove(encodeFilename(old_filename))
2590 except (IOError, OSError):
2591 self.report_warning('Unable to remove downloaded original file')
dcf64d43 2592 if old_filename in infodict['__files_to_move']:
2593 del infodict['__files_to_move'][old_filename]
2594 return infodict
5bfa4862 2595
277d6ff5 2596 @staticmethod
2597 def post_extract(info_dict):
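# Resolve any deferred '__post_extractor' callback (recursively for playlist entries) and merge its result into the info dict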
2598 def actual_post_extract(info_dict):
2599 if info_dict.get('_type') in ('playlist', 'multi_video'):
2600 for video_dict in info_dict.get('entries', {}):
b050d210 2601 actual_post_extract(video_dict or {})
277d6ff5 2602 return
2603
2604 if '__post_extractor' not in info_dict:
2605 return
2606 post_extractor = info_dict['__post_extractor']
2607 if post_extractor:
2608 info_dict.update(post_extractor().items())
2609 del info_dict['__post_extractor']
2610 return
2611
b050d210 2612 actual_post_extract(info_dict or {})
277d6ff5 2613
5bfa4862 2614 def pre_process(self, ie_info):
2615 info = dict(ie_info)
2616 for pp in self._pps['beforedl']:
dcf64d43 2617 info = self.run_pp(pp, info)
5bfa4862 2618 return info
2619
dcf64d43 2620 def post_process(self, filename, ie_info, files_to_move=None):
8222d8de
JMF
2621 """Run all the postprocessors on the given file."""
2622 info = dict(ie_info)
2623 info['filepath'] = filename
dcf64d43 2624 info['__files_to_move'] = files_to_move or {}
0202b52a 2625
5bfa4862 2626 for pp in ie_info.get('__postprocessors', []) + self._pps['normal']:
dcf64d43 2627 info = self.run_pp(pp, info)
2628 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2629 del info['__files_to_move']
5bfa4862 2630 for pp in self._pps['aftermove']:
dcf64d43 2631 info = self.run_pp(pp, info)
23c1a667 2632 return info
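# Descriptive note on ordering: pre_process() runs the 'beforedl' PPs before
# download, post_process() then runs any per-video '__postprocessors' plus
# the 'normal' PPs, moves files via MoveFilesAfterDownloadPP, and finally
# runs the 'aftermove' PPs.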
c1c9a79c 2633
5db07df6 2634 def _make_archive_id(self, info_dict):
e9fef7ee
S
2635 video_id = info_dict.get('id')
2636 if not video_id:
2637 return
5db07df6
PH
2638 # Future-proof against any change in case
2639 # and backwards compatibility with prior versions
e9fef7ee 2640 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 2641 if extractor is None:
1211bb6d
S
2642 url = str_or_none(info_dict.get('url'))
2643 if not url:
2644 return
e9fef7ee
S
2645 # Try to find matching extractor for the URL and take its ie_key
2646 for ie in self._ies:
1211bb6d 2647 if ie.suitable(url):
e9fef7ee
S
2648 extractor = ie.ie_key()
2649 break
2650 else:
2651 return
d0757229 2652 return '%s %s' % (extractor.lower(), video_id)
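# Illustrative example: a video with extractor_key 'Youtube' and id 'abc123'
# yields the archive id 'youtube abc123', which is exactly the line format
# written to (and matched against) the --download-archive file below.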
5db07df6
PH
2653
2654 def in_download_archive(self, info_dict):
2655 fn = self.params.get('download_archive')
2656 if fn is None:
2657 return False
2658
2659 vid_id = self._make_archive_id(info_dict)
e9fef7ee 2660 if not vid_id:
7012b23c 2661 return False # Incomplete video information
5db07df6 2662
a45e8619 2663 return vid_id in self.archive
c1c9a79c
PH
2664
2665 def record_download_archive(self, info_dict):
2666 fn = self.params.get('download_archive')
2667 if fn is None:
2668 return
5db07df6
PH
2669 vid_id = self._make_archive_id(info_dict)
2670 assert vid_id
c1c9a79c 2671 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 2672 archive_file.write(vid_id + '\n')
a45e8619 2673 self.archive.add(vid_id)
dd82ffea 2674
8c51aa65 2675 @staticmethod
8abeeb94 2676 def format_resolution(format, default='unknown'):
fb04e403
PH
2677 if format.get('vcodec') == 'none':
2678 return 'audio only'
f49d89ee
PH
2679 if format.get('resolution') is not None:
2680 return format['resolution']
35615307
DA
2681 if format.get('width') and format.get('height'):
2682 res = '%dx%d' % (format['width'], format['height'])
2683 elif format.get('height'):
2684 res = '%sp' % format['height']
2685 elif format.get('width'):
388ae76b 2686 res = '%dx?' % format['width']
8c51aa65 2687 else:
8abeeb94 2688 res = default
8c51aa65
JMF
2689 return res
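# Illustrative examples (hypothetical format dicts):
#     format_resolution({'vcodec': 'none'})               -> 'audio only'
#     format_resolution({'width': 1920, 'height': 1080})  -> '1920x1080'
#     format_resolution({'height': 720})                  -> '720p'
#     format_resolution({})                                -> 'unknown'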
2690
c57f7757
PH
2691 def _format_note(self, fdict):
2692 res = ''
2693 if fdict.get('ext') in ['f4f', 'f4m']:
2694 res += '(unsupported) '
32f90364
PH
2695 if fdict.get('language'):
2696 if res:
2697 res += ' '
9016d76f 2698 res += '[%s] ' % fdict['language']
c57f7757
PH
2699 if fdict.get('format_note') is not None:
2700 res += fdict['format_note'] + ' '
2701 if fdict.get('tbr') is not None:
2702 res += '%4dk ' % fdict['tbr']
2703 if fdict.get('container') is not None:
2704 if res:
2705 res += ', '
2706 res += '%s container' % fdict['container']
3089bc74
S
2707 if (fdict.get('vcodec') is not None
2708 and fdict.get('vcodec') != 'none'):
c57f7757
PH
2709 if res:
2710 res += ', '
2711 res += fdict['vcodec']
91c7271a 2712 if fdict.get('vbr') is not None:
c57f7757
PH
2713 res += '@'
2714 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2715 res += 'video@'
2716 if fdict.get('vbr') is not None:
2717 res += '%4dk' % fdict['vbr']
fbb21cf5 2718 if fdict.get('fps') is not None:
5d583bdf
S
2719 if res:
2720 res += ', '
2721 res += '%sfps' % fdict['fps']
c57f7757
PH
2722 if fdict.get('acodec') is not None:
2723 if res:
2724 res += ', '
2725 if fdict['acodec'] == 'none':
2726 res += 'video only'
2727 else:
2728 res += '%-5s' % fdict['acodec']
2729 elif fdict.get('abr') is not None:
2730 if res:
2731 res += ', '
2732 res += 'audio'
2733 if fdict.get('abr') is not None:
2734 res += '@%3dk' % fdict['abr']
2735 if fdict.get('asr') is not None:
2736 res += ' (%5dHz)' % fdict['asr']
2737 if fdict.get('filesize') is not None:
2738 if res:
2739 res += ', '
2740 res += format_bytes(fdict['filesize'])
9732d77e
PH
2741 elif fdict.get('filesize_approx') is not None:
2742 if res:
2743 res += ', '
2744 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 2745 return res
91c7271a 2746
76d321f6 2747 def _format_note_table(self, f):
2748 def join_fields(*vargs):
2749 return ', '.join((val for val in vargs if val != ''))
2750
2751 return join_fields(
2752 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2753 format_field(f, 'language', '[%s]'),
2754 format_field(f, 'format_note'),
2755 format_field(f, 'container', ignore=(None, f.get('ext'))),
2756 format_field(f, 'asr', '%5dHz'))
2757
c57f7757 2758 def list_formats(self, info_dict):
94badb25 2759 formats = info_dict.get('formats', [info_dict])
76d321f6 2760 new_format = self.params.get('listformats_table', False)
2761 if new_format:
2762 table = [
2763 [
2764 format_field(f, 'format_id'),
2765 format_field(f, 'ext'),
2766 self.format_resolution(f),
2767 format_field(f, 'fps', '%d'),
2768 '|',
2769 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2770 format_field(f, 'tbr', '%4dk'),
fb198a8a 2771 (f.get('protocol') or '').replace('http_dash_segments', 'dash').replace('native', 'n').replace('niconico_', ''),
76d321f6 2772 '|',
2773 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2774 format_field(f, 'vbr', '%4dk'),
2775 format_field(f, 'acodec', default='unknown').replace('none', ''),
2776 format_field(f, 'abr', '%3dk'),
2777 format_field(f, 'asr', '%5dHz'),
2778 self._format_note_table(f)]
2779 for f in formats
2780 if f.get('preference') is None or f['preference'] >= -1000]
2781 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2782 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2783 else:
2784 table = [
2785 [
2786 format_field(f, 'format_id'),
2787 format_field(f, 'ext'),
2788 self.format_resolution(f),
2789 self._format_note(f)]
2790 for f in formats
2791 if f.get('preference') is None or f['preference'] >= -1000]
2792 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 2793
cfb56d1a 2794 self.to_screen(
76d321f6 2795 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2796 header_line,
2797 table,
2798 delim=new_format,
2799 extraGap=(0 if new_format else 1),
2800 hideEmpty=new_format)))
cfb56d1a
PH
2801
2802 def list_thumbnails(self, info_dict):
2803 thumbnails = info_dict.get('thumbnails')
2804 if not thumbnails:
b7b72db9 2805 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2806 return
cfb56d1a
PH
2807
2808 self.to_screen(
2809 '[info] Thumbnails for %s:' % info_dict['id'])
2810 self.to_screen(render_table(
2811 ['ID', 'width', 'height', 'URL'],
2812 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 2813
360e1ca5 2814 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 2815 if not subtitles:
360e1ca5 2816 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 2817 return
a504ced0 2818 self.to_screen(
edab9dbf
JMF
2819 'Available %s for %s:' % (name, video_id))
2820 self.to_screen(render_table(
2821 ['Language', 'formats'],
2822 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2823 for lang, formats in subtitles.items()]))
a504ced0 2824
dca08720
PH
2825 def urlopen(self, req):
2826 """ Start an HTTP download """
82d8a8b6 2827 if isinstance(req, compat_basestring):
67dda517 2828 req = sanitized_Request(req)
19a41fc6 2829 return self._opener.open(req, timeout=self._socket_timeout)
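# Illustrative usage (assuming 'ydl' is a YoutubeDL instance); a plain string
# is wrapped in a sanitized_Request first, so both forms go through the
# configured proxies/cookies:
#     ydl.urlopen('https://example.com/feed.json').read()
#     ydl.urlopen(sanitized_Request('https://example.com/feed.json', headers={'X-Test': '1'}))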
dca08720
PH
2830
2831 def print_debug_header(self):
2832 if not self.params.get('verbose'):
2833 return
62fec3b2 2834
4192b51c 2835 if type('') is not compat_str:
067aa17e 2836 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
4192b51c
PH
2837 self.report_warning(
2838 'Your Python is broken! Update to a newer and supported version')
2839
c6afed48
PH
2840 stdout_encoding = getattr(
2841 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 2842 encoding_str = (
734f90bb
PH
2843 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2844 locale.getpreferredencoding(),
2845 sys.getfilesystemencoding(),
c6afed48 2846 stdout_encoding,
b0472057 2847 self.get_encoding()))
4192b51c 2848 write_string(encoding_str, encoding=None)
734f90bb 2849
e5813e53 2850 source = (
2851 '(exe)' if hasattr(sys, 'frozen')
2852 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2853 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2854 else '')
2855 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 2856 if _LAZY_LOADER:
f74980cb 2857 self._write_string('[debug] Lazy loading extractors enabled\n')
2858 if _PLUGIN_CLASSES:
2859 self._write_string(
2860 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
dca08720
PH
2861 try:
2862 sp = subprocess.Popen(
2863 ['git', 'rev-parse', '--short', 'HEAD'],
2864 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2865 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 2866 out, err = process_communicate_or_kill(sp)
dca08720
PH
2867 out = out.decode().strip()
2868 if re.match('[0-9a-f]+', out):
f74980cb 2869 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 2870 except Exception:
dca08720
PH
2871 try:
2872 sys.exc_clear()
70a1165b 2873 except Exception:
dca08720 2874 pass
b300cda4
S
2875
2876 def python_implementation():
2877 impl_name = platform.python_implementation()
2878 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2879 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2880 return impl_name
2881
e5813e53 2882 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2883 platform.python_version(),
2884 python_implementation(),
2885 platform.architecture()[0],
b300cda4 2886 platform_name()))
d28b5171 2887
73fac4e9 2888 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 2889 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 2890 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171
PH
2891 exe_str = ', '.join(
2892 '%s %s' % (exe, v)
2893 for exe, v in sorted(exe_versions.items())
2894 if v
2895 )
2896 if not exe_str:
2897 exe_str = 'none'
2898 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720
PH
2899
2900 proxy_map = {}
2901 for handler in self._opener.handlers:
2902 if hasattr(handler, 'proxies'):
2903 proxy_map.update(handler.proxies)
734f90bb 2904 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 2905
58b1f00d
PH
2906 if self.params.get('call_home', False):
2907 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2908 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 2909 return
58b1f00d
PH
2910 latest_version = self.urlopen(
2911 'https://yt-dl.org/latest/version').read().decode('utf-8')
2912 if version_tuple(latest_version) > version_tuple(__version__):
2913 self.report_warning(
2914 'You are using an outdated version (newest version: %s)! '
2915 'See https://yt-dl.org/update if you need help updating.' %
2916 latest_version)
2917
e344693b 2918 def _setup_opener(self):
6ad14cab 2919 timeout_val = self.params.get('socket_timeout')
19a41fc6 2920 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 2921
dca08720
PH
2922 opts_cookiefile = self.params.get('cookiefile')
2923 opts_proxy = self.params.get('proxy')
2924
2925 if opts_cookiefile is None:
2926 self.cookiejar = compat_cookiejar.CookieJar()
2927 else:
590bc6f6 2928 opts_cookiefile = expand_path(opts_cookiefile)
1bab3437 2929 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
dca08720 2930 if os.access(opts_cookiefile, os.R_OK):
1d88b3e6 2931 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
dca08720 2932
6a3f4c3f 2933 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
2934 if opts_proxy is not None:
2935 if opts_proxy == '':
2936 proxies = {}
2937 else:
2938 proxies = {'http': opts_proxy, 'https': opts_proxy}
2939 else:
2940 proxies = compat_urllib_request.getproxies()
067aa17e 2941 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
2942 if 'http' in proxies and 'https' not in proxies:
2943 proxies['https'] = proxies['http']
91410c9b 2944 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
2945
2946 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
2947 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2948 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 2949 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 2950 data_handler = compat_urllib_request_DataHandler()
6240b0a2
JMF
2951
2952 # When passing our own FileHandler instance, build_opener won't add the
2953 # default FileHandler and allows us to disable the file protocol, which
2954 # can be used for malicious purposes (see
067aa17e 2955 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2
JMF
2956 file_handler = compat_urllib_request.FileHandler()
2957
2958 def file_open(*args, **kwargs):
7a5c1cfe 2959 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
2960 file_handler.file_open = file_open
2961
2962 opener = compat_urllib_request.build_opener(
fca6dba8 2963 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 2964
dca08720
PH
2965 # Delete the default user-agent header, which would otherwise apply in
2966 # cases where our custom HTTP handler doesn't come into play
067aa17e 2967 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
2968 opener.addheaders = []
2969 self._opener = opener
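# Illustrative note: a single --proxy value is applied to both schemes, e.g.
#     params = {'proxy': 'socks5://127.0.0.1:9150'}
# results in proxies == {'http': 'socks5://127.0.0.1:9150',
#                        'https': 'socks5://127.0.0.1:9150'},
# which PerRequestProxyHandler above then applies to each request.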
62fec3b2
PH
2970
2971 def encode(self, s):
2972 if isinstance(s, bytes):
2973 return s # Already encoded
2974
2975 try:
2976 return s.encode(self.get_encoding())
2977 except UnicodeEncodeError as err:
2978 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2979 raise
2980
2981 def get_encoding(self):
2982 encoding = self.params.get('encoding')
2983 if encoding is None:
2984 encoding = preferredencoding()
2985 return encoding
ec82d85a 2986
de6000d9 2987 def _write_thumbnails(self, info_dict, filename):  # returns the extensions (e.g. 'jpg' or '<id>.jpg') of the thumbnails written
6c4fd172 2988 write_all = self.params.get('write_all_thumbnails', False)
2989 thumbnails = []
2990 if write_all or self.params.get('writethumbnail', False):
0202b52a 2991 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 2992 multiple = write_all and len(thumbnails) > 1
ec82d85a 2993
0202b52a 2994 ret = []
6c4fd172 2995 for t in thumbnails[::1 if write_all else -1]:
ec82d85a 2996 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 2997 suffix = '%s.' % t['id'] if multiple else ''
2998 thumb_display_id = '%s ' % t['id'] if multiple else ''
dcf64d43 2999 t['filepath'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 3000
0c3d0f51 3001 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 3002 ret.append(suffix + thumb_ext)
ec82d85a
PH
3003 self.to_screen('[%s] %s: Thumbnail %sis already present' %
3004 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3005 else:
5ef7d9bd 3006 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
ec82d85a
PH
3007 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3008 try:
3009 uf = self.urlopen(t['url'])
d3d89c32 3010 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3011 shutil.copyfileobj(uf, thumbf)
de6000d9 3012 ret.append(suffix + thumb_ext)
ec82d85a
PH
3013 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3014 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
3015 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
3016 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 3017 (t['url'], error_to_compat_str(err)))
6c4fd172 3018 if ret and not write_all:
3019 break
0202b52a 3020 return ret
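# Illustrative example: with write_all_thumbnails set, a video saved as
# 'clip.mp4' with thumbnail ids '0' and '1' gets files roughly named
# 'clip.0.jpg' and 'clip.1.jpg', and the returned list would be
# ['0.jpg', '1.jpg']; without write_all_thumbnails the thumbnails are tried
# from the best (last) one down, stopping after the first success.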