#!/usr/bin/env python
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random

from string import ascii_letters
from zipimport import zipimporter

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    OUTTMPL_TYPES,
    determine_ext,
    determine_protocol,
    DOT_DESKTOP_LINK_TEMPLATE,
    DOT_URL_LINK_TEMPLATE,
    DOT_WEBLOC_LINK_TEMPLATE,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    EntryNotInPlaylist,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    FORMAT_RE,
    formatSeconds,
    GeoRestrictedError,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    orderedSet,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    render_table,
    replace_extension,
    RejectedVideoReached,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    to_high_limit_path,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieJar,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    process_communicate_or_kill,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER, _PLUGIN_CLASSES
from .extractor.openload import PhantomJSwrapper
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    # FFmpegSubtitlesConvertorPP,
    get_postprocessor,
    MoveFilesAfterDownloadPP,
)
from .version import __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how
    to extract all the needed information (that is the task of the
    InfoExtractors), it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    forceduration:     Force printing duration.
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files.
    format:            Video format code. See "FORMAT SELECTION" for more details.
    allow_unplayable_formats:   Allow unplayable formats to be extracted and downloaded.
    format_sort:       How to sort the video formats. See "Sorting Formats"
                       for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams:   Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams:   Allow multiple audio streams to be merged
                       into a single file
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       A string is also accepted for backward compatibility
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows compatible
    ignoreerrors:      Do not stop on download errors
                       (Default True when running yt-dlp,
                       but False when directly accessing YoutubeDL class)
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None,
                       and don't overwrite any file if False
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    writecomments:     Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails:  Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    cookiefile:        File name where cookies should be read from and dumped to
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy:  URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * _after_move: Optional. If True, run this post_processor
                               after 'MoveFilesAfterDownload'
                       as well as any further keyword arguments for the
                       postprocessor.
    post_hooks:        A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading" or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted. "merge_output_format" is
                       replaced by this extension when given
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example of this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: Executable of the external downloader to call.
                       None or unset for standard (built-in) downloader.
    hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args, hls_use_mpegts,
    http_chunk_size.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    youtube_include_dash_manifest: If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """
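
    # A minimal usage sketch of the options documented above. The option keys
    # come from the list above; the chosen values (format string, template,
    # test URL, postprocessor) are illustrative assumptions, not recommendations.
    #
    #   from yt_dlp import YoutubeDL
    #
    #   def my_hook(d):
    #       # 'status' should be checked first; unknown values are ignored
    #       if d['status'] == 'finished':
    #           print('Done downloading %s' % d['filename'])
    #
    #   params = {
    #       'format': 'bestvideo+bestaudio/best',
    #       'outtmpl': {'default': '%(title)s-%(id)s.%(ext)s'},
    #       'ignoreerrors': True,
    #       'progress_hooks': [my_hook],
    #       'postprocessors': [{'key': 'FFmpegMetadata'}],
    #   }
    #   with YoutubeDL(params) as ydl:
    #       ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])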

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    params = None
    _ies = []
    _pps = {'beforedl': [], 'aftermove': [], 'normal': []}
    __prepare_filename_warned = False
    _first_webpage_request = True
    _download_retcode = None
    _num_downloads = None
    _playlist_level = 0
    _playlist_urls = set()
    _screen_file = None

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options."""
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = {'beforedl': [], 'aftermove': [], 'normal': []}
        self.__prepare_filename_warned = False
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)
        self.archive = set()

        def preload_download_archive(self):
            """Preload the archive, if any is specified"""
            fn = self.params.get('download_archive')
            if fn is None:
                return False
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        if self.params.get('verbose'):
            self.to_stdout('[debug] Loading archive file %r' % self.params.get('download_archive'))

        preload_download_archive(self)

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        if self.params.get('final_ext'):
            if self.params.get('merge_output_format'):
                self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
            self.params['merge_output_format'] = self.params['final_ext']

        if 'overwrites' in self.params and self.params['overwrites'] is None:
            del self.params['overwrites']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            if 'when' in pp_def:
                when = pp_def['when']
                del pp_def['when']
            else:
                when = 'normal'
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        if not isinstance(ie, type):
            self._ies_instances[ie.ie_key()] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there is no instance, it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='normal'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader)"""
        self._progress_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        return self.to_stdout(message, skip_eol, check_quiet=True)

    def _write_string(self, s, out=None):
        write_string(s, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, check_quiet=False):
        """Print message to stdout if not in quiet mode."""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not check_quiet or not self.params.get('quiet', False):
            message = self._bidi_workaround(message)
            terminator = ['\n', ''][skip_eol]
            output = message + terminator

            self._write_string(output, self._screen_file)

    def to_stderr(self, message):
        """Print message to stderr."""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            message = self._bidi_workaround(message)
            output = message + '\n'
            self._write_string(output, self._err_file)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def report_warning(self, message):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
                _msg_header = '\033[0;33mWARNING:\033[0m'
            else:
                _msg_header = 'WARNING:'
            warning_message = '%s %s' % (_msg_header, message)
            self.to_stderr(warning_message)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
            _msg_header = '\033[0;31mERROR:\033[0m'
        else:
            _msg_header = 'ERROR:'
        error_message = '%s %s' % (_msg_header, message)
        self.trouble(error_message, tb)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        outtmpl_dict.update({
            k: v for k, v in DEFAULT_OUTTMPL.items()
            if not outtmpl_dict.get(k)})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

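    # Illustrative sketch of what parse_outtmpl() produces; the values below
    # are assumptions for illustration (the full set of default keys lives in
    # DEFAULT_OUTTMPL in utils.py):
    #
    #   ydl = YoutubeDL({'outtmpl': '%(title)s.%(ext)s'})
    #   tmpl = ydl.parse_outtmpl()
    #   # A plain string is treated as {'default': ...}; any key not given
    #   # falls back to the corresponding entry of DEFAULT_OUTTMPL.
    #   assert tmpl['default'] == '%(title)s.%(ext)s'
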
    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the template and info_dict suitable for substitution (outtmpl % info_dict)"""
        template_dict = dict(info_dict)

        # duration_string
        template_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-')
            if info_dict.get('duration', None) is not None
            else None)

        # epoch
        template_dict['epoch'] = int(time.time())

        # autonumber
        autonumber_size = self.params.get('autonumber_size')
        if autonumber_size is None:
            autonumber_size = 5
        template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads

        # resolution if not defined
        if template_dict.get('resolution') is None:
            if template_dict.get('width') and template_dict.get('height'):
                template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
            elif template_dict.get('height'):
                template_dict['resolution'] = '%sp' % template_dict['height']
            elif template_dict.get('width'):
                template_dict['resolution'] = '%dx?' % template_dict['width']

        if sanitize is None:
            sanitize = lambda k, v: v
        template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                             for k, v in template_dict.items()
                             if v is not None and not isinstance(v, (list, tuple, dict)))
        na = self.params.get('outtmpl_na_placeholder', 'NA')
        template_dict = collections.defaultdict(lambda: na, template_dict)

        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': len(str(template_dict['n_entries'])),
            'autonumber': autonumber_size,
        }
        FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
        mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
        if mobj:
            outtmpl = re.sub(
                FIELD_SIZE_COMPAT_RE,
                r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                outtmpl)

        numeric_fields = list(self._NUMERIC_FIELDS)

        # Format date
        FORMAT_DATE_RE = FORMAT_RE.format(r'(?P<key>(?P<field>\w+)>(?P<format>.+?))')
        for mobj in re.finditer(FORMAT_DATE_RE, outtmpl):
            conv_type, field, frmt, key = mobj.group('type', 'field', 'format', 'key')
            if key in template_dict:
                continue
            value = strftime_or_none(template_dict.get(field), frmt, na)
            if conv_type in 'crs':  # string
                value = sanitize(field, value)
            else:  # number
                numeric_fields.append(key)
                value = float_or_none(value, default=None)
            if value is not None:
                template_dict[key] = value

        # Missing numeric fields used together with integer presentation types
        # in format specification will break the argument substitution since
        # string NA placeholder is returned for missing fields. We will patch
        # output template for missing fields to meet string presentation type.
        for numeric_field in numeric_fields:
            if numeric_field not in template_dict:
                outtmpl = re.sub(
                    FORMAT_RE.format(re.escape(numeric_field)),
                    r'%({0})s'.format(numeric_field), outtmpl)

        return outtmpl, template_dict

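    # Rough sketch of how the (outtmpl, template_dict) pair returned above is
    # meant to be used; the info dict values are made up for illustration:
    #
    #   ydl = YoutubeDL()
    #   info = {'id': 'xyz', 'title': 'Test', 'ext': 'mp4'}
    #   outtmpl, tmpl_dict = ydl.prepare_outtmpl('%(title)s-%(id)s.%(ext)s', info)
    #   filename = outtmpl % tmpl_dict   # -> 'Test-xyz.mp4'
    #   # Missing fields substitute the 'outtmpl_na_placeholder' value ('NA'),
    #   # because tmpl_dict is a defaultdict.
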
    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
            outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly that is not what we want since we need to keep
            # '%%' intact for template dict substitution step. Working around
            # with boundary-alike separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
            filename = expand_path(outtmpl).replace(sep, '') % template_dict

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if force_ext is not None:
                filename = replace_extension(filename, force_ext, template_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        filename = self._prepare_filename(info_dict, dir_type or 'default')

        if warn and not self.__prepare_filename_warned:
            if not paths:
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout')
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template')
            self.__prepare_filename_warned = True
        if filename == '-' or not filename:
            return filename

        homepath = expand_path(paths.get('home', '').strip())
        assert isinstance(homepath, compat_str)
        subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
        assert isinstance(subdir, compat_str)
        path = os.path.join(homepath, subdir, filename)

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

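    # Sketch of how 'paths' and 'outtmpl' combine in prepare_filename();
    # the directory and info dict values are assumptions for illustration:
    #
    #   ydl = YoutubeDL({
    #       'paths': {'home': '~/Videos'},
    #       'outtmpl': {'default': '%(title)s-%(id)s.%(ext)s'},
    #   })
    #   info = {'id': 'xyz', 'title': 'Test', 'ext': 'mp4'}
    #   ydl.prepare_filename(info)
    #   # -> e.g. '/home/<user>/Videos/Test-xyz.mp4' after ~ expansion
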
    def _match_entry(self, info_dict, incomplete):
        """ Returns None if the file should be downloaded """

        def check_filter():
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title
            if self.in_download_archive(info_dict):
                return '%s has already been recorded in archive' % video_title

            if not incomplete:
                match_filter = self.params.get('match_filter')
                if match_filter is not None:
                    ret = match_filter(info_dict)
                    if ret is not None:
                        return ret
            return None

        reason = check_filter()
        if reason is not None:
            self.to_screen('[download] ' + reason)
            if reason.endswith('has already been recorded in archive') and self.params.get('break_on_existing', False):
                raise ExistingVideoReached()
            elif self.params.get('break_on_reject', False):
                raise RejectedVideoReached()
        return reason

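    # Sketch of the filtering behaviour: _match_entry() returns None when the
    # video should be downloaded and a human-readable reason otherwise.
    # The info dicts below are made up:
    #
    #   ydl = YoutubeDL({'matchtitle': 'cats'})
    #   ydl._match_entry({'id': '1', 'title': 'cats compilation'}, incomplete=False)
    #   # -> None (download)
    #   ydl._match_entry({'id': '2', 'title': 'dogs'}, incomplete=False)
    #   # -> '"dogs" title did not match pattern "cats"'
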
    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

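    # add_extra_info() only fills in keys that are missing, e.g.:
    #
    #   d = {'title': 'kept'}
    #   YoutubeDL.add_extra_info(d, {'title': 'ignored', 'playlist': 'pl'})
    #   # d == {'title': 'kept', 'playlist': 'pl'}
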
    def extract_info(self, url, download=True, ie_key=None, info_dict=None, extra_info={},
                     process=True, force_generic_extractor=False):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        '''

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            ie_key = ie.ie_key()
            ie = self.get_info_extractor(ie_key)
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                temp_id = str_or_none(
                    ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
                    else ie._match_id(url))
            except (AssertionError, IndexError, AttributeError):
                temp_id = None
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen("[%s] %s: has already been recorded in archive" % (
                    ie_key, temp_id))
                break
            return self.__extract_info(url, ie, download, extra_info, process, info_dict)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def __handle_extraction_exceptions(func):
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

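    # The decorator above implements a common pattern: wrap an extraction
    # method so that "expected" failures are reported (and swallowed when
    # 'ignoreerrors' is set) while control-flow exceptions such as
    # MaxDownloadsReached are re-raised. A stripped-down sketch of the same
    # idea (not the exact implementation used here):
    #
    #   def handle_errors(func):
    #       def wrapper(self, *args, **kwargs):
    #           try:
    #               return func(self, *args, **kwargs)
    #           except ExtractorError as e:
    #               self.report_error(str(e), e.format_traceback())
    #       return wrapper
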
    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process, info_dict):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if info_dict:
            if info_dict.get('id'):
                ie_result['id'] = info_dict['id']
            if info_dict.get('title'):
                ie_result['title'] = info_dict['title']
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'webpage_url': url,
            'webpage_url_basename': url_basename(url),
            'extractor_key': ie.ie_key(),
        })

    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download, info_dict=ie_result,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != 'video') but rather a url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

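    # Illustration of the 'url_transparent' merge above (made-up dicts): the
    # outer result's non-None fields are copied over the inner one, except for
    # identity fields such as 'id', 'extractor' and '_type':
    #
    #   outer = {'_type': 'url_transparent', 'url': 'https://example.com/embed',
    #            'ie_key': 'SomeIE', 'title': 'Outer title', 'id': None}
    #   inner = {'id': 'inner-id', 'title': 'Inner title', 'extractor': 'SomeIE'}
    #   # The merged result keeps inner['id'] and inner['extractor'] but takes
    #   # outer['title']; 'id': None in the outer dict is never forced.
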
    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    def __process_playlist(self, ie_result, download):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        if 'entries' not in ie_result:
            raise EntryNotInPlaylist()
        incomplete_entries = bool(ie_result.get('requested_entries'))
        if incomplete_entries:
            def fill_missing_entries(entries, indexes):
                ret = [None] * max(*indexes)
                for i, entry in zip(indexes, entries):
                    ret[i - 1] = entry
                return ret
            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])

        playlist_results = []

        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
            def iter_playlistitems(format):
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']

        def make_playlistitems_entries(list_ie_entries):
            num_entries = len(list_ie_entries)
            for i in playlistitems:
                if -num_entries < i <= num_entries:
                    yield list_ie_entries[i - 1]
                elif incomplete_entries:
                    raise EntryNotInPlaylist()

        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = list(make_playlistitems_entries(ie_entries))
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            msg = 'Collected %d videos; downloading %d of them' % (n_all_entries, n_entries)
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(
                        item - 1, item
                    ))
            else:
                entries = ie_entries.getslice(
                    playliststart, playlistend)
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries
        else:  # iterable
            if playlistitems:
                entries = list(make_playlistitems_entries(list(itertools.islice(
                    ie_entries, 0, max(playlistitems)))))
            else:
                entries = list(itertools.islice(
                    ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries

        if any((entry is None for entry in entries)):
            raise EntryNotInPlaylist()
        if not playlistitems and (playliststart or playlistend):
            playlistitems = list(range(1 + playliststart, 1 + playliststart + len(entries)))
        ie_result['entries'] = entries
        ie_result['requested_entries'] = playlistitems

        if self.params.get('allow_playlist_files', True):
            ie_copy = {
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': 0
            }
            ie_copy.update(dict(ie_result))

            if self.params.get('writeinfojson', False):
                infofn = self.prepare_filename(ie_copy, 'pl_infojson')
                if not self._ensure_dir_exists(encodeFilename(infofn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
                    self.to_screen('[info] Playlist metadata is already present')
                else:
                    self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
                    try:
                        write_json_file(self.filter_requested_info(ie_result, self.params.get('clean_infojson', True)), infofn)
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist metadata to JSON file ' + infofn)

            if self.params.get('writedescription', False):
                descfn = self.prepare_filename(ie_copy, 'pl_description')
                if not self._ensure_dir_exists(encodeFilename(descfn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
                    self.to_screen('[info] Playlist description is already present')
                elif ie_result.get('description') is None:
                    self.report_warning('There\'s no playlist description to write.')
                else:
                    try:
                        self.to_screen('[info] Writing playlist description to: ' + descfn)
                        with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                            descfile.write(ie_result['description'])
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist description file ' + descfn)
                        return

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]
        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg))
        for i, entry in enumerate(entries, 1):
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            extra = {
                'n_entries': n_entries,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': playlistitems[i - 1] if playlistitems else i,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            if self._match_entry(entry, incomplete=True) is not None:
                continue

            entry_result = self.__process_iterable_entry(entry, download, extra)
            # TODO: skip failed (empty) entries?
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
        return ie_result

a0566bbf 1342 @__handle_extraction_exceptions
1343 def __process_iterable_entry(self, entry, download, extra_info):
1344 return self.process_ie_result(
1345 entry, download=download, extra_info=extra_info)
1346
67134eab
JMF
1347 def _build_format_filter(self, filter_spec):
1348 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1349
1350 OPERATORS = {
1351 '<': operator.lt,
1352 '<=': operator.le,
1353 '>': operator.gt,
1354 '>=': operator.ge,
1355 '=': operator.eq,
1356 '!=': operator.ne,
1357 }
67134eab 1358 operator_rex = re.compile(r'''(?x)\s*
a03a3c80 1359 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
083c9df9
PH
1360 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1361 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
67134eab 1362 $
083c9df9 1363 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
67134eab 1364 m = operator_rex.search(filter_spec)
9ddb6925
S
1365 if m:
1366 try:
1367 comparison_value = int(m.group('value'))
1368 except ValueError:
1369 comparison_value = parse_filesize(m.group('value'))
1370 if comparison_value is None:
1371 comparison_value = parse_filesize(m.group('value') + 'B')
1372 if comparison_value is None:
1373 raise ValueError(
1374 'Invalid value %r in format specification %r' % (
67134eab 1375 m.group('value'), filter_spec))
9ddb6925
S
1376 op = OPERATORS[m.group('op')]
1377
083c9df9 1378 if not m:
9ddb6925
S
1379 STR_OPERATORS = {
1380 '=': operator.eq,
10d33b34
YCH
1381 '^=': lambda attr, value: attr.startswith(value),
1382 '$=': lambda attr, value: attr.endswith(value),
1383 '*=': lambda attr, value: value in attr,
9ddb6925 1384 }
67134eab 1385 str_operator_rex = re.compile(r'''(?x)
f96bff99 1386 \s*(?P<key>[a-zA-Z0-9._-]+)
2cc779f4 1387 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
b0df5223 1388 \s*(?P<value>[a-zA-Z0-9._-]+)
67134eab 1389 \s*$
9ddb6925 1390 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
67134eab 1391 m = str_operator_rex.search(filter_spec)
9ddb6925
S
1392 if m:
1393 comparison_value = m.group('value')
2cc779f4
S
1394 str_op = STR_OPERATORS[m.group('op')]
1395 if m.group('negation'):
e118a879 1396 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1397 else:
1398 op = str_op
083c9df9 1399
9ddb6925 1400 if not m:
67134eab 1401 raise ValueError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1402
1403 def _filter(f):
1404 actual_value = f.get(m.group('key'))
1405 if actual_value is None:
1406 return m.group('none_inclusive')
1407 return op(actual_value, comparison_value)
67134eab
JMF
1408 return _filter
1409
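# Illustrative sketch (editor's note, not part of YoutubeDL.py): how a numeric filter
# spec such as 'filesize>100M' behaves once _build_format_filter has parsed it.
# parse_filesize turns the value (retried with a trailing 'B' if needed) into a byte
# count of roughly 100 MB, and the resulting predicate is applied to each format dict.
# In the real filter a missing value yields the none_inclusive flag ('?' suffix)
# rather than a plain False. All values below are hypothetical.
import operator

def example_filesize_filter(fmt, threshold=100 * 1000 ** 2):
    actual_value = fmt.get('filesize')
    if actual_value is None:
        return False  # simplified: the real filter honours the '?' (none_inclusive) suffix here
    return operator.gt(actual_value, threshold)

assert example_filesize_filter({'filesize': 250 * 1000 ** 2}) is True
assert example_filesize_filter({'filesize': 50 * 1000 ** 2}) is False
assert example_filesize_filter({'format_id': 'hls-1'}) is False  # no filesize reported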
0017d9ad 1410 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1411
af0f7428
S
1412 def can_merge():
1413 merger = FFmpegMergerPP(self)
1414 return merger.available and merger.can_merge()
1415
91ebc640 1416 prefer_best = (
1417 not self.params.get('simulate', False)
1418 and download
1419 and (
1420 not can_merge()
19807826 1421 or info_dict.get('is_live', False)
de6000d9 1422 or self.outtmpl_dict['default'] == '-'))
91ebc640 1423
1424 return (
1425 'best/bestvideo+bestaudio'
1426 if prefer_best
1427 else 'bestvideo*+bestaudio/best'
19807826 1428 if not self.params.get('allow_multiple_audio_streams', False)
91ebc640 1429 else 'bestvideo+bestaudio/best')
0017d9ad 1430
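# Illustrative sketch (editor's note, not part of YoutubeDL.py): the decision made by
# _default_format_spec above, reduced to a pure function. 'can_merge' stands in for
# FFmpegMergerPP availability and 'to_stdout' for outtmpl == '-'; both parameter names
# are hypothetical and exist only for this example.
def example_default_format_spec(simulate, downloading, can_merge, is_live, to_stdout,
                                allow_multiple_audio_streams=False):
    prefer_best = (not simulate and downloading
                   and (not can_merge or is_live or to_stdout))
    if prefer_best:
        return 'best/bestvideo+bestaudio'
    if not allow_multiple_audio_streams:
        return 'bestvideo*+bestaudio/best'
    return 'bestvideo+bestaudio/best'

# Streaming to stdout (or a live stream, or no ffmpeg) forces a single pre-merged file:
assert example_default_format_spec(False, True, True, False, True) == 'best/bestvideo+bestaudio'
# A normal download with ffmpeg available prefers separate streams merged afterwards:
assert example_default_format_spec(False, True, True, False, False) == 'bestvideo*+bestaudio/best'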
67134eab
JMF
1431 def build_format_selector(self, format_spec):
1432 def syntax_error(note, start):
1433 message = (
1434 'Invalid format specification: '
1435 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1436 return SyntaxError(message)
1437
1438 PICKFIRST = 'PICKFIRST'
1439 MERGE = 'MERGE'
1440 SINGLE = 'SINGLE'
0130afb7 1441 GROUP = 'GROUP'
67134eab
JMF
1442 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1443
91ebc640 1444 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1445 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1446
67134eab
JMF
1447 def _parse_filter(tokens):
1448 filter_parts = []
1449 for type, string, start, _, _ in tokens:
1450 if type == tokenize.OP and string == ']':
1451 return ''.join(filter_parts)
1452 else:
1453 filter_parts.append(string)
1454
232541df 1455 def _remove_unused_ops(tokens):
17cc1534 1456 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1457 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1458 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1459 last_string, last_start, last_end, last_line = None, None, None, None
1460 for type, string, start, end, line in tokens:
1461 if type == tokenize.OP and string == '[':
1462 if last_string:
1463 yield tokenize.NAME, last_string, last_start, last_end, last_line
1464 last_string = None
1465 yield type, string, start, end, line
1466 # everything inside brackets will be handled by _parse_filter
1467 for type, string, start, end, line in tokens:
1468 yield type, string, start, end, line
1469 if type == tokenize.OP and string == ']':
1470 break
1471 elif type == tokenize.OP and string in ALLOWED_OPS:
1472 if last_string:
1473 yield tokenize.NAME, last_string, last_start, last_end, last_line
1474 last_string = None
1475 yield type, string, start, end, line
1476 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1477 if not last_string:
1478 last_string = string
1479 last_start = start
1480 last_end = end
1481 else:
1482 last_string += string
1483 if last_string:
1484 yield tokenize.NAME, last_string, last_start, last_end, last_line
1485
cf2ac6df 1486 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1487 selectors = []
1488 current_selector = None
1489 for type, string, start, _, _ in tokens:
1490 # ENCODING is only defined in python 3.x
1491 if type == getattr(tokenize, 'ENCODING', None):
1492 continue
1493 elif type in [tokenize.NAME, tokenize.NUMBER]:
1494 current_selector = FormatSelector(SINGLE, string, [])
1495 elif type == tokenize.OP:
cf2ac6df
JMF
1496 if string == ')':
1497 if not inside_group:
1498 # ')' will be handled by the parentheses group
1499 tokens.restore_last_token()
67134eab 1500 break
cf2ac6df 1501 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1502 tokens.restore_last_token()
1503 break
cf2ac6df
JMF
1504 elif inside_choice and string == ',':
1505 tokens.restore_last_token()
1506 break
1507 elif string == ',':
0a31a350
JMF
1508 if not current_selector:
1509 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1510 selectors.append(current_selector)
1511 current_selector = None
1512 elif string == '/':
d96d604e
JMF
1513 if not current_selector:
1514 raise syntax_error('"/" must follow a format selector', start)
67134eab 1515 first_choice = current_selector
cf2ac6df 1516 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1517 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1518 elif string == '[':
1519 if not current_selector:
1520 current_selector = FormatSelector(SINGLE, 'best', [])
1521 format_filter = _parse_filter(tokens)
1522 current_selector.filters.append(format_filter)
0130afb7
JMF
1523 elif string == '(':
1524 if current_selector:
1525 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1526 group = _parse_format_selection(tokens, inside_group=True)
1527 current_selector = FormatSelector(GROUP, group, [])
67134eab 1528 elif string == '+':
d03cfdce 1529 if not current_selector:
1530 raise syntax_error('Unexpected "+"', start)
1531 selector_1 = current_selector
1532 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1533 if not selector_2:
1534 raise syntax_error('Expected a selector', start)
1535 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab
JMF
1536 else:
1537 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1538 elif type == tokenize.ENDMARKER:
1539 break
1540 if current_selector:
1541 selectors.append(current_selector)
1542 return selectors
1543
1544 def _build_selector_function(selector):
909d24dd 1545 if isinstance(selector, list): # ,
67134eab
JMF
1546 fs = [_build_selector_function(s) for s in selector]
1547
317f7ab6 1548 def selector_function(ctx):
67134eab 1549 for f in fs:
317f7ab6 1550 for format in f(ctx):
67134eab
JMF
1551 yield format
1552 return selector_function
909d24dd 1553
1554 elif selector.type == GROUP: # ()
0130afb7 1555 selector_function = _build_selector_function(selector.selector)
909d24dd 1556
1557 elif selector.type == PICKFIRST: # /
67134eab
JMF
1558 fs = [_build_selector_function(s) for s in selector.selector]
1559
317f7ab6 1560 def selector_function(ctx):
67134eab 1561 for f in fs:
317f7ab6 1562 picked_formats = list(f(ctx))
67134eab
JMF
1563 if picked_formats:
1564 return picked_formats
1565 return []
67134eab 1566
909d24dd 1567 elif selector.type == SINGLE: # atom
1568 format_spec = selector.selector if selector.selector is not None else 'best'
1569
1570 if format_spec == 'all':
1571 def selector_function(ctx):
1572 formats = list(ctx['formats'])
1573 if formats:
1574 for f in formats:
1575 yield f
1576
1577 else:
1578 format_fallback = False
1579 format_spec_obj = re.match(r'(best|worst|b|w)(video|audio|v|a)?(\*)?$', format_spec)
1580 if format_spec_obj is not None:
1581 format_idx = 0 if format_spec_obj.group(1)[0] == 'w' else -1
1582 format_type = format_spec_obj.group(2)[0] if format_spec_obj.group(2) else False
1583 not_format_type = 'v' if format_type == 'a' else 'a'
1584 format_modified = format_spec_obj.group(3) is not None
1585
1586 format_fallback = not format_type and not format_modified # for b, w
1587 filter_f = ((lambda f: f.get(format_type + 'codec') != 'none')
1588 if format_type and format_modified # bv*, ba*, wv*, wa*
1589 else (lambda f: f.get(not_format_type + 'codec') == 'none')
1590 if format_type # bv, ba, wv, wa
1591 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1592 if not format_modified # b, w
1593 else None) # b*, w*
67134eab 1594 else:
909d24dd 1595 format_idx = -1
1596 filter_f = ((lambda f: f.get('ext') == format_spec)
1597 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1598 else (lambda f: f.get('format_id') == format_spec)) # id
1599
1600 def selector_function(ctx):
1601 formats = list(ctx['formats'])
1602 if not formats:
1603 return
1604 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
67134eab 1605 if matches:
909d24dd 1606 yield matches[format_idx]
1607 elif format_fallback == 'force' or (format_fallback and ctx['incomplete_formats']):
1608 # for extractors with incomplete formats (audio only (soundcloud)
1609 # or video only (imgur)) best/worst will fall back to
1610 # best/worst {video,audio}-only format
1611 yield formats[format_idx]
1612
1613 elif selector.type == MERGE: # +
d03cfdce 1614 def _merge(formats_pair):
1615 format_1, format_2 = formats_pair
1616
1617 formats_info = []
1618 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1619 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1620
909d24dd 1621 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1622 get_no_more = {"video": False, "audio": False}
1623 for (i, fmt_info) in enumerate(formats_info):
1624 for aud_vid in ["audio", "video"]:
1625 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1626 if get_no_more[aud_vid]:
1627 formats_info.pop(i)
1628 get_no_more[aud_vid] = True
1629
1630 if len(formats_info) == 1:
1631 return formats_info[0]
1632
d03cfdce 1633 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1634 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1635
1636 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1637 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1638
1639 output_ext = self.params.get('merge_output_format')
1640 if not output_ext:
1641 if the_only_video:
1642 output_ext = the_only_video['ext']
1643 elif the_only_audio and not video_fmts:
1644 output_ext = the_only_audio['ext']
1645 else:
1646 output_ext = 'mkv'
1647
1648 new_dict = {
67134eab 1649 'requested_formats': formats_info,
d03cfdce 1650 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1651 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
67134eab
JMF
1652 'ext': output_ext,
1653 }
d03cfdce 1654
1655 if the_only_video:
1656 new_dict.update({
1657 'width': the_only_video.get('width'),
1658 'height': the_only_video.get('height'),
35615307 1659 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
d03cfdce 1660 'fps': the_only_video.get('fps'),
1661 'vcodec': the_only_video.get('vcodec'),
1662 'vbr': the_only_video.get('vbr'),
1663 'stretched_ratio': the_only_video.get('stretched_ratio'),
1664 })
1665
1666 if the_only_audio:
1667 new_dict.update({
1668 'acodec': the_only_audio.get('acodec'),
1669 'abr': the_only_audio.get('abr'),
1670 })
1671
1672 return new_dict
1673
1674 selector_1, selector_2 = map(_build_selector_function, selector.selector)
083c9df9 1675
317f7ab6
S
1676 def selector_function(ctx):
1677 for pair in itertools.product(
d03cfdce 1678 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
67134eab 1679 yield _merge(pair)
083c9df9 1680
67134eab 1681 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1682
317f7ab6
S
1683 def final_selector(ctx):
1684 ctx_copy = copy.deepcopy(ctx)
67134eab 1685 for _filter in filters:
317f7ab6
S
1686 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1687 return selector_function(ctx_copy)
67134eab 1688 return final_selector
083c9df9 1689
67134eab 1690 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1691 try:
232541df 1692 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7
JMF
1693 except tokenize.TokenError:
1694 raise syntax_error('Missing closing/opening brackets or parentheses', (0, len(format_spec)))
1695
1696 class TokenIterator(object):
1697 def __init__(self, tokens):
1698 self.tokens = tokens
1699 self.counter = 0
1700
1701 def __iter__(self):
1702 return self
1703
1704 def __next__(self):
1705 if self.counter >= len(self.tokens):
1706 raise StopIteration()
1707 value = self.tokens[self.counter]
1708 self.counter += 1
1709 return value
1710
1711 next = __next__
1712
1713 def restore_last_token(self):
1714 self.counter -= 1
1715
1716 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1717 return _build_selector_function(parsed_selector)
a9c58ad9 1718
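# Illustrative sketch (editor's note, not part of YoutubeDL.py): the selector tree that
# the tokenizer/parser above should build for the spec 'bestvideo[height<=720]+bestaudio/best',
# hand-traced from _parse_format_selection, so treat it as illustrative rather than
# authoritative. '/' tries the left side first and falls back to the right, '+' merges a
# video and an audio selector, and '[...]' attaches a filter to the selector before it.
# The parser returns the right-hand operand of '+' and '/' as a one-element list.
import collections

FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
PICKFIRST, MERGE, SINGLE = 'PICKFIRST', 'MERGE', 'SINGLE'

expected_tree = [
    FormatSelector(PICKFIRST, (
        FormatSelector(MERGE, (
            FormatSelector(SINGLE, 'bestvideo', ['height<=720']),
            [FormatSelector(SINGLE, 'bestaudio', [])],
        ), []),
        [FormatSelector(SINGLE, 'best', [])],
    ), []),
]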
e5660ee6
JMF
1719 def _calc_headers(self, info_dict):
1720 res = std_headers.copy()
1721
1722 add_headers = info_dict.get('http_headers')
1723 if add_headers:
1724 res.update(add_headers)
1725
1726 cookies = self._calc_cookies(info_dict)
1727 if cookies:
1728 res['Cookie'] = cookies
1729
0016b84e
S
1730 if 'X-Forwarded-For' not in res:
1731 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1732 if x_forwarded_for_ip:
1733 res['X-Forwarded-For'] = x_forwarded_for_ip
1734
e5660ee6
JMF
1735 return res
1736
1737 def _calc_cookies(self, info_dict):
5c2266df 1738 pr = sanitized_Request(info_dict['url'])
e5660ee6 1739 self.cookiejar.add_cookie_header(pr)
662435f7 1740 return pr.get_header('Cookie')
e5660ee6 1741
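# Illustrative sketch (editor's note, not part of YoutubeDL.py): the header layering done
# by _calc_headers above, with plain dicts standing in for std_headers and a precomputed
# cookie header standing in for the cookie jar lookup. All header values are hypothetical.
def example_calc_headers(std_headers, info_dict, cookie_header=None):
    res = dict(std_headers)                          # global defaults first
    res.update(info_dict.get('http_headers') or {})  # per-format headers override them
    if cookie_header:
        res['Cookie'] = cookie_header
    x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
    if 'X-Forwarded-For' not in res and x_forwarded_for_ip:
        res['X-Forwarded-For'] = x_forwarded_for_ip
    return res

headers = example_calc_headers(
    {'User-Agent': 'Mozilla/5.0 (example)'},
    {'http_headers': {'Referer': 'https://example.com/watch'},
     '__x_forwarded_for_ip': '203.0.113.7'},
    cookie_header='session=abc123')
assert headers['Referer'] == 'https://example.com/watch'
assert headers['X-Forwarded-For'] == '203.0.113.7'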
dd82ffea
JMF
1742 def process_video_result(self, info_dict, download=True):
1743 assert info_dict.get('_type', 'video') == 'video'
1744
bec1fad2
PH
1745 if 'id' not in info_dict:
1746 raise ExtractorError('Missing "id" field in extractor result')
1747 if 'title' not in info_dict:
1748 raise ExtractorError('Missing "title" field in extractor result')
1749
c9969434
S
1750 def report_force_conversion(field, field_not, conversion):
1751 self.report_warning(
1752 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1753 % (field, field_not, conversion))
1754
1755 def sanitize_string_field(info, string_field):
1756 field = info.get(string_field)
1757 if field is None or isinstance(field, compat_str):
1758 return
1759 report_force_conversion(string_field, 'a string', 'string')
1760 info[string_field] = compat_str(field)
1761
1762 def sanitize_numeric_fields(info):
1763 for numeric_field in self._NUMERIC_FIELDS:
1764 field = info.get(numeric_field)
1765 if field is None or isinstance(field, compat_numeric_types):
1766 continue
1767 report_force_conversion(numeric_field, 'numeric', 'int')
1768 info[numeric_field] = int_or_none(field)
1769
1770 sanitize_string_field(info_dict, 'id')
1771 sanitize_numeric_fields(info_dict)
be6217b2 1772
dd82ffea
JMF
1773 if 'playlist' not in info_dict:
1774 # It isn't part of a playlist
1775 info_dict['playlist'] = None
1776 info_dict['playlist_index'] = None
1777
d5519808 1778 thumbnails = info_dict.get('thumbnails')
cfb56d1a
PH
1779 if thumbnails is None:
1780 thumbnail = info_dict.get('thumbnail')
1781 if thumbnail:
a7a14d95 1782 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
d5519808 1783 if thumbnails:
be6d7229 1784 thumbnails.sort(key=lambda t: (
d37708fc
RA
1785 t.get('preference') if t.get('preference') is not None else -1,
1786 t.get('width') if t.get('width') is not None else -1,
1787 t.get('height') if t.get('height') is not None else -1,
1788 t.get('id') if t.get('id') is not None else '', t.get('url')))
f6c24009 1789 for i, t in enumerate(thumbnails):
dcf77cf1 1790 t['url'] = sanitize_url(t['url'])
9603e8a7 1791 if t.get('width') and t.get('height'):
d5519808 1792 t['resolution'] = '%dx%d' % (t['width'], t['height'])
f6c24009
PH
1793 if t.get('id') is None:
1794 t['id'] = '%d' % i
d5519808 1795
b7b72db9 1796 if self.params.get('list_thumbnails'):
1797 self.list_thumbnails(info_dict)
1798 return
1799
536a55da
S
1800 thumbnail = info_dict.get('thumbnail')
1801 if thumbnail:
1802 info_dict['thumbnail'] = sanitize_url(thumbnail)
1803 elif thumbnails:
d5519808
PH
1804 info_dict['thumbnail'] = thumbnails[-1]['url']
1805
c9ae7b95 1806 if 'display_id' not in info_dict and 'id' in info_dict:
0afef30b
PH
1807 info_dict['display_id'] = info_dict['id']
1808
10db0d2f 1809 for ts_key, date_key in (
1810 ('timestamp', 'upload_date'),
1811 ('release_timestamp', 'release_date'),
1812 ):
1813 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
1814 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1815 # see http://bugs.python.org/issue1646728)
1816 try:
1817 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
1818 info_dict[date_key] = upload_date.strftime('%Y%m%d')
1819 except (ValueError, OverflowError, OSError):
1820 pass
9d2ecdbc 1821
33d2fc2f
S
1822 # Auto generate title fields corresponding to the *_number fields when missing
1823 # in order to always have clean titles. This is very common for TV series.
1824 for field in ('chapter', 'season', 'episode'):
1825 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1826 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1827
05108a49
S
1828 for cc_kind in ('subtitles', 'automatic_captions'):
1829 cc = info_dict.get(cc_kind)
1830 if cc:
1831 for _, subtitle in cc.items():
1832 for subtitle_format in subtitle:
1833 if subtitle_format.get('url'):
1834 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1835 if subtitle_format.get('ext') is None:
1836 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1837
1838 automatic_captions = info_dict.get('automatic_captions')
4bba3716 1839 subtitles = info_dict.get('subtitles')
4bba3716 1840
a504ced0 1841 if self.params.get('listsubtitles', False):
360e1ca5 1842 if 'automatic_captions' in info_dict:
05108a49
S
1843 self.list_subtitles(
1844 info_dict['id'], automatic_captions, 'automatic captions')
4bba3716 1845 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
a504ced0 1846 return
05108a49 1847
360e1ca5 1848 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 1849 info_dict['id'], subtitles, automatic_captions)
a504ced0 1850
dd82ffea
JMF
1851 # We now pick which formats have to be downloaded
1852 if info_dict.get('formats') is None:
1853 # There's only one format available
1854 formats = [info_dict]
1855 else:
1856 formats = info_dict['formats']
1857
db95dc13
PH
1858 if not formats:
1859 raise ExtractorError('No video formats found!')
1860
73af5cc8
S
1861 def is_wellformed(f):
1862 url = f.get('url')
a5ac0c47 1863 if not url:
73af5cc8
S
1864 self.report_warning(
1865 '"url" field is missing or empty - skipping format, '
1866 'there is an error in extractor')
a5ac0c47
S
1867 return False
1868 if isinstance(url, bytes):
1869 sanitize_string_field(f, 'url')
1870 return True
73af5cc8
S
1871
1872 # Filter out malformed formats for better extraction robustness
1873 formats = list(filter(is_wellformed, formats))
1874
181c7053
S
1875 formats_dict = {}
1876
dd82ffea 1877 # We check that all the formats have the format and format_id fields
db95dc13 1878 for i, format in enumerate(formats):
c9969434
S
1879 sanitize_string_field(format, 'format_id')
1880 sanitize_numeric_fields(format)
dcf77cf1 1881 format['url'] = sanitize_url(format['url'])
e74e3b63 1882 if not format.get('format_id'):
8016c922 1883 format['format_id'] = compat_str(i)
e2effb08
S
1884 else:
1885 # Sanitize format_id from characters used in format selector expression
ec85ded8 1886 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
1887 format_id = format['format_id']
1888 if format_id not in formats_dict:
1889 formats_dict[format_id] = []
1890 formats_dict[format_id].append(format)
1891
1892 # Make sure all formats have unique format_id
1893 for format_id, ambiguous_formats in formats_dict.items():
1894 if len(ambiguous_formats) > 1:
1895 for i, format in enumerate(ambiguous_formats):
1896 format['format_id'] = '%s-%d' % (format_id, i)
1897
1898 for i, format in enumerate(formats):
8c51aa65 1899 if format.get('format') is None:
6febd1c1 1900 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
1901 id=format['format_id'],
1902 res=self.format_resolution(format),
6febd1c1 1903 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
8c51aa65 1904 )
c1002e96 1905 # Automatically determine file extension if missing
5b1d8575 1906 if format.get('ext') is None:
cce929ea 1907 format['ext'] = determine_ext(format['url']).lower()
b5559424
S
1908 # Automatically determine protocol if missing (useful for format
1909 # selection purposes)
6f0be937 1910 if format.get('protocol') is None:
b5559424 1911 format['protocol'] = determine_protocol(format)
e5660ee6
JMF
1912 # Add HTTP headers, so that external programs can use them from the
1913 # json output
1914 full_format_info = info_dict.copy()
1915 full_format_info.update(format)
1916 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
1917 # Remove private housekeeping stuff
1918 if '__x_forwarded_for_ip' in info_dict:
1919 del info_dict['__x_forwarded_for_ip']
dd82ffea 1920
4bcc7bd1 1921 # TODO Central sorting goes here
99e206d5 1922
f89197d7 1923 if formats[0] is not info_dict:
b3d9ef88
JMF
1924 # only set the 'formats' field if the original info_dict lists them
1925 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 1926 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 1927 # which can't be exported to json
b3d9ef88 1928 info_dict['formats'] = formats
cfb56d1a 1929 if self.params.get('listformats'):
bfaae0a7 1930 self.list_formats(info_dict)
1931 return
1932
de3ef3ed 1933 req_format = self.params.get('format')
a9c58ad9 1934 if req_format is None:
0017d9ad
S
1935 req_format = self._default_format_spec(info_dict, download=download)
1936 if self.params.get('verbose'):
e8be92f9 1937 self.to_screen('[debug] Default format spec: %s' % req_format)
0017d9ad 1938
5acfa126 1939 format_selector = self.build_format_selector(req_format)
317f7ab6
S
1940
1941 # While in format selection we may need to have access to the original
1942 # format set in order to calculate some metrics or do some processing.
1943 # For now we need to be able to guess whether original formats provided
1944 # by extractor are incomplete or not (i.e. whether extractor provides only
1945 # video-only or audio-only formats) for proper formats selection for
1946 # extractors with such incomplete formats (see
067aa17e 1947 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6
S
1948 # Since formats may be filtered during format selection and may not match
1949 # the original formats the results may be incorrect. Thus original formats
1950 # or pre-calculated metrics should be passed to format selection routines
1951 # as well.
1952 # We will pass a context object containing all necessary additional data
1953 # instead of just formats.
1954 # This fixes incorrect format selection issue (see
067aa17e 1955 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 1956 incomplete_formats = (
317f7ab6 1957 # All formats are video-only or
3089bc74 1958 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 1959 # all formats are audio-only
3089bc74 1960 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
317f7ab6
S
1961
1962 ctx = {
1963 'formats': formats,
1964 'incomplete_formats': incomplete_formats,
1965 }
1966
1967 formats_to_download = list(format_selector(ctx))
dd82ffea 1968 if not formats_to_download:
6febd1c1 1969 raise ExtractorError('requested format not available',
78a3a9f8 1970 expected=True)
dd82ffea
JMF
1971
1972 if download:
909d24dd 1973 self.to_screen('[info] Downloading format(s) %s' % ", ".join([f['format_id'] for f in formats_to_download]))
dd82ffea 1974 if len(formats_to_download) > 1:
6febd1c1 1975 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
dd82ffea
JMF
1976 for format in formats_to_download:
1977 new_info = dict(info_dict)
1978 new_info.update(format)
1979 self.process_info(new_info)
1980 # We update the info dict with the best quality format (backwards compatibility)
1981 info_dict.update(formats_to_download[-1])
1982 return info_dict
1983
98c70d6f 1984 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 1985 """Select the requested subtitles and their format"""
98c70d6f
JMF
1986 available_subs = {}
1987 if normal_subtitles and self.params.get('writesubtitles'):
1988 available_subs.update(normal_subtitles)
1989 if automatic_captions and self.params.get('writeautomaticsub'):
1990 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
1991 if lang not in available_subs:
1992 available_subs[lang] = cap_info
1993
4d171848
JMF
1994 if (not self.params.get('writesubtitles') and not
1995 self.params.get('writeautomaticsub') or not
1996 available_subs):
1997 return None
a504ced0
JMF
1998
1999 if self.params.get('allsubtitles', False):
2000 requested_langs = available_subs.keys()
2001 else:
2002 if self.params.get('subtitleslangs', False):
2003 requested_langs = self.params.get('subtitleslangs')
2004 elif 'en' in available_subs:
2005 requested_langs = ['en']
2006 else:
2007 requested_langs = [list(available_subs.keys())[0]]
2008
2009 formats_query = self.params.get('subtitlesformat', 'best')
2010 formats_preference = formats_query.split('/') if formats_query else []
2011 subs = {}
2012 for lang in requested_langs:
2013 formats = available_subs.get(lang)
2014 if formats is None:
2015 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2016 continue
a504ced0
JMF
2017 for ext in formats_preference:
2018 if ext == 'best':
2019 f = formats[-1]
2020 break
2021 matches = list(filter(lambda f: f['ext'] == ext, formats))
2022 if matches:
2023 f = matches[-1]
2024 break
2025 else:
2026 f = formats[-1]
2027 self.report_warning(
2028 'No subtitle format found matching "%s" for language %s, '
2029 'using %s' % (formats_query, lang, f['ext']))
2030 subs[lang] = f
2031 return subs
2032
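# Illustrative sketch (editor's note, not part of YoutubeDL.py): how the subtitle format
# preference string (e.g. 'srt/best') is resolved per language by the loop above. The
# subtitle entries and URLs below are hypothetical.
def example_pick_subtitle(formats, formats_query='srt/best'):
    for ext in formats_query.split('/'):
        if ext == 'best':
            return formats[-1]                        # extractors list the best format last
        matches = [f for f in formats if f['ext'] == ext]
        if matches:
            return matches[-1]
    return formats[-1]                                # fallback (the real code warns here)

available = [{'ext': 'vtt', 'url': 'https://example.com/en.vtt'},
             {'ext': 'srt', 'url': 'https://example.com/en.srt'}]
assert example_pick_subtitle(available)['ext'] == 'srt'
assert example_pick_subtitle(available, 'best')['ext'] == 'srt'  # 'best' means the last entry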
d06daf23
S
2033 def __forced_printings(self, info_dict, filename, incomplete):
2034 def print_mandatory(field):
2035 if (self.params.get('force%s' % field, False)
2036 and (not incomplete or info_dict.get(field) is not None)):
2037 self.to_stdout(info_dict[field])
2038
2039 def print_optional(field):
2040 if (self.params.get('force%s' % field, False)
2041 and info_dict.get(field) is not None):
2042 self.to_stdout(info_dict[field])
2043
2044 print_mandatory('title')
2045 print_mandatory('id')
2046 if self.params.get('forceurl', False) and not incomplete:
2047 if info_dict.get('requested_formats') is not None:
2048 for f in info_dict['requested_formats']:
2049 self.to_stdout(f['url'] + f.get('play_path', ''))
2050 else:
2051 # For RTMP URLs, also include the playpath
2052 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2053 print_optional('thumbnail')
2054 print_optional('description')
2055 if self.params.get('forcefilename', False) and filename is not None:
2056 self.to_stdout(filename)
2057 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2058 self.to_stdout(formatSeconds(info_dict['duration']))
2059 print_mandatory('format')
2060 if self.params.get('forcejson', False):
277d6ff5 2061 self.post_extract(info_dict)
75d43ca0 2062 self.to_stdout(json.dumps(info_dict, default=repr))
d06daf23 2063
8222d8de
JMF
2064 def process_info(self, info_dict):
2065 """Process a single resolved IE result."""
2066
2067 assert info_dict.get('_type', 'video') == 'video'
fd288278 2068
0202b52a 2069 info_dict.setdefault('__postprocessors', [])
2070
fd288278
PH
2071 max_downloads = self.params.get('max_downloads')
2072 if max_downloads is not None:
2073 if self._num_downloads >= int(max_downloads):
2074 raise MaxDownloadsReached()
8222d8de 2075
d06daf23 2076 # TODO: backward compatibility, to be removed
8222d8de 2077 info_dict['fulltitle'] = info_dict['title']
8222d8de 2078
11b85ce6 2079 if 'format' not in info_dict:
8222d8de
JMF
2080 info_dict['format'] = info_dict['ext']
2081
8b0d7497 2082 if self._match_entry(info_dict, incomplete=False) is not None:
8222d8de
JMF
2083 return
2084
277d6ff5 2085 self.post_extract(info_dict)
fd288278 2086 self._num_downloads += 1
8222d8de 2087
5bfa4862 2088 info_dict = self.pre_process(info_dict)
2089
dcf64d43 2090 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2091 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2092 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2093 files_to_move = {}
de6000d9 2094 skip_dl = self.params.get('skip_download', False)
8222d8de
JMF
2095
2096 # Forced printings
0202b52a 2097 self.__forced_printings(info_dict, full_filename, incomplete=False)
8222d8de 2098
8222d8de 2099 if self.params.get('simulate', False):
2d30509f 2100 if self.params.get('force_write_download_archive', False):
2101 self.record_download_archive(info_dict)
2102
2103 # Do nothing else if in simulate mode
8222d8de
JMF
2104 return
2105
de6000d9 2106 if full_filename is None:
8222d8de
JMF
2107 return
2108
e92caff5 2109 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2110 return
e92caff5 2111 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2112 return
2113
2114 if self.params.get('writedescription', False):
de6000d9 2115 descfn = self.prepare_filename(info_dict, 'description')
e92caff5 2116 if not self._ensure_dir_exists(encodeFilename(descfn)):
0202b52a 2117 return
0c3d0f51 2118 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2119 self.to_screen('[info] Video description is already present')
f00fd51d
JMF
2120 elif info_dict.get('description') is None:
2121 self.report_warning('There\'s no description to write.')
7b6fefc9
PH
2122 else:
2123 try:
6febd1c1 2124 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9
PH
2125 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2126 descfile.write(info_dict['description'])
7b6fefc9 2127 except (OSError, IOError):
6febd1c1 2128 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2129 return
8222d8de 2130
1fb07d10 2131 if self.params.get('writeannotations', False):
de6000d9 2132 annofn = self.prepare_filename(info_dict, 'annotation')
e92caff5 2133 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2134 return
0c3d0f51 2135 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2136 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2137 elif not info_dict.get('annotations'):
2138 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2139 else:
2140 try:
6febd1c1 2141 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9
PH
2142 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2143 annofile.write(info_dict['annotations'])
2144 except (KeyError, TypeError):
6febd1c1 2145 self.report_warning('There are no annotations to write.')
7b6fefc9 2146 except (OSError, IOError):
6febd1c1 2147 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2148 return
1fb07d10 2149
9f448fcb 2150 def dl(name, info, subtitle=False):
98b69821 2151 fd = get_suitable_downloader(info, self.params)(self, self.params)
2152 for ph in self._progress_hooks:
2153 fd.add_progress_hook(ph)
2154 if self.params.get('verbose'):
29f7c58a 2155 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
46906886
DA
2156 new_info = dict(info)
2157 if new_info.get('http_headers') is None:
2158 new_info['http_headers'] = self._calc_headers(new_info)
2159 return fd.download(name, new_info, subtitle)
98b69821 2160
c4a91be7 2161 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2162 self.params.get('writeautomaticsub')])
c4a91be7 2163
c84dd8a9 2164 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de
JMF
2165 # Subtitle download errors are already managed as troubles in the relevant IE;
2166 # that way it will silently go on when used with an IE that doesn't support subtitles
c84dd8a9 2167 subtitles = info_dict['requested_subtitles']
fa57af1e 2168 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0
JMF
2169 for sub_lang, sub_info in subtitles.items():
2170 sub_format = sub_info['ext']
de6000d9 2171 sub_fn = self.prepare_filename(info_dict, 'subtitle')
2172 sub_filename = subtitles_filename(
0fd1a2b0 2173 temp_filename if not skip_dl else sub_fn,
0202b52a 2174 sub_lang, sub_format, info_dict.get('ext'))
de6000d9 2175 sub_filename_final = subtitles_filename(sub_fn, sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2176 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2177 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
dcf64d43 2178 sub_info['filepath'] = sub_filename
0202b52a 2179 files_to_move[sub_filename] = sub_filename_final
a504ced0 2180 else:
0c9df79e 2181 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c
RA
2182 if sub_info.get('data') is not None:
2183 try:
2184 # Use newline='' to prevent conversion of newline characters
067aa17e 2185 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c
RA
2186 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2187 subfile.write(sub_info['data'])
dcf64d43 2188 sub_info['filepath'] = sub_filename
0202b52a 2189 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c
RA
2190 except (OSError, IOError):
2191 self.report_error('Cannot write subtitles file ' + sub_filename)
2192 return
7b6fefc9 2193 else:
5ff1bc0c 2194 try:
dcf64d43 2195 dl(sub_filename, sub_info.copy(), subtitle=True)
2196 sub_info['filepath'] = sub_filename
0202b52a 2197 files_to_move[sub_filename] = sub_filename_final
0c9df79e 2198 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
5ff1bc0c
RA
2199 self.report_warning('Unable to download subtitle for "%s": %s' %
2200 (sub_lang, error_to_compat_str(err)))
2201 continue
8222d8de 2202
de6000d9 2203 if skip_dl:
57df9f53 2204 if self.params.get('convertsubtitles', False):
0202b52a 2205 # subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
de6000d9 2206 filename_real_ext = os.path.splitext(full_filename)[1][1:]
57df9f53 2207 filename_wo_ext = (
0202b52a 2208 os.path.splitext(full_filename)[0]
57df9f53 2209 if filename_real_ext == info_dict['ext']
0202b52a 2210 else full_filename)
57df9f53 2211 afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
0202b52a 2212 # if subconv.available:
2213 # info_dict['__postprocessors'].append(subconv)
57df9f53 2214 if os.path.exists(encodeFilename(afilename)):
f791b419
U
2215 self.to_screen(
2216 '[download] %s has already been downloaded and '
2217 'converted' % afilename)
57df9f53
U
2218 else:
2219 try:
0202b52a 2220 self.post_process(full_filename, info_dict, files_to_move)
af819c21 2221 except PostProcessingError as err:
2222 self.report_error('Postprocessing: %s' % str(err))
57df9f53
U
2223 return
2224
8222d8de 2225 if self.params.get('writeinfojson', False):
de6000d9 2226 infofn = self.prepare_filename(info_dict, 'infojson')
e92caff5 2227 if not self._ensure_dir_exists(encodeFilename(infofn)):
0202b52a 2228 return
0c3d0f51 2229 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2230 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2231 else:
66c935fb 2232 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2233 try:
75d43ca0 2234 write_json_file(self.filter_requested_info(info_dict, self.params.get('clean_infojson', True)), infofn)
7b6fefc9 2235 except (OSError, IOError):
66c935fb 2236 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2237 return
de6000d9 2238 info_dict['__infojson_filename'] = infofn
8222d8de 2239
de6000d9 2240 thumbfn = self.prepare_filename(info_dict, 'thumbnail')
2241 thumb_fn_temp = temp_filename if not skip_dl else thumbfn
2242 for thumb_ext in self._write_thumbnails(info_dict, thumb_fn_temp):
2243 thumb_filename_temp = replace_extension(thumb_fn_temp, thumb_ext, info_dict.get('ext'))
2244 thumb_filename = replace_extension(thumbfn, thumb_ext, info_dict.get('ext'))
dcf64d43 2245 files_to_move[thumb_filename_temp] = thumb_filename
8222d8de 2246
732044af 2247 # Write internet shortcut files
2248 url_link = webloc_link = desktop_link = False
2249 if self.params.get('writelink', False):
2250 if sys.platform == "darwin": # macOS.
2251 webloc_link = True
2252 elif sys.platform.startswith("linux"):
2253 desktop_link = True
2254 else: # if sys.platform in ['win32', 'cygwin']:
2255 url_link = True
2256 if self.params.get('writeurllink', False):
2257 url_link = True
2258 if self.params.get('writewebloclink', False):
2259 webloc_link = True
2260 if self.params.get('writedesktoplink', False):
2261 desktop_link = True
2262
2263 if url_link or webloc_link or desktop_link:
2264 if 'webpage_url' not in info_dict:
2265 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2266 return
2267 ascii_url = iri_to_uri(info_dict['webpage_url'])
2268
2269 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2270 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2271 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2272 self.to_screen('[info] Internet shortcut is already present')
2273 else:
2274 try:
2275 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2276 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2277 template_vars = {'url': ascii_url}
2278 if embed_filename:
2279 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2280 linkfile.write(template % template_vars)
2281 except (OSError, IOError):
2282 self.report_error('Cannot write internet shortcut ' + linkfn)
2283 return False
2284 return True
2285
2286 if url_link:
2287 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2288 return
2289 if webloc_link:
2290 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2291 return
2292 if desktop_link:
2293 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2294 return
2295
2296 # Download
2297 must_record_download_archive = False
de6000d9 2298 if not skip_dl:
4340deca 2299 try:
0202b52a 2300
6b591b29 2301 def existing_file(*filepaths):
2302 ext = info_dict.get('ext')
2303 final_ext = self.params.get('final_ext', ext)
2304 existing_files = []
2305 for file in orderedSet(filepaths):
2306 if final_ext != ext:
2307 converted = replace_extension(file, final_ext, ext)
2308 if os.path.exists(encodeFilename(converted)):
2309 existing_files.append(converted)
2310 if os.path.exists(encodeFilename(file)):
2311 existing_files.append(file)
2312
2313 if not existing_files or self.params.get('overwrites', False):
2314 for file in orderedSet(existing_files):
2315 self.report_file_delete(file)
2316 os.remove(encodeFilename(file))
2317 return None
2318
2319 self.report_file_already_downloaded(existing_files[0])
2320 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2321 return existing_files[0]
0202b52a 2322
2323 success = True
4340deca
P
2324 if info_dict.get('requested_formats') is not None:
2325 downloaded = []
d47aeb22 2326 merger = FFmpegMergerPP(self)
63ad4d43 2327 if self.params.get('allow_unplayable_formats'):
2328 self.report_warning(
2329 'You have requested merging of multiple formats '
2330 'while also allowing unplayable formats to be downloaded. '
2331 'The formats won\'t be merged to prevent data corruption.')
2332 elif not merger.available:
2333 self.report_warning(
2334 'You have requested merging of multiple formats but ffmpeg is not installed. '
2335 'The formats won\'t be merged.')
81cd954a
S
2336
2337 def compatible_formats(formats):
d03cfdce 2338 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2339 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2340 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2341 if len(video_formats) > 2 or len(audio_formats) > 2:
2342 return False
2343
81cd954a 2344 # Check extension
d03cfdce 2345 exts = set(format.get('ext') for format in formats)
2346 COMPATIBLE_EXTS = (
2347 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2348 set(('webm',)),
2349 )
2350 for ext_sets in COMPATIBLE_EXTS:
2351 if ext_sets.issuperset(exts):
2352 return True
81cd954a
S
2353 # TODO: Check acodec/vcodec
2354 return False
2355
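# Illustrative sketch (editor's note, not part of YoutubeDL.py): the extension sets that
# compatible_formats above treats as mergeable without remuxing to mkv (the real check
# additionally limits the number of video/audio streams). The extensions are hypothetical.
def example_exts_compatible(exts):
    COMPATIBLE_EXTS = (
        {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
        {'webm'},
    )
    return any(ext_set.issuperset(exts) for ext_set in COMPATIBLE_EXTS)

assert example_exts_compatible({'mp4', 'm4a'}) is True     # merged container stays mp4
assert example_exts_compatible({'webm'}) is True           # merged container stays webm
assert example_exts_compatible({'mp4', 'webm'}) is False   # would be remuxed into mkv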
2356 requested_formats = info_dict['requested_formats']
0202b52a 2357 old_ext = info_dict['ext']
4d971a16 2358 if self.params.get('merge_output_format') is None:
2359 if not compatible_formats(requested_formats):
2360 info_dict['ext'] = 'mkv'
2361 self.report_warning(
2362 'Requested formats are incompatible for merge and will be merged into mkv.')
2363 if (info_dict['ext'] == 'webm'
2364 and self.params.get('writethumbnail', False)
2365 and info_dict.get('thumbnails')):
2366 info_dict['ext'] = 'mkv'
2367 self.report_warning(
2368 'webm doesn\'t support embedding a thumbnail, mkv will be used.')
0202b52a 2369
2370 def correct_ext(filename):
2371 filename_real_ext = os.path.splitext(filename)[1][1:]
2372 filename_wo_ext = (
2373 os.path.splitext(filename)[0]
2374 if filename_real_ext == old_ext
2375 else filename)
2376 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2377
38c6902b 2378 # Ensure filename always has a correct extension for successful merge
0202b52a 2379 full_filename = correct_ext(full_filename)
2380 temp_filename = correct_ext(temp_filename)
2381 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2382 info_dict['__real_download'] = False
0202b52a 2383 if dl_filename is None:
81cd954a 2384 for f in requested_formats:
5b5fbc08
JMF
2385 new_info = dict(info_dict)
2386 new_info.update(f)
c5c9bf0c 2387 fname = prepend_extension(
de6000d9 2388 self.prepare_filename(new_info, 'temp'),
c5c9bf0c 2389 'f%s' % f['format_id'], new_info['ext'])
e92caff5 2390 if not self._ensure_dir_exists(fname):
c5c9bf0c 2391 return
5b5fbc08 2392 downloaded.append(fname)
a9e7f546 2393 partial_success, real_download = dl(fname, new_info)
1ea24129 2394 info_dict['__real_download'] = info_dict['__real_download'] or real_download
5b5fbc08 2395 success = success and partial_success
63ad4d43 2396 if merger.available and not self.params.get('allow_unplayable_formats'):
efabc161 2397 info_dict['__postprocessors'].append(merger)
1ea24129 2398 info_dict['__files_to_merge'] = downloaded
2399 # Even if there were no downloads, the formats are only being merged now
2400 info_dict['__real_download'] = True
42bb0c59 2401 else:
2402 for file in downloaded:
2403 files_to_move[file] = None
4340deca
P
2404 else:
2405 # Just a single file
0202b52a 2406 dl_filename = existing_file(full_filename, temp_filename)
2407 if dl_filename is None:
2408 success, real_download = dl(temp_filename, info_dict)
2409 info_dict['__real_download'] = real_download
2410
0202b52a 2411 dl_filename = dl_filename or temp_filename
c571435f 2412 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2413
4340deca 2414 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
7960b056 2415 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca
P
2416 return
2417 except (OSError, IOError) as err:
2418 raise UnavailableVideoError(err)
2419 except (ContentTooShortError, ) as err:
2420 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2421 return
8222d8de 2422
de6000d9 2423 if success and full_filename != '-':
6271f1ca 2424 # Fixup content
62cd676c
PH
2425 fixup_policy = self.params.get('fixup')
2426 if fixup_policy is None:
2427 fixup_policy = 'detect_or_warn'
2428
e4172ac9 2429 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
d1e4a464 2430
6271f1ca
PH
2431 stretched_ratio = info_dict.get('stretched_ratio')
2432 if stretched_ratio is not None and stretched_ratio != 1:
6271f1ca
PH
2433 if fixup_policy == 'warn':
2434 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2435 info_dict['id'], stretched_ratio))
2436 elif fixup_policy == 'detect_or_warn':
2437 stretched_pp = FFmpegFixupStretchedPP(self)
2438 if stretched_pp.available:
6271f1ca
PH
2439 info_dict['__postprocessors'].append(stretched_pp)
2440 else:
2441 self.report_warning(
d1e4a464
S
2442 '%s: Non-uniform pixel ratio (%s). %s'
2443 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
6271f1ca 2444 else:
62cd676c
PH
2445 assert fixup_policy in ('ignore', 'never')
2446
3089bc74 2447 if (info_dict.get('requested_formats') is None
6b591b29 2448 and info_dict.get('container') == 'm4a_dash'
2449 and info_dict.get('ext') == 'm4a'):
62cd676c 2450 if fixup_policy == 'warn':
d1e4a464
S
2451 self.report_warning(
2452 '%s: writing DASH m4a. '
2453 'Only some players support this container.'
2454 % info_dict['id'])
62cd676c
PH
2455 elif fixup_policy == 'detect_or_warn':
2456 fixup_pp = FFmpegFixupM4aPP(self)
2457 if fixup_pp.available:
62cd676c
PH
2458 info_dict['__postprocessors'].append(fixup_pp)
2459 else:
2460 self.report_warning(
d1e4a464
S
2461 '%s: writing DASH m4a. '
2462 'Only some players support this container. %s'
2463 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
62cd676c
PH
2464 else:
2465 assert fixup_policy in ('ignore', 'never')
6271f1ca 2466
0a473f2f 2467 if ('protocol' in info_dict
2468 and get_suitable_downloader(info_dict, self.params).__name__ == 'HlsFD'):
f17f8651 2469 if fixup_policy == 'warn':
a02682fd 2470 self.report_warning('%s: malformed AAC bitstream detected.' % (
f17f8651 2471 info_dict['id']))
2472 elif fixup_policy == 'detect_or_warn':
2473 fixup_pp = FFmpegFixupM3u8PP(self)
2474 if fixup_pp.available:
f17f8651 2475 info_dict['__postprocessors'].append(fixup_pp)
2476 else:
2477 self.report_warning(
a02682fd 2478 '%s: malformed AAC bitstream detected. %s'
d1e4a464 2479 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
f17f8651 2480 else:
2481 assert fixup_policy in ('ignore', 'never')
2482
8222d8de 2483 try:
23c1a667 2484 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2485 except PostProcessingError as err:
2486 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2487 return
ab8e5e51
AM
2488 try:
2489 for ph in self._post_hooks:
23c1a667 2490 ph(info_dict['filepath'])
ab8e5e51
AM
2491 except Exception as err:
2492 self.report_error('post hooks: %s' % str(err))
2493 return
2d30509f 2494 must_record_download_archive = True
2495
2496 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2497 self.record_download_archive(info_dict)
c3e6ffba 2498 max_downloads = self.params.get('max_downloads')
2499 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2500 raise MaxDownloadsReached()
8222d8de
JMF
2501
2502 def download(self, url_list):
2503 """Download a given list of URLs."""
de6000d9 2504 outtmpl = self.outtmpl_dict['default']
3089bc74
S
2505 if (len(url_list) > 1
2506 and outtmpl != '-'
2507 and '%' not in outtmpl
2508 and self.params.get('max_downloads') != 1):
acd69589 2509 raise SameFileError(outtmpl)
8222d8de
JMF
2510
2511 for url in url_list:
2512 try:
5f6a1245 2513 # It also downloads the videos
61aa5ba3
S
2514 res = self.extract_info(
2515 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2516 except UnavailableVideoError:
6febd1c1 2517 self.report_error('unable to download video')
8222d8de 2518 except MaxDownloadsReached:
8b0d7497 2519 self.to_screen('[info] Maximum number of downloaded files reached')
2520 raise
2521 except ExistingVideoReached:
d83cb531 2522 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2523 raise
2524 except RejectedVideoReached:
d83cb531 2525 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
8222d8de 2526 raise
63e0be34
PH
2527 else:
2528 if self.params.get('dump_single_json', False):
277d6ff5 2529 self.post_extract(res)
75d43ca0 2530 self.to_stdout(json.dumps(res, default=repr))
8222d8de
JMF
2531
2532 return self._download_retcode
2533
1dcc4c0c 2534 def download_with_info_file(self, info_filename):
31bd3925
JMF
2535 with contextlib.closing(fileinput.FileInput(
2536 [info_filename], mode='r',
2537 openhook=fileinput.hook_encoded('utf-8'))) as f:
2538 # FileInput doesn't have a read method, so we can't call json.load
498f5606 2539 info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898
JMF
2540 try:
2541 self.process_ie_result(info, download=True)
498f5606 2542 except (DownloadError, EntryNotInPlaylist):
d4943898
JMF
2543 webpage_url = info.get('webpage_url')
2544 if webpage_url is not None:
6febd1c1 2545 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898
JMF
2546 return self.download([webpage_url])
2547 else:
2548 raise
2549 return self._download_retcode
1dcc4c0c 2550
cb202fd2 2551 @staticmethod
75d43ca0 2552 def filter_requested_info(info_dict, actually_filter=True):
2553 if not actually_filter:
394dcd44 2554 info_dict['epoch'] = int(time.time())
75d43ca0 2555 return info_dict
5226731e 2556 exceptions = {
498f5606 2557 'remove': ['requested_formats', 'requested_subtitles', 'requested_entries', 'filepath', 'entries'],
5226731e 2558 'keep': ['_type'],
2559 }
2560 keep_key = lambda k: k in exceptions['keep'] or not (k.startswith('_') or k in exceptions['remove'])
2561 filter_fn = lambda obj: (
a515a78d 2562 list(map(filter_fn, obj)) if isinstance(obj, (list, tuple))
2563 else obj if not isinstance(obj, dict)
2564 else dict((k, filter_fn(v)) for k, v in obj.items() if keep_key(k)))
5226731e 2565 return filter_fn(info_dict)
cb202fd2 2566
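# Illustrative sketch (editor's note, not part of YoutubeDL.py): what filter_requested_info
# keeps and drops when writing an info JSON. Keys starting with '_' are removed (except
# '_type'), as are bookkeeping keys such as 'filepath' and 'requested_formats', and the
# filtering recurses into nested dicts and lists. All values below are hypothetical.
sample_info = {
    '_type': 'video',
    '_filename': 'clip.mp4',              # dropped: leading underscore
    'id': 'abc123',
    'title': 'Example clip',
    'filepath': '/tmp/clip.mp4',          # dropped: listed under 'remove'
    'formats': [{'format_id': '22', '__needs_testing': True}],  # nested key dropped too
}
expected_after_filtering = {
    '_type': 'video',
    'id': 'abc123',
    'title': 'Example clip',
    'formats': [{'format_id': '22'}],
}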
dcf64d43 2567 def run_pp(self, pp, infodict):
5bfa4862 2568 files_to_delete = []
dcf64d43 2569 if '__files_to_move' not in infodict:
2570 infodict['__files_to_move'] = {}
af819c21 2571 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2572 if not files_to_delete:
dcf64d43 2573 return infodict
5bfa4862 2574
2575 if self.params.get('keepvideo', False):
2576 for f in files_to_delete:
dcf64d43 2577 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 2578 else:
2579 for old_filename in set(files_to_delete):
2580 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2581 try:
2582 os.remove(encodeFilename(old_filename))
2583 except (IOError, OSError):
2584 self.report_warning('Unable to remove downloaded original file')
dcf64d43 2585 if old_filename in infodict['__files_to_move']:
2586 del infodict['__files_to_move'][old_filename]
2587 return infodict
5bfa4862 2588
277d6ff5 2589 @staticmethod
2590 def post_extract(info_dict):
2591 def actual_post_extract(info_dict):
2592 if info_dict.get('_type') in ('playlist', 'multi_video'):
2593 for video_dict in info_dict.get('entries', {}):
b050d210 2594 actual_post_extract(video_dict or {})
277d6ff5 2595 return
2596
2597 if '__post_extractor' not in info_dict:
2598 return
2599 post_extractor = info_dict['__post_extractor']
2600 if post_extractor:
2601 info_dict.update(post_extractor().items())
2602 del info_dict['__post_extractor']
2603 return
2604
b050d210 2605 actual_post_extract(info_dict or {})
277d6ff5 2606
5bfa4862 2607 def pre_process(self, ie_info):
2608 info = dict(ie_info)
2609 for pp in self._pps['beforedl']:
dcf64d43 2610 info = self.run_pp(pp, info)
5bfa4862 2611 return info
2612
dcf64d43 2613 def post_process(self, filename, ie_info, files_to_move=None):
8222d8de
JMF
2614 """Run all the postprocessors on the given file."""
2615 info = dict(ie_info)
2616 info['filepath'] = filename
dcf64d43 2617 info['__files_to_move'] = files_to_move or {}
0202b52a 2618
5bfa4862 2619 for pp in ie_info.get('__postprocessors', []) + self._pps['normal']:
dcf64d43 2620 info = self.run_pp(pp, info)
2621 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2622 del info['__files_to_move']
5bfa4862 2623 for pp in self._pps['aftermove']:
dcf64d43 2624 info = self.run_pp(pp, info)
23c1a667 2625 return info
c1c9a79c 2626
5db07df6 2627 def _make_archive_id(self, info_dict):
e9fef7ee
S
2628 video_id = info_dict.get('id')
2629 if not video_id:
2630 return
5db07df6
PH
2631 # The extractor key is lower-cased to be future-proof against changes in its case
2632 # and for backwards compatibility with prior versions
e9fef7ee 2633 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 2634 if extractor is None:
1211bb6d
S
2635 url = str_or_none(info_dict.get('url'))
2636 if not url:
2637 return
e9fef7ee
S
2638 # Try to find matching extractor for the URL and take its ie_key
2639 for ie in self._ies:
1211bb6d 2640 if ie.suitable(url):
e9fef7ee
S
2641 extractor = ie.ie_key()
2642 break
2643 else:
2644 return
d0757229 2645 return '%s %s' % (extractor.lower(), video_id)
5db07df6
PH
2646
2647 def in_download_archive(self, info_dict):
2648 fn = self.params.get('download_archive')
2649 if fn is None:
2650 return False
2651
2652 vid_id = self._make_archive_id(info_dict)
e9fef7ee 2653 if not vid_id:
7012b23c 2654 return False # Incomplete video information
5db07df6 2655
a45e8619 2656 return vid_id in self.archive
c1c9a79c
PH
2657
2658 def record_download_archive(self, info_dict):
2659 fn = self.params.get('download_archive')
2660 if fn is None:
2661 return
5db07df6
PH
2662 vid_id = self._make_archive_id(info_dict)
2663 assert vid_id
c1c9a79c 2664 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 2665 archive_file.write(vid_id + '\n')
a45e8619 2666 self.archive.add(vid_id)
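# Editor's note (illustrative): each archive entry is '<extractor key, lower-cased> <video id>'
# on its own line, so a --download-archive file might contain e.g.
#   youtube dQw4w9WgXcQ
# which marks that video as already downloaded.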
dd82ffea 2667
8c51aa65 2668 @staticmethod
8abeeb94 2669 def format_resolution(format, default='unknown'):
fb04e403
PH
2670 if format.get('vcodec') == 'none':
2671 return 'audio only'
f49d89ee
PH
2672 if format.get('resolution') is not None:
2673 return format['resolution']
35615307
DA
2674 if format.get('width') and format.get('height'):
2675 res = '%dx%d' % (format['width'], format['height'])
2676 elif format.get('height'):
2677 res = '%sp' % format['height']
2678 elif format.get('width'):
388ae76b 2679 res = '%dx?' % format['width']
8c51aa65 2680 else:
8abeeb94 2681 res = default
8c51aa65
JMF
2682 return res
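# Editor's note (examples follow directly from the code above):
#   format_resolution({'width': 1920, 'height': 1080})  -> '1920x1080'
#   format_resolution({'height': 720})                  -> '720p'
#   format_resolution({'vcodec': 'none'})               -> 'audio only'
#   format_resolution({})                               -> 'unknown'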
2683
c57f7757
PH
2684 def _format_note(self, fdict):
2685 res = ''
2686 if fdict.get('ext') in ['f4f', 'f4m']:
2687 res += '(unsupported) '
32f90364
PH
2688 if fdict.get('language'):
2689 if res:
2690 res += ' '
9016d76f 2691 res += '[%s] ' % fdict['language']
c57f7757
PH
2692 if fdict.get('format_note') is not None:
2693 res += fdict['format_note'] + ' '
2694 if fdict.get('tbr') is not None:
2695 res += '%4dk ' % fdict['tbr']
2696 if fdict.get('container') is not None:
2697 if res:
2698 res += ', '
2699 res += '%s container' % fdict['container']
3089bc74
S
2700 if (fdict.get('vcodec') is not None
2701 and fdict.get('vcodec') != 'none'):
c57f7757
PH
2702 if res:
2703 res += ', '
2704 res += fdict['vcodec']
91c7271a 2705 if fdict.get('vbr') is not None:
c57f7757
PH
2706 res += '@'
2707 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2708 res += 'video@'
2709 if fdict.get('vbr') is not None:
2710 res += '%4dk' % fdict['vbr']
fbb21cf5 2711 if fdict.get('fps') is not None:
5d583bdf
S
2712 if res:
2713 res += ', '
2714 res += '%sfps' % fdict['fps']
c57f7757
PH
2715 if fdict.get('acodec') is not None:
2716 if res:
2717 res += ', '
2718 if fdict['acodec'] == 'none':
2719 res += 'video only'
2720 else:
2721 res += '%-5s' % fdict['acodec']
2722 elif fdict.get('abr') is not None:
2723 if res:
2724 res += ', '
2725 res += 'audio'
2726 if fdict.get('abr') is not None:
2727 res += '@%3dk' % fdict['abr']
2728 if fdict.get('asr') is not None:
2729 res += ' (%5dHz)' % fdict['asr']
2730 if fdict.get('filesize') is not None:
2731 if res:
2732 res += ', '
2733 res += format_bytes(fdict['filesize'])
9732d77e
PH
2734 elif fdict.get('filesize_approx') is not None:
2735 if res:
2736 res += ', '
2737 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 2738 return res
91c7271a 2739
76d321f6 2740 def _format_note_table(self, f):
2741 def join_fields(*vargs):
2742 return ', '.join((val for val in vargs if val != ''))
2743
2744 return join_fields(
2745 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2746 format_field(f, 'language', '[%s]'),
2747 format_field(f, 'format_note'),
2748 format_field(f, 'container', ignore=(None, f.get('ext'))),
2749 format_field(f, 'asr', '%5dHz'))
2750
c57f7757 2751 def list_formats(self, info_dict):
94badb25 2752 formats = info_dict.get('formats', [info_dict])
76d321f6 2753 new_format = self.params.get('listformats_table', False)
2754 if new_format:
2755 table = [
2756 [
2757 format_field(f, 'format_id'),
2758 format_field(f, 'ext'),
2759 self.format_resolution(f),
2760 format_field(f, 'fps', '%d'),
2761 '|',
2762 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2763 format_field(f, 'tbr', '%4dk'),
fb198a8a 2764 format_field(f, 'protocol').replace('http_dash_segments', 'dash').replace('native', 'n').replace('niconico_', ''),
76d321f6 2765 '|',
2766 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2767 format_field(f, 'vbr', '%4dk'),
2768 format_field(f, 'acodec', default='unknown').replace('none', ''),
2769 format_field(f, 'abr', '%3dk'),
2770 format_field(f, 'asr', '%5dHz'),
2771 self._format_note_table(f)]
2772 for f in formats
2773 if f.get('preference') is None or f['preference'] >= -1000]
2774 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2775 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2776 else:
2777 table = [
2778 [
2779 format_field(f, 'format_id'),
2780 format_field(f, 'ext'),
2781 self.format_resolution(f),
2782 self._format_note(f)]
2783 for f in formats
2784 if f.get('preference') is None or f['preference'] >= -1000]
2785 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 2786
cfb56d1a 2787 self.to_screen(
76d321f6 2788 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2789 header_line,
2790 table,
2791 delim=new_format,
2792 extraGap=(0 if new_format else 1),
2793 hideEmpty=new_format)))
cfb56d1a
PH
2794
2795 def list_thumbnails(self, info_dict):
2796 thumbnails = info_dict.get('thumbnails')
2797 if not thumbnails:
b7b72db9 2798 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2799 return
cfb56d1a
PH
2800
2801 self.to_screen(
2802 '[info] Thumbnails for %s:' % info_dict['id'])
2803 self.to_screen(render_table(
2804 ['ID', 'width', 'height', 'URL'],
2805 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 2806
360e1ca5 2807 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 2808 if not subtitles:
360e1ca5 2809 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 2810 return
a504ced0 2811 self.to_screen(
edab9dbf
JMF
2812 'Available %s for %s:' % (name, video_id))
2813 self.to_screen(render_table(
2814 ['Language', 'formats'],
2815 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2816 for lang, formats in subtitles.items()]))
a504ced0 2817
dca08720
PH
2818 def urlopen(self, req):
2819 """ Start an HTTP download """
82d8a8b6 2820 if isinstance(req, compat_basestring):
67dda517 2821 req = sanitized_Request(req)
19a41fc6 2822 return self._opener.open(req, timeout=self._socket_timeout)
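# Editor's sketch (illustrative; 'ydl' is an assumed YoutubeDL instance): plain URLs are
# wrapped in a sanitized Request first, so both forms are equivalent:
#   data = ydl.urlopen('https://example.com/thumb.jpg').read()
#   data = ydl.urlopen(sanitized_Request('https://example.com/thumb.jpg')).read()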
dca08720
PH
2823
2824 def print_debug_header(self):
2825 if not self.params.get('verbose'):
2826 return
62fec3b2 2827
4192b51c 2828 if type('') is not compat_str:
067aa17e 2829 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
4192b51c
PH
2830 self.report_warning(
2831 'Your Python is broken! Update to a newer and supported version')
2832
c6afed48
PH
2833 stdout_encoding = getattr(
2834 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 2835 encoding_str = (
734f90bb
PH
2836 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2837 locale.getpreferredencoding(),
2838 sys.getfilesystemencoding(),
c6afed48 2839 stdout_encoding,
b0472057 2840 self.get_encoding()))
4192b51c 2841 write_string(encoding_str, encoding=None)
734f90bb 2842
e5813e53 2843 source = (
2844 '(exe)' if hasattr(sys, 'frozen')
2845 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2846 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2847 else '')
2848 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 2849 if _LAZY_LOADER:
f74980cb 2850 self._write_string('[debug] Lazy loading extractors enabled\n')
2851 if _PLUGIN_CLASSES:
2852 self._write_string(
2853 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
dca08720
PH
2854 try:
2855 sp = subprocess.Popen(
2856 ['git', 'rev-parse', '--short', 'HEAD'],
2857 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2858 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 2859 out, err = process_communicate_or_kill(sp)
dca08720
PH
2860 out = out.decode().strip()
2861 if re.match('[0-9a-f]+', out):
f74980cb 2862 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 2863 except Exception:
dca08720
PH
2864 try:
2865 sys.exc_clear()
70a1165b 2866 except Exception:
dca08720 2867 pass
b300cda4
S
2868
2869 def python_implementation():
2870 impl_name = platform.python_implementation()
2871 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2872 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2873 return impl_name
2874
e5813e53 2875 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2876 platform.python_version(),
2877 python_implementation(),
2878 platform.architecture()[0],
b300cda4 2879 platform_name()))
d28b5171 2880
73fac4e9 2881 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 2882 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 2883 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171
PH
2884 exe_str = ', '.join(
2885 '%s %s' % (exe, v)
2886 for exe, v in sorted(exe_versions.items())
2887 if v
2888 )
2889 if not exe_str:
2890 exe_str = 'none'
2891 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720
PH
2892
2893 proxy_map = {}
2894 for handler in self._opener.handlers:
2895 if hasattr(handler, 'proxies'):
2896 proxy_map.update(handler.proxies)
734f90bb 2897 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 2898
58b1f00d
PH
2899 if self.params.get('call_home', False):
2900 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2901 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 2902 return
58b1f00d
PH
2903 latest_version = self.urlopen(
2904 'https://yt-dl.org/latest/version').read().decode('utf-8')
2905 if version_tuple(latest_version) > version_tuple(__version__):
2906 self.report_warning(
2907 'You are using an outdated version (newest version: %s)! '
2908 'See https://yt-dl.org/update if you need help updating.' %
2909 latest_version)
2910
e344693b 2911 def _setup_opener(self):
6ad14cab 2912 timeout_val = self.params.get('socket_timeout')
19a41fc6 2913 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 2914
dca08720
PH
2915 opts_cookiefile = self.params.get('cookiefile')
2916 opts_proxy = self.params.get('proxy')
2917
2918 if opts_cookiefile is None:
2919 self.cookiejar = compat_cookiejar.CookieJar()
2920 else:
590bc6f6 2921 opts_cookiefile = expand_path(opts_cookiefile)
1bab3437 2922 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
dca08720 2923 if os.access(opts_cookiefile, os.R_OK):
1d88b3e6 2924 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
dca08720 2925
6a3f4c3f 2926 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
2927 if opts_proxy is not None:
2928 if opts_proxy == '':
2929 proxies = {}
2930 else:
2931 proxies = {'http': opts_proxy, 'https': opts_proxy}
2932 else:
2933 proxies = compat_urllib_request.getproxies()
067aa17e 2934 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
2935 if 'http' in proxies and 'https' not in proxies:
2936 proxies['https'] = proxies['http']
91410c9b 2937 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
2938
2939 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
2940 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2941 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 2942 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 2943 data_handler = compat_urllib_request_DataHandler()
6240b0a2
JMF
2944
2945 # When passing our own FileHandler instance, build_opener won't add the
2946 # default FileHandler and allows us to disable the file protocol, which
2947 # can be used for malicious purposes (see
067aa17e 2948 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2
JMF
2949 file_handler = compat_urllib_request.FileHandler()
2950
2951 def file_open(*args, **kwargs):
7a5c1cfe 2952 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
2953 file_handler.file_open = file_open
2954
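# Editor's note (illustrative): with the overridden file_open, any file:// request going
# through this opener is rejected, e.g.
#   ydl.urlopen('file:///etc/passwd')  # raises URLError('file:// scheme is explicitly disabled ...')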
2955 opener = compat_urllib_request.build_opener(
fca6dba8 2956 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 2957
dca08720
PH
2958 # Delete the default user-agent header, which would otherwise apply in
2959 # cases where our custom HTTP handler doesn't come into play
067aa17e 2960 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
2961 opener.addheaders = []
2962 self._opener = opener
62fec3b2
PH
2963
2964 def encode(self, s):
2965 if isinstance(s, bytes):
2966 return s # Already encoded
2967
2968 try:
2969 return s.encode(self.get_encoding())
2970 except UnicodeEncodeError as err:
2971 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2972 raise
2973
2974 def get_encoding(self):
2975 encoding = self.params.get('encoding')
2976 if encoding is None:
2977 encoding = preferredencoding()
2978 return encoding
ec82d85a 2979
de6000d9 2980 def _write_thumbnails(self, info_dict, filename): # returns the list of written thumbnail extensions
6c4fd172 2981 write_all = self.params.get('write_all_thumbnails', False)
2982 thumbnails = []
2983 if write_all or self.params.get('writethumbnail', False):
0202b52a 2984 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 2985 multiple = write_all and len(thumbnails) > 1
ec82d85a 2986
0202b52a 2987 ret = []
6c4fd172 2988 for t in thumbnails[::1 if write_all else -1]:
ec82d85a 2989 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 2990 suffix = '%s.' % t['id'] if multiple else ''
2991 thumb_display_id = '%s ' % t['id'] if multiple else ''
dcf64d43 2992 t['filepath'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 2993
0c3d0f51 2994 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 2995 ret.append(suffix + thumb_ext)
ec82d85a
PH
2996 self.to_screen('[%s] %s: Thumbnail %sis already present' %
2997 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2998 else:
5ef7d9bd 2999 self.to_screen('[%s] %s: Downloading thumbnail %s...' %
ec82d85a
PH
3000 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3001 try:
3002 uf = self.urlopen(t['url'])
d3d89c32 3003 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3004 shutil.copyfileobj(uf, thumbf)
de6000d9 3005 ret.append(suffix + thumb_ext)
ec82d85a
PH
3006 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3007 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
3008 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
3009 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 3010 (t['url'], error_to_compat_str(err)))
6c4fd172 3011 if ret and not write_all:
3012 break
0202b52a 3013 return ret
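# Editor's note (illustrative filenames): with write_all_thumbnails and several thumbnails,
# each file gets the thumbnail id as a suffix, e.g. 'video.mp4' -> 'video.0.jpg', 'video.1.jpg';
# with only writethumbnail set, just the preferred thumbnail is written as 'video.jpg'.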