8222d8de 1#!/usr/bin/env python
dcdb292f 2# coding: utf-8
8222d8de 3
6febd1c1 4from __future__ import absolute_import, unicode_literals
8222d8de 5
26e63931 6import collections
31bd3925 7import contextlib
317f7ab6 8import copy
9d2ecdbc 9import datetime
c1c9a79c 10import errno
31bd3925 11import fileinput
8222d8de 12import io
b82f815f 13import itertools
8694c600 14import json
62fec3b2 15import locale
083c9df9 16import operator
8222d8de 17import os
dca08720 18import platform
8222d8de 19import re
20import shutil
dca08720 21import subprocess
8222d8de 22import socket
23import sys
24import time
67134eab 25import tokenize
8222d8de 26import traceback
75822ca7 27import random
8222d8de 28
961ea474 29from string import ascii_letters
e5813e53 30from zipimport import zipimporter
961ea474 31
8c25f81b 32from .compat import (
82d8a8b6 33 compat_basestring,
dca08720 34 compat_cookiejar,
003c69a8 35 compat_get_terminal_size,
ce02ed60 36 compat_http_client,
4f026faf 37 compat_kwargs,
d0d9ade4 38 compat_numeric_types,
e9c0cdd3 39 compat_os_name,
ce02ed60 40 compat_str,
67134eab 41 compat_tokenize_tokenize,
ce02ed60 42 compat_urllib_error,
43 compat_urllib_request,
8b172c2e 44 compat_urllib_request_DataHandler,
8c25f81b 45)
46from .utils import (
eedb7ba5 47 age_restricted,
48 args_to_str,
ce02ed60 49 ContentTooShortError,
50 date_from_str,
51 DateRange,
acd69589 52 DEFAULT_OUTTMPL,
de6000d9 53 OUTTMPL_TYPES,
ce02ed60 54 determine_ext,
b5559424 55 determine_protocol,
732044af 56 DOT_DESKTOP_LINK_TEMPLATE,
57 DOT_URL_LINK_TEMPLATE,
58 DOT_WEBLOC_LINK_TEMPLATE,
ce02ed60 59 DownloadError,
c0384f22 60 encode_compat_str,
ce02ed60 61 encodeFilename,
9b9c5355 62 error_to_compat_str,
8b0d7497 63 ExistingVideoReached,
590bc6f6 64 expand_path,
ce02ed60 65 ExtractorError,
e29663c6 66 float_or_none,
02dbf93f 67 format_bytes,
76d321f6 68 format_field,
525ef922 69 formatSeconds,
773f291d 70 GeoRestrictedError,
c9969434 71 int_or_none,
732044af 72 iri_to_uri,
773f291d 73 ISO3166Utils,
ce02ed60 74 locked_file,
0202b52a 75 make_dir,
dca08720 76 make_HTTPS_handler,
ce02ed60 77 MaxDownloadsReached,
cd6fc19e 78 orderedSet,
b7ab0590 79 PagedList,
083c9df9 80 parse_filesize,
91410c9b 81 PerRequestProxyHandler,
dca08720 82 platform_name,
eedb7ba5 83 PostProcessingError,
ce02ed60 84 preferredencoding,
eedb7ba5 85 prepend_extension,
51fb4995 86 register_socks_protocols,
cfb56d1a 87 render_table,
eedb7ba5 88 replace_extension,
8b0d7497 89 RejectedVideoReached,
ce02ed60 90 SameFileError,
91 sanitize_filename,
1bb5c511 92 sanitize_path,
dcf77cf1 93 sanitize_url,
67dda517 94 sanitized_Request,
e5660ee6 95 std_headers,
1211bb6d 96 str_or_none,
e29663c6 97 strftime_or_none,
ce02ed60 98 subtitles_filename,
732044af 99 to_high_limit_path,
ce02ed60 100 UnavailableVideoError,
29eb5174 101 url_basename,
58b1f00d 102 version_tuple,
ce02ed60 103 write_json_file,
104 write_string,
1bab3437 105 YoutubeDLCookieJar,
6a3f4c3f 106 YoutubeDLCookieProcessor,
dca08720 107 YoutubeDLHandler,
fca6dba8 108 YoutubeDLRedirectHandler,
f5b1bca9 109 process_communicate_or_kill,
ce02ed60 110)
a0e07d31 111from .cache import Cache
f74980cb 112from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER, _PLUGIN_CLASSES
4c54b89e 113from .extractor.openload import PhantomJSwrapper
3bc2ddcc 114from .downloader import get_suitable_downloader
4c83c967 115from .downloader.rtmp import rtmpdump_version
4f026faf 116from .postprocessor import (
f17f8651 117 FFmpegFixupM3u8PP,
62cd676c 118 FFmpegFixupM4aPP,
6271f1ca 119 FFmpegFixupStretchedPP,
4f026faf 120 FFmpegMergerPP,
121 FFmpegPostProcessor,
0202b52a 122 # FFmpegSubtitlesConvertorPP,
4f026faf 123 get_postprocessor,
0202b52a 124 MoveFilesAfterDownloadPP,
4f026faf 125)
dca08720 126from .version import __version__
8222d8de 127
e9c0cdd3 128if compat_os_name == 'nt':
129 import ctypes
130
2459b6e1 131
8222d8de 132class YoutubeDL(object):
133 """YoutubeDL class.
134
135 YoutubeDL objects are the ones responsible for downloading the
136 actual video file and writing it to disk if the user has requested
137 it, among some other tasks. In most cases there should be one per
138 program. Given a video URL, the downloader does not know how to
139 extract all the needed information itself (that is the task of the
140 InfoExtractors), so it has to pass the URL to one of them.
141
142 For this, YoutubeDL objects have a method that allows
143 InfoExtractors to be registered in a given order. When it is passed
144 a URL, the YoutubeDL object hands it to the first InfoExtractor it
145 finds that reports being able to handle it. The InfoExtractor extracts
146 all the information about the video or videos the URL refers to, and
147 YoutubeDL processes the extracted information, possibly using a File
148 Downloader to download the video.
149
150 YoutubeDL objects accept a lot of parameters. In order not to saturate
151 the object constructor with arguments, it receives a dictionary of
152 options instead. These options are available through the params
153 attribute for the InfoExtractors to use. The YoutubeDL also
154 registers itself as the downloader in charge of the InfoExtractors
155 that are added to it, so this is a "mutual registration".
156
157 Available options:
158
159 username: Username for authentication purposes.
160 password: Password for authentication purposes.
180940e0 161 videopassword: Password for accessing a video.
1da50aa3 162 ap_mso: Adobe Pass multiple-system operator identifier.
163 ap_username: Multiple-system operator account username.
164 ap_password: Multiple-system operator account password.
8222d8de 165 usenetrc: Use netrc for authentication instead.
166 verbose: Print additional info to stdout.
167 quiet: Do not print messages to stdout.
ad8915b7 168 no_warnings: Do not print out anything for warnings.
8222d8de 169 forceurl: Force printing final URL.
170 forcetitle: Force printing title.
171 forceid: Force printing ID.
172 forcethumbnail: Force printing thumbnail URL.
173 forcedescription: Force printing description.
174 forcefilename: Force printing final filename.
525ef922 175 forceduration: Force printing duration.
8694c600 176 forcejson: Force printing info_dict as JSON.
63e0be34 177 dump_single_json: Force printing the info_dict of the whole playlist
178 (or video) as a single JSON line.
c25228e5 179 force_write_download_archive: Force writing download archive regardless
180 of 'skip_download' or 'simulate'.
8222d8de 181 simulate: Do not download the video files.
eb8a4433 182 format: Video format code. see "FORMAT SELECTION" for more details.
63ad4d43 183 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
c25228e5 184 format_sort: How to sort the video formats. see "Sorting Formats"
185 for more details.
186 format_sort_force: Force the given format_sort. see "Sorting Formats"
187 for more details.
188 allow_multiple_video_streams: Allow multiple video streams to be merged
189 into a single file
190 allow_multiple_audio_streams: Allow multiple audio streams to be merged
191 into a single file
4524baf0 192 paths: Dictionary of output paths. The allowed keys are 'home',
193 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 194 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 195 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
196 A string is also accepted for backward compatibility
a820dc72 197 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
198 restrictfilenames: Do not allow "&" and spaces in file names
199 trim_file_name: Limit length of filename (extension excluded)
4524baf0 200 windowsfilenames: Force the filenames to be windows compatible
a820dc72 201 ignoreerrors: Do not stop on download errors
7a5c1cfe 202 (Default True when running yt-dlp,
a820dc72 203 but False when directly accessing YoutubeDL class)
d22dec74 204 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 205 overwrites: Overwrite all video and metadata files if True,
206 overwrite only non-video files if None
207 and don't overwrite any file if False
8222d8de 208 playliststart: Playlist item to start at.
209 playlistend: Playlist item to end at.
c14e88f0 210 playlist_items: Specific indices of playlist to download.
ff815fe6 211 playlistreverse: Download playlist items in reverse order.
75822ca7 212 playlistrandom: Download playlist items in random order.
8222d8de 213 matchtitle: Download only matching titles.
214 rejecttitle: Reject downloads for matching titles.
8bf9319e 215 logger: Log messages to a logging.Logger instance.
8222d8de 216 logtostderr: Log messages to stderr instead of stdout.
217 writedescription: Write the video description to a .description file
218 writeinfojson: Write the video metadata to a .info.json file
06167fbb 219 writecomments: Extract video comments. This will not be written to disk
220 unless writeinfojson is also given
1fb07d10 221 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 222 writethumbnail: Write the thumbnail image to a file
c25228e5 223 allow_playlist_files: Whether to write playlists' description, infojson etc
224 also to disk when using the 'write*' options
ec82d85a 225 write_all_thumbnails: Write all thumbnail formats to files
732044af 226 writelink: Write an internet shortcut file, depending on the
227 current platform (.url/.webloc/.desktop)
228 writeurllink: Write a Windows internet shortcut file (.url)
229 writewebloclink: Write a macOS internet shortcut file (.webloc)
230 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 231 writesubtitles: Write the video subtitles to a file
741dd8ea 232 writeautomaticsub: Write the automatically generated subtitles to a file
8222d8de 233 allsubtitles: Downloads all the subtitles of the video
0b7f3118 234 (requires writesubtitles or writeautomaticsub)
8222d8de 235 listsubtitles: Lists all available subtitles for the video
a504ced0 236 subtitlesformat: The format code for subtitles
aa6a10c4 237 subtitleslangs: List of languages of the subtitles to download
8222d8de 238 keepvideo: Keep the video file after post-processing
239 daterange: A DateRange object, download only if the upload_date is in the range.
240 skip_download: Skip the actual download of the video file
c35f9e72 241 cachedir: Location of the cache files in the filesystem.
a0e07d31 242 False to disable filesystem cache.
47192f92 243 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899 244 age_limit: An integer representing the user's age in years.
245 Unsuitable videos for the given age are skipped.
5fe18bdb 246 min_views: An integer representing the minimum view count the video
247 must have in order to not be skipped.
248 Videos without view count information are always
249 downloaded. None for no limit.
250 max_views: An integer representing the maximum view count.
251 Videos that are more popular than that are not
252 downloaded.
253 Videos without view count information are always
254 downloaded. None for no limit.
255 download_archive: File name of a file where all downloads are recorded.
c1c9a79c 256 Videos already present in the file are not downloaded
257 again.
8a51f564 258 break_on_existing: Stop the download process after attempting to download a
259 file that is in the archive.
260 break_on_reject: Stop the download process when encountering a video that
261 has been filtered out.
262 cookiefile: File name where cookies should be read from and dumped to
a1ee09e8 263 nocheckcertificate:Do not verify SSL certificates
7e8c0af0 264 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
265 At the moment, this is only supported by YouTube.
a1ee09e8 266 proxy: URL of the proxy server to use
38cce791 267 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 268 on geo-restricted sites.
e344693b 269 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b 270 bidi_workaround: Work around buggy terminals without bidirectional text
271 support, using fribidi
a0ddb8a2 272 debug_printtraffic:Print out sent and received HTTP traffic
7b0817e8 273 include_ads: Download ads as well
04b4d394 274 default_search: Prepend this string if an input url is not valid.
275 'auto' for elaborate guessing
62fec3b2 276 encoding: Use this encoding instead of the system-specified.
e8ee972c 277 extract_flat: Do not resolve URLs, return the immediate result.
057a5206 278 Pass in 'in_playlist' to only show this behavior for
279 playlist items.
4f026faf 280 postprocessors: A list of dictionaries, each with an entry
71b640cc 281 * key: The name of the postprocessor. See
7a5c1cfe 282 yt_dlp/postprocessor/__init__.py for a list.
0202b52a 283 * _after_move: Optional. If True, run this post_processor
284 after 'MoveFilesAfterDownload'
4f026faf 285 as well as any further keyword arguments for the
286 postprocessor.
ab8e5e51 287 post_hooks: A list of functions that get called as the final step
288 for each video file, after all postprocessors have been
289 called. The filename will be passed as the only argument.
71b640cc 290 progress_hooks: A list of functions that get called on download
291 progress, with a dictionary with the entries
5cda4eda 292 * status: One of "downloading", "error", or "finished".
ee69b99a 293 Check this first and ignore unknown values.
71b640cc 294
5cda4eda 295 If status is one of "downloading", or "finished", the
ee69b99a 296 following properties may also be present:
297 * filename: The final filename (always present)
5cda4eda 298 * tmpfilename: The filename we're currently writing to
71b640cc 299 * downloaded_bytes: Bytes on disk
300 * total_bytes: Size of the whole file, None if unknown
5cda4eda 301 * total_bytes_estimate: Guess of the eventual file size,
302 None if unavailable.
303 * elapsed: The number of seconds since download started.
71b640cc 304 * eta: The estimated time in seconds, None if unknown
305 * speed: The download speed in bytes/second, None if
306 unknown
5cda4eda 307 * fragment_index: The counter of the currently
308 downloaded video fragment.
309 * fragment_count: The number of fragments (= individual
310 files that will be merged)
71b640cc 311
312 Progress hooks are guaranteed to be called at least once
313 (with status "finished") if the download is successful.
45598f15 314 merge_output_format: Extension to use when merging formats.
6b591b29 315 final_ext: Expected final extension; used to detect when the file was
316 already downloaded and converted. "merge_output_format" is
317 replaced by this extension when given
6271f1ca 318 fixup: Automatically correct known faults of the file.
319 One of:
320 - "never": do nothing
321 - "warn": only emit a warning
322 - "detect_or_warn": check whether we can do anything
62cd676c 323 about it, warn otherwise (default)
504f20dd 324 source_address: Client-side IP address to bind to.
6ec6cb4e 325 call_home: Boolean, true iff we are allowed to contact the
7a5c1cfe 326 yt-dlp servers for debugging. (BROKEN)
1cf376f5 327 sleep_interval_requests: Number of seconds to sleep between requests
328 during extraction
7aa589a5 329 sleep_interval: Number of seconds to sleep before each download when
330 used alone or a lower bound of a range for randomized
331 sleep before each download (minimum possible number
332 of seconds to sleep) when used along with
333 max_sleep_interval.
334 max_sleep_interval:Upper bound of a range for randomized sleep before each
335 download (maximum possible number of seconds to sleep).
336 Must only be used along with sleep_interval.
337 Actual sleep time will be a random float from range
338 [sleep_interval; max_sleep_interval].
1cf376f5 339 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a 340 listformats: Print an overview of available video formats and exit.
341 list_thumbnails: Print a table of all thumbnails and exit.
347de493 342 match_filter: A function that gets called with the info_dict of
343 every video.
344 If it returns a message, the video is ignored.
345 If it returns None, the video is downloaded.
346 match_filter_func in utils.py is one example for this.
7e5db8c9 347 no_color: Do not emit color codes in output.
0a840f58 348 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 349 HTTP header
0a840f58 350 geo_bypass_country:
773f291d 351 Two-letter ISO 3166-2 country code that will be used for
352 explicit geographic restriction bypassing via faking
504f20dd 353 X-Forwarded-For HTTP header
5f95927a 354 geo_bypass_ip_block:
355 IP range in CIDR notation that will be used similarly to
504f20dd 356 geo_bypass_country
71b640cc 357
85729c51 358 The following options determine which downloader is picked:
359 external_downloader: Executable of the external downloader to call.
360 None or unset for standard (built-in) downloader.
bf09af3a 361 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
362 if True; use ffmpeg/avconv if False; and use the
363 downloader suggested by the extractor if None.
fe7e0c98 364
8222d8de 365 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 366 the downloader (see yt_dlp/downloader/common.py):
8222d8de 367 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
881e6a1f 368 noresizebuffer, retries, continuedl, noprogress, consoletitle,
b54d4a5c 369 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
e409895f 370 http_chunk_size.
76b1bd67 371
372 The following options are used by the post processors:
d4a24f40 373 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
e4172ac9 374 otherwise prefer ffmpeg. (avconv support is deprecated)
c0b7d117 375 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
376 to the binary or its containing directory.
43820c03 377 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
378 and a list of additional command-line arguments for the
379 postprocessor/executable. The dict can also have "PP+EXE" keys
380 which are used when the given exe is used by the given PP.
381 Use 'default' as the name for arguments to be passed to all PP
e409895f 382
383 The following options are used by the extractors:
62bff2c1 384 extractor_retries: Number of times to retry for known errors
385 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 386 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 387 discontinuities such as ad breaks (default: False)
3600fd59 388 youtube_include_dash_manifest: If True (default), DASH manifests and related
62bff2c1 389 data will be downloaded and processed by extractor.
390 You can reduce network I/O by disabling it if you don't
391 care about DASH. (only for youtube)
e409895f 392 youtube_include_hls_manifest: If True (default), HLS manifests and related
62bff2c1 393 data will be downloaded and processed by extractor.
394 You can reduce network I/O by disabling it if you don't
395 care about HLS. (only for youtube)
8222d8de 396 """
397
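    # Illustrative usage sketch (added comment, not part of the original file):
    # embedding YoutubeDL with a params dict as described in the docstring
    # above. The option values and the URL below are example assumptions.
    #
    #   from yt_dlp import YoutubeDL
    #
    #   def my_hook(d):
    #       # progress_hooks entries receive a dict; check 'status' first
    #       if d['status'] == 'finished':
    #           print('Finished downloading %s' % d['filename'])
    #
    #   ydl_opts = {
    #       'format': 'bestvideo+bestaudio/best',
    #       'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
    #       'progress_hooks': [my_hook],
    #   }
    #   with YoutubeDL(ydl_opts) as ydl:
    #       ydl.download(['https://www.youtube.com/watch?v=EXAMPLE'])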
c9969434 398 _NUMERIC_FIELDS = set((
399 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
400 'timestamp', 'upload_year', 'upload_month', 'upload_day',
401 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
402 'average_rating', 'comment_count', 'age_limit',
403 'start_time', 'end_time',
404 'chapter_number', 'season_number', 'episode_number',
405 'track_number', 'disc_number', 'release_year',
406 'playlist_index',
407 ))
408
8222d8de 409 params = None
410 _ies = []
5bfa4862 411 _pps = {'beforedl': [], 'aftermove': [], 'normal': []}
0202b52a 412 __prepare_filename_warned = False
1cf376f5 413 _first_webpage_request = True
8222d8de 414 _download_retcode = None
415 _num_downloads = None
30a074c2 416 _playlist_level = 0
417 _playlist_urls = set()
8222d8de 418 _screen_file = None
419
3511266b 420 def __init__(self, params=None, auto_init=True):
8222d8de 421 """Create a FileDownloader object with the given options."""
e9f9a10f 422 if params is None:
423 params = {}
8222d8de 424 self._ies = []
56c73665 425 self._ies_instances = {}
5bfa4862 426 self._pps = {'beforedl': [], 'aftermove': [], 'normal': []}
0202b52a 427 self.__prepare_filename_warned = False
1cf376f5 428 self._first_webpage_request = True
ab8e5e51 429 self._post_hooks = []
933605d7 430 self._progress_hooks = []
8222d8de 431 self._download_retcode = 0
432 self._num_downloads = 0
433 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
0783b09b 434 self._err_file = sys.stderr
4abf617b 435 self.params = {
436 # Default parameters
437 'nocheckcertificate': False,
438 }
439 self.params.update(params)
a0e07d31 440 self.cache = Cache(self)
a45e8619 441 self.archive = set()
ecdec191 442
443 """Preload the archive, if any is specified"""
444 def preload_download_archive(self):
445 fn = self.params.get('download_archive')
446 if fn is None:
447 return False
448 try:
449 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
450 for line in archive_file:
a45e8619 451 self.archive.add(line.strip())
ecdec191 452 except IOError as ioe:
453 if ioe.errno != errno.ENOENT:
454 raise
1d74d8d9 455 return False
ecdec191 456 return True
34308b30 457
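    # Illustrative note (added): preload_download_archive() above treats the
    # archive as a plain text file and stores each stripped line verbatim in
    # self.archive. The conventional "<extractor> <id>" entry format shown
    # below is an assumption based on how entries are recorded elsewhere:
    #
    #   youtube dQw4w9WgXcQ
    #   vimeo 123456789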
be5df5ee 458 def check_deprecated(param, option, suggestion):
459 if self.params.get(param) is not None:
460 self.report_warning(
461 '%s is deprecated. Use %s instead.' % (option, suggestion))
462 return True
463 return False
464
1de7ea76 465 if self.params.get('verbose'):
466 self.to_stdout('[debug] Loading archive file %r' % self.params.get('download_archive'))
467
ecdec191 468 preload_download_archive(self)
469
be5df5ee 470 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791 471 if self.params.get('geo_verification_proxy') is None:
472 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
473
6b591b29 474 if self.params.get('final_ext'):
475 if self.params.get('merge_output_format'):
476 self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
477 self.params['merge_output_format'] = self.params['final_ext']
478
b9d973be 479 if 'overwrites' in self.params and self.params['overwrites'] is None:
480 del self.params['overwrites']
481
be5df5ee 482 check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
483 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
484 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
485
0783b09b 486 if params.get('bidi_workaround', False):
1c088fa8 487 try:
488 import pty
489 master, slave = pty.openpty()
003c69a8 490 width = compat_get_terminal_size().columns
1c088fa8 491 if width is None:
492 width_args = []
493 else:
494 width_args = ['-w', str(width)]
5d681e96 495 sp_kwargs = dict(
1c088fa8 496 stdin=subprocess.PIPE,
497 stdout=slave,
498 stderr=self._err_file)
5d681e96 499 try:
500 self._output_process = subprocess.Popen(
501 ['bidiv'] + width_args, **sp_kwargs
502 )
503 except OSError:
5d681e96 504 self._output_process = subprocess.Popen(
505 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
506 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 507 except OSError as ose:
66e7ace1 508 if ose.errno == errno.ENOENT:
6febd1c1 509 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8 510 else:
511 raise
0783b09b 512
3089bc74 513 if (sys.platform != 'win32'
514 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
515 and not params.get('restrictfilenames', False)):
e9137224 516 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 517 self.report_warning(
6febd1c1 518 'Assuming --restrict-filenames since file system encoding '
1b725173 519 'cannot encode all characters. '
6febd1c1 520 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 521 self.params['restrictfilenames'] = True
34308b30 522
de6000d9 523 self.outtmpl_dict = self.parse_outtmpl()
486dd09e 524
dca08720 525 self._setup_opener()
526
3511266b 527 if auto_init:
528 self.print_debug_header()
529 self.add_default_info_extractors()
530
4f026faf 531 for pp_def_raw in self.params.get('postprocessors', []):
532 pp_class = get_postprocessor(pp_def_raw['key'])
533 pp_def = dict(pp_def_raw)
534 del pp_def['key']
5bfa4862 535 if 'when' in pp_def:
536 when = pp_def['when']
537 del pp_def['when']
538 else:
539 when = 'normal'
4f026faf 540 pp = pp_class(self, **compat_kwargs(pp_def))
5bfa4862 541 self.add_post_processor(pp, when=when)
4f026faf 542
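    # Illustrative sketch (added): the shape of the 'postprocessors' entries
    # consumed by the loop above -- a 'key' naming the postprocessor, an
    # optional 'when', and any further keyword arguments for it. The PP names
    # below are examples, not an exhaustive or guaranteed list:
    #
    #   'postprocessors': [
    #       {'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3'},
    #       {'key': 'FFmpegMetadata', 'when': 'normal'},
    #   ],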
ab8e5e51 543 for ph in self.params.get('post_hooks', []):
544 self.add_post_hook(ph)
545
71b640cc 546 for ph in self.params.get('progress_hooks', []):
547 self.add_progress_hook(ph)
548
51fb4995 549 register_socks_protocols()
550
7d4111ed 551 def warn_if_short_id(self, argv):
552 # short YouTube ID starting with dash?
553 idxs = [
554 i for i, a in enumerate(argv)
555 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
556 if idxs:
557 correct_argv = (
7a5c1cfe 558 ['yt-dlp']
3089bc74 559 + [a for i, a in enumerate(argv) if i not in idxs]
560 + ['--'] + [argv[i] for i in idxs]
7d4111ed 561 )
562 self.report_warning(
563 'Long argument string detected. '
564 'Use -- to separate parameters and URLs, like this:\n%s\n' %
565 args_to_str(correct_argv))
566
8222d8de 567 def add_info_extractor(self, ie):
568 """Add an InfoExtractor object to the end of the list."""
569 self._ies.append(ie)
e52d7f85 570 if not isinstance(ie, type):
571 self._ies_instances[ie.ie_key()] = ie
572 ie.set_downloader(self)
8222d8de 573
56c73665 574 def get_info_extractor(self, ie_key):
575 """
576 Get an instance of an IE with name ie_key, it will try to get one from
577 the _ies list, if there's no instance it will create a new one and add
578 it to the extractor list.
579 """
580 ie = self._ies_instances.get(ie_key)
581 if ie is None:
582 ie = get_info_extractor(ie_key)()
583 self.add_info_extractor(ie)
584 return ie
585
023fa8c4 586 def add_default_info_extractors(self):
587 """
588 Add the InfoExtractors returned by gen_extractors to the end of the list
589 """
e52d7f85 590 for ie in gen_extractor_classes():
023fa8c4 591 self.add_info_extractor(ie)
592
5bfa4862 593 def add_post_processor(self, pp, when='normal'):
8222d8de 594 """Add a PostProcessor object to the end of the chain."""
5bfa4862 595 self._pps[when].append(pp)
8222d8de 596 pp.set_downloader(self)
597
ab8e5e51 598 def add_post_hook(self, ph):
599 """Add the post hook"""
600 self._post_hooks.append(ph)
601
933605d7 602 def add_progress_hook(self, ph):
603 """Add the progress hook (currently only for the file downloader)"""
604 self._progress_hooks.append(ph)
8ab470f1 605
1c088fa8 606 def _bidi_workaround(self, message):
5d681e96 607 if not hasattr(self, '_output_channel'):
1c088fa8 608 return message
609
5d681e96 610 assert hasattr(self, '_output_process')
11b85ce6 611 assert isinstance(message, compat_str)
6febd1c1 612 line_count = message.count('\n') + 1
613 self._output_process.stdin.write((message + '\n').encode('utf-8'))
5d681e96 614 self._output_process.stdin.flush()
6febd1c1 615 res = ''.join(self._output_channel.readline().decode('utf-8')
9e1a5b84 616 for _ in range(line_count))
6febd1c1 617 return res[:-len('\n')]
1c088fa8 618
8222d8de 619 def to_screen(self, message, skip_eol=False):
0783b09b 620 """Print message to stdout if not in quiet mode."""
621 return self.to_stdout(message, skip_eol, check_quiet=True)
622
734f90bb 623 def _write_string(self, s, out=None):
b58ddb32 624 write_string(s, out=out, encoding=self.params.get('encoding'))
734f90bb 625
0783b09b 626 def to_stdout(self, message, skip_eol=False, check_quiet=False):
8222d8de 627 """Print message to stdout if not in quiet mode."""
8bf9319e 628 if self.params.get('logger'):
43afe285 629 self.params['logger'].debug(message)
0783b09b 630 elif not check_quiet or not self.params.get('quiet', False):
1c088fa8 631 message = self._bidi_workaround(message)
6febd1c1 632 terminator = ['\n', ''][skip_eol]
8222d8de 633 output = message + terminator
1c088fa8 634
734f90bb 635 self._write_string(output, self._screen_file)
8222d8de 636
637 def to_stderr(self, message):
638 """Print message to stderr."""
11b85ce6 639 assert isinstance(message, compat_str)
8bf9319e 640 if self.params.get('logger'):
43afe285 641 self.params['logger'].error(message)
642 else:
1c088fa8 643 message = self._bidi_workaround(message)
6febd1c1 644 output = message + '\n'
734f90bb 645 self._write_string(output, self._err_file)
8222d8de 646
1e5b9a95 647 def to_console_title(self, message):
648 if not self.params.get('consoletitle', False):
649 return
4bede0d8 650 if compat_os_name == 'nt':
651 if ctypes.windll.kernel32.GetConsoleWindow():
652 # c_wchar_p() might not be necessary if `message` is
653 # already of type unicode()
654 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
1e5b9a95 655 elif 'TERM' in os.environ:
b46696bd 656 self._write_string('\033]0;%s\007' % message, self._screen_file)
1e5b9a95 657
bdde425c 658 def save_console_title(self):
659 if not self.params.get('consoletitle', False):
660 return
94c3442e 661 if self.params.get('simulate', False):
662 return
4bede0d8 663 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 664 # Save the title on stack
734f90bb 665 self._write_string('\033[22;0t', self._screen_file)
bdde425c 666
667 def restore_console_title(self):
668 if not self.params.get('consoletitle', False):
669 return
94c3442e 670 if self.params.get('simulate', False):
671 return
4bede0d8 672 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 673 # Restore the title from stack
734f90bb 674 self._write_string('\033[23;0t', self._screen_file)
bdde425c 675
676 def __enter__(self):
677 self.save_console_title()
678 return self
679
680 def __exit__(self, *args):
681 self.restore_console_title()
f89197d7 682
dca08720 683 if self.params.get('cookiefile') is not None:
1bab3437 684 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 685
8222d8de 686 def trouble(self, message=None, tb=None):
687 """Determine action to take when a download problem appears.
688
689 Depending on whether the downloader has been configured to ignore
690 download errors or not, this method may throw an exception or
691 not when errors are found, after printing the message.
692
693 tb, if given, is additional traceback information.
694 """
695 if message is not None:
696 self.to_stderr(message)
697 if self.params.get('verbose'):
698 if tb is None:
699 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 700 tb = ''
8222d8de 701 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 702 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 703 tb += encode_compat_str(traceback.format_exc())
8222d8de 704 else:
705 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 706 tb = ''.join(tb_data)
8222d8de 707 self.to_stderr(tb)
708 if not self.params.get('ignoreerrors', False):
709 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
710 exc_info = sys.exc_info()[1].exc_info
711 else:
712 exc_info = sys.exc_info()
713 raise DownloadError(message, exc_info)
714 self._download_retcode = 1
715
716 def report_warning(self, message):
717 '''
718 Print the message to stderr, it will be prefixed with 'WARNING:'
719 If stderr is a tty file the 'WARNING:' will be colored
720 '''
6d07ce01 721 if self.params.get('logger') is not None:
722 self.params['logger'].warning(message)
8222d8de 723 else:
ad8915b7 724 if self.params.get('no_warnings'):
725 return
e9c0cdd3 726 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6d07ce01 727 _msg_header = '\033[0;33mWARNING:\033[0m'
728 else:
729 _msg_header = 'WARNING:'
730 warning_message = '%s %s' % (_msg_header, message)
731 self.to_stderr(warning_message)
8222d8de 732
733 def report_error(self, message, tb=None):
734 '''
735 Do the same as trouble, but prefixes the message with 'ERROR:', colored
736 in red if stderr is a tty file.
737 '''
e9c0cdd3 738 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6febd1c1 739 _msg_header = '\033[0;31mERROR:\033[0m'
8222d8de 740 else:
6febd1c1 741 _msg_header = 'ERROR:'
742 error_message = '%s %s' % (_msg_header, message)
8222d8de 743 self.trouble(error_message, tb)
744
8222d8de 745 def report_file_already_downloaded(self, file_name):
746 """Report file has already been fully downloaded."""
747 try:
6febd1c1 748 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 749 except UnicodeEncodeError:
6febd1c1 750 self.to_screen('[download] The file has already been downloaded')
8222d8de 751
0c3d0f51 752 def report_file_delete(self, file_name):
753 """Report that existing file will be deleted."""
754 try:
c25228e5 755 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 756 except UnicodeEncodeError:
c25228e5 757 self.to_screen('Deleting existing file')
0c3d0f51 758
de6000d9 759 def parse_outtmpl(self):
760 outtmpl_dict = self.params.get('outtmpl', {})
761 if not isinstance(outtmpl_dict, dict):
762 outtmpl_dict = {'default': outtmpl_dict}
763 outtmpl_dict.update({
764 k: v for k, v in DEFAULT_OUTTMPL.items()
765 if not outtmpl_dict.get(k)})
766 for key, val in outtmpl_dict.items():
767 if isinstance(val, bytes):
768 self.report_warning(
769 'Parameter outtmpl is bytes, but should be a unicode string. '
770 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
771 return outtmpl_dict
772
773 def _prepare_filename(self, info_dict, tmpl_type='default'):
8222d8de 774 try:
775 template_dict = dict(info_dict)
776
e29663c6 777 template_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
778 formatSeconds(info_dict['duration'], '-')
779 if info_dict.get('duration', None) is not None
780 else None)
781
8222d8de 782 template_dict['epoch'] = int(time.time())
783 autonumber_size = self.params.get('autonumber_size')
784 if autonumber_size is None:
785 autonumber_size = 5
89db639d 786 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
17b75c0d 787 if template_dict.get('resolution') is None:
788 if template_dict.get('width') and template_dict.get('height'):
789 template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
790 elif template_dict.get('height'):
805ef3c6 791 template_dict['resolution'] = '%sp' % template_dict['height']
17b75c0d 792 elif template_dict.get('width'):
51ce9117 793 template_dict['resolution'] = '%dx?' % template_dict['width']
8222d8de 794
586a91b6 795 sanitize = lambda k, v: sanitize_filename(
45598aab 796 compat_str(v),
1bb5c511 797 restricted=self.params.get('restrictfilenames'),
40df485f 798 is_id=(k == 'id' or k.endswith('_id')))
d0d9ade4 799 template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
45598aab 800 for k, v in template_dict.items()
f0e14fdd 801 if v is not None and not isinstance(v, (list, tuple, dict)))
e29663c6 802 na = self.params.get('outtmpl_na_placeholder', 'NA')
803 template_dict = collections.defaultdict(lambda: na, template_dict)
8222d8de 804
de6000d9 805 outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
806 force_ext = OUTTMPL_TYPES.get(tmpl_type)
d0d9ade4 807
89db639d 808 # For fields playlist_index and autonumber convert all occurrences
809 # of %(field)s to %(field)0Nd for backward compatibility
810 field_size_compat_map = {
811 'playlist_index': len(str(template_dict['n_entries'])),
812 'autonumber': autonumber_size,
813 }
814 FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
815 mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
816 if mobj:
817 outtmpl = re.sub(
818 FIELD_SIZE_COMPAT_RE,
819 r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
820 outtmpl)
821
e29663c6 822 # As of [1] format syntax is:
823 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
824 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
825 FORMAT_RE = r'''(?x)
826 (?<!%)
827 %
828 \({0}\) # mapping key
829 (?:[#0\-+ ]+)? # conversion flags (optional)
830 (?:\d+)? # minimum field width (optional)
831 (?:\.\d+)? # precision (optional)
832 [hlL]? # length modifier (optional)
833 (?P<type>[diouxXeEfFgGcrs%]) # conversion type
834 '''
835
836 numeric_fields = list(self._NUMERIC_FIELDS)
837
838 # Format date
839 FORMAT_DATE_RE = FORMAT_RE.format(r'(?P<key>(?P<field>\w+)>(?P<format>.+?))')
840 for mobj in re.finditer(FORMAT_DATE_RE, outtmpl):
841 conv_type, field, frmt, key = mobj.group('type', 'field', 'format', 'key')
842 if key in template_dict:
843 continue
844 value = strftime_or_none(template_dict.get(field), frmt, na)
845 if conv_type in 'crs': # string
846 value = sanitize(field, value)
847 else: # number
848 numeric_fields.append(key)
849 value = float_or_none(value, default=None)
850 if value is not None:
851 template_dict[key] = value
852
d0d9ade4 853 # Missing numeric fields used together with integer presentation types
854 # in format specification will break the argument substitution since
a820dc72 855 # string NA placeholder is returned for missing fields. We will patch
856 # output template for missing fields to meet string presentation type.
e29663c6 857 for numeric_field in numeric_fields:
d0d9ade4 858 if numeric_field not in template_dict:
d0d9ade4 859 outtmpl = re.sub(
e29663c6 860 FORMAT_RE.format(re.escape(numeric_field)),
d0d9ade4 861 r'%({0})s'.format(numeric_field), outtmpl)
862
15da37c7 863 # expand_path translates '%%' into '%' and '$$' into '$'
864 # correspondingly that is not what we want since we need to keep
865 # '%%' intact for template dict substitution step. Working around
866 # with boundary-alike separator hack.
961ea474 867 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
15da37c7 868 outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
869
870 # outtmpl should be expand_path'ed before template dict substitution
871 # because meta fields may contain env variables we don't want to
872 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
873 # title "Hello $PATH", we don't want `$PATH` to be expanded.
874 filename = expand_path(outtmpl).replace(sep, '') % template_dict
875
de6000d9 876 if force_ext is not None:
877 filename = replace_extension(filename, force_ext, template_dict.get('ext'))
878
bdc3fd2f 879 # https://github.com/blackjack4494/youtube-dlc/issues/85
880 trim_file_name = self.params.get('trim_file_name', False)
881 if trim_file_name:
882 fn_groups = filename.rsplit('.')
883 ext = fn_groups[-1]
884 sub_ext = ''
885 if len(fn_groups) > 2:
886 sub_ext = fn_groups[-2]
887 filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
888
0202b52a 889 return filename
8222d8de 890 except ValueError as err:
6febd1c1 891 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de 892 return None
893
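    # Illustrative sketch (added): the core of the expansion performed by
    # _prepare_filename() above is plain %-formatting against a defaultdict,
    # so missing fields collapse to the 'outtmpl_na_placeholder' value
    # ('NA' by default):
    #
    #   import collections
    #   fields = collections.defaultdict(lambda: 'NA',
    #                                    title='Example', id='abc123', ext='mp4')
    #   '%(title)s [%(id)s].%(ext)s' % fields   # -> 'Example [abc123].mp4'
    #   '%(uploader)s.%(ext)s' % fields         # -> 'NA.mp4'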
de6000d9 894 def prepare_filename(self, info_dict, dir_type='', warn=False):
895 """Generate the output filename."""
0202b52a 896 paths = self.params.get('paths', {})
897 assert isinstance(paths, dict)
de6000d9 898 filename = self._prepare_filename(info_dict, dir_type or 'default')
899
900 if warn and not self.__prepare_filename_warned:
901 if not paths:
902 pass
903 elif filename == '-':
904 self.report_warning('--paths is ignored when outputting to stdout')
905 elif os.path.isabs(filename):
906 self.report_warning('--paths is ignored since an absolute path is given in output template')
907 self.__prepare_filename_warned = True
908 if filename == '-' or not filename:
909 return filename
910
0202b52a 911 homepath = expand_path(paths.get('home', '').strip())
912 assert isinstance(homepath, compat_str)
913 subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
914 assert isinstance(subdir, compat_str)
c2934512 915 path = os.path.join(homepath, subdir, filename)
916
917 # Temporary fix for #4787
918 # 'Treat' all problem characters by passing filename through preferredencoding
919 # to workaround encoding issues with subprocess on python2 @ Windows
920 if sys.version_info < (3, 0) and sys.platform == 'win32':
921 path = encodeFilename(path, True).decode(preferredencoding())
922 return sanitize_path(path, force=self.params.get('windowsfilenames'))
0202b52a 923
442c37b7 924 def _match_entry(self, info_dict, incomplete):
ecdec191 925 """ Returns None if the file should be downloaded """
8222d8de 926
8b0d7497 927 def check_filter():
928 video_title = info_dict.get('title', info_dict.get('id', 'video'))
929 if 'title' in info_dict:
930 # This can happen when we're just evaluating the playlist
931 title = info_dict['title']
932 matchtitle = self.params.get('matchtitle', False)
933 if matchtitle:
934 if not re.search(matchtitle, title, re.IGNORECASE):
935 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
936 rejecttitle = self.params.get('rejecttitle', False)
937 if rejecttitle:
938 if re.search(rejecttitle, title, re.IGNORECASE):
939 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
940 date = info_dict.get('upload_date')
941 if date is not None:
942 dateRange = self.params.get('daterange', DateRange())
943 if date not in dateRange:
944 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
945 view_count = info_dict.get('view_count')
946 if view_count is not None:
947 min_views = self.params.get('min_views')
948 if min_views is not None and view_count < min_views:
949 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
950 max_views = self.params.get('max_views')
951 if max_views is not None and view_count > max_views:
952 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
953 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
954 return 'Skipping "%s" because it is age restricted' % video_title
955 if self.in_download_archive(info_dict):
956 return '%s has already been recorded in the archive' % video_title
957
958 if not incomplete:
959 match_filter = self.params.get('match_filter')
960 if match_filter is not None:
961 ret = match_filter(info_dict)
962 if ret is not None:
963 return ret
964 return None
965
966 reason = check_filter()
967 if reason is not None:
968 self.to_screen('[download] ' + reason)
d83cb531 969 if reason.endswith('has already been recorded in the archive') and self.params.get('break_on_existing', False):
8b0d7497 970 raise ExistingVideoReached()
d83cb531 971 elif self.params.get('break_on_reject', False):
8b0d7497 972 raise RejectedVideoReached()
973 return reason
fe7e0c98 974
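    # Illustrative sketch (added): a 'match_filter' callable as consumed by
    # _match_entry() above -- return None to accept the video, or a message
    # string explaining why it should be skipped:
    #
    #   def only_short_videos(info_dict):
    #       duration = info_dict.get('duration')
    #       if duration and duration > 600:
    #           return 'Skipping %s: longer than 10 minutes' % info_dict.get('id')
    #       return None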
b6c45014 975 @staticmethod
976 def add_extra_info(info_dict, extra_info):
977 '''Set the keys from extra_info in info dict if they are missing'''
978 for key, value in extra_info.items():
979 info_dict.setdefault(key, value)
980
0704d222 981 def extract_info(self, url, download=True, ie_key=None, info_dict=None, extra_info={},
61aa5ba3 982 process=True, force_generic_extractor=False):
8222d8de 983 '''
984 Returns a list with a dictionary for each video we find.
985 If 'download', also downloads the videos.
986 extra_info is a dict containing the extra values to add to each result
613b2d9d 987 '''
fe7e0c98 988
61aa5ba3 989 if not ie_key and force_generic_extractor:
d22dec74 990 ie_key = 'Generic'
991
8222d8de 992 if ie_key:
56c73665 993 ies = [self.get_info_extractor(ie_key)]
8222d8de 994 else:
995 ies = self._ies
996
997 for ie in ies:
998 if not ie.suitable(url):
999 continue
1000
9a68de12 1001 ie_key = ie.ie_key()
1002 ie = self.get_info_extractor(ie_key)
8222d8de 1003 if not ie.working():
6febd1c1 1004 self.report_warning('The program functionality for this site has been marked as broken, '
1005 'and will probably not work.')
8222d8de 1006
1007 try:
d0757229 1008 temp_id = str_or_none(
63be1aab 1009 ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
1010 else ie._match_id(url))
a0566bbf 1011 except (AssertionError, IndexError, AttributeError):
1012 temp_id = None
1013 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1014 self.to_screen("[%s] %s: has already been recorded in archive" % (
1015 ie_key, temp_id))
1016 break
a0566bbf 1017 return self.__extract_info(url, ie, download, extra_info, process, info_dict)
a0566bbf 1018 else:
1019 self.report_error('no suitable InfoExtractor for URL %s' % url)
1020
1021 def __handle_extraction_exceptions(func):
1022 def wrapper(self, *args, **kwargs):
1023 try:
1024 return func(self, *args, **kwargs)
773f291d
S
1025 except GeoRestrictedError as e:
1026 msg = e.msg
1027 if e.countries:
1028 msg += '\nThis video is available in %s.' % ', '.join(
1029 map(ISO3166Utils.short2full, e.countries))
1030 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1031 self.report_error(msg)
fb043a6e 1032 except ExtractorError as e: # An error we somewhat expected
2c74e6fa 1033 self.report_error(compat_str(e), e.format_traceback())
8b0d7497 1034 except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
d3e5bbf4 1035 raise
8222d8de 1036 except Exception as e:
1037 if self.params.get('ignoreerrors', False):
9b9c5355 1038 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
8222d8de 1039 else:
1040 raise
a0566bbf 1041 return wrapper
1042
1043 @__handle_extraction_exceptions
1044 def __extract_info(self, url, ie, download, extra_info, process, info_dict):
1045 ie_result = ie.extract(url)
1046 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1047 return
1048 if isinstance(ie_result, list):
1049 # Backwards compatibility: old IE result format
1050 ie_result = {
1051 '_type': 'compat_list',
1052 'entries': ie_result,
1053 }
1054 if info_dict:
1055 if info_dict.get('id'):
1056 ie_result['id'] = info_dict['id']
1057 if info_dict.get('title'):
1058 ie_result['title'] = info_dict['title']
1059 self.add_default_extra_info(ie_result, ie, url)
1060 if process:
1061 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1062 else:
a0566bbf 1063 return ie_result
fe7e0c98 1064
ea38e55f
PH
1065 def add_default_extra_info(self, ie_result, ie, url):
1066 self.add_extra_info(ie_result, {
1067 'extractor': ie.IE_NAME,
1068 'webpage_url': url,
1069 'webpage_url_basename': url_basename(url),
1070 'extractor_key': ie.ie_key(),
1071 })
1072
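    # Illustrative sketch (added): the kinds of ie_result dicts that
    # process_ie_result() below dispatches on. The field values are made-up
    # examples; only '_type' and the fields actually read by the code below
    # are implied:
    #
    #   {'_type': 'video', 'id': 'abc123', 'title': '...', 'url': '...'}
    #   {'_type': 'url', 'url': 'https://example.com/watch/abc123', 'ie_key': 'SomeIE'}
    #   {'_type': 'playlist', 'id': 'pl1', 'title': '...', 'entries': [...]}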
8222d8de 1073 def process_ie_result(self, ie_result, download=True, extra_info={}):
1074 """
1075 Take the result of the ie (may be modified) and resolve all unresolved
1076 references (URLs, playlist items).
1077
1078 It will also download the videos if 'download'.
1079 Returns the resolved ie_result.
1080 """
e8ee972c 1081 result_type = ie_result.get('_type', 'video')
1082
057a5206 1083 if result_type in ('url', 'url_transparent'):
134c6ea8 1084 ie_result['url'] = sanitize_url(ie_result['url'])
057a5206 1085 extract_flat = self.params.get('extract_flat', False)
3089bc74 1086 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1087 or extract_flat is True):
de6000d9 1088 self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
e8ee972c 1089 return ie_result
1090
8222d8de 1091 if result_type == 'video':
b6c45014 1092 self.add_extra_info(ie_result, extra_info)
feee2ecf 1093 return self.process_video_result(ie_result, download=download)
8222d8de 1094 elif result_type == 'url':
1095 # We have to add extra_info to the results because it may be
1096 # contained in a playlist
1097 return self.extract_info(ie_result['url'],
0704d222 1098 download, info_dict=ie_result,
8222d8de 1099 ie_key=ie_result.get('ie_key'),
1100 extra_info=extra_info)
7fc3fa05 1101 elif result_type == 'url_transparent':
1102 # Use the information from the embedding page
1103 info = self.extract_info(
1104 ie_result['url'], ie_key=ie_result.get('ie_key'),
1105 extra_info=extra_info, download=False, process=False)
1106
1640eb09 1107 # extract_info may return None when ignoreerrors is enabled and
1108 # extraction failed with an error, don't crash and return early
1109 # in this case
1110 if not info:
1111 return info
1112
412c617d 1113 force_properties = dict(
1114 (k, v) for k, v in ie_result.items() if v is not None)
0396806f 1115 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
412c617d 1116 if f in force_properties:
1117 del force_properties[f]
1118 new_result = info.copy()
1119 new_result.update(force_properties)
7fc3fa05 1120
0563f7ac 1121 # Extracted info may not be a video result (i.e.
1122 # info.get('_type', 'video') != video) but rather an url or
1123 # url_transparent. In such cases outer metadata (from ie_result)
1124 # should be propagated to inner one (info). For this to happen
1125 # _type of info should be overridden with url_transparent. This
067aa17e 1126 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac 1127 if new_result.get('_type') == 'url':
1128 new_result['_type'] = 'url_transparent'
7fc3fa05 1129
1130 return self.process_ie_result(
1131 new_result, download=download, extra_info=extra_info)
40fcba5e 1132 elif result_type in ('playlist', 'multi_video'):
30a074c2 1133 # Protect from infinite recursion due to recursively nested playlists
1134 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1135 webpage_url = ie_result['webpage_url']
1136 if webpage_url in self._playlist_urls:
7e85e872 1137 self.to_screen(
30a074c2 1138 '[download] Skipping already downloaded playlist: %s'
1139 % ie_result.get('title') or ie_result.get('id'))
1140 return
7e85e872 1141
30a074c2 1142 self._playlist_level += 1
1143 self._playlist_urls.add(webpage_url)
1144 try:
1145 return self.__process_playlist(ie_result, download)
1146 finally:
1147 self._playlist_level -= 1
1148 if not self._playlist_level:
1149 self._playlist_urls.clear()
8222d8de 1150 elif result_type == 'compat_list':
c9bf4114 1151 self.report_warning(
1152 'Extractor %s returned a compat_list result. '
1153 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1154
8222d8de 1155 def _fixup(r):
9e1a5b84 1156 self.add_extra_info(
1157 r,
9103bbc5 1158 {
1159 'extractor': ie_result['extractor'],
1160 'webpage_url': ie_result['webpage_url'],
29eb5174 1161 'webpage_url_basename': url_basename(ie_result['webpage_url']),
be97abc2 1162 'extractor_key': ie_result['extractor_key'],
9e1a5b84 1163 }
1164 )
8222d8de 1165 return r
1166 ie_result['entries'] = [
b6c45014 1167 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de 1168 for r in ie_result['entries']
1169 ]
1170 return ie_result
1171 else:
1172 raise Exception('Invalid result type: %s' % result_type)
1173
30a074c2 1174 def __process_playlist(self, ie_result, download):
1175 # We process each entry in the playlist
1176 playlist = ie_result.get('title') or ie_result.get('id')
1177 self.to_screen('[download] Downloading playlist: %s' % playlist)
1178
cac96421 1179 if self.params.get('allow_playlist_files', True):
1180 ie_copy = {
1181 'playlist': playlist,
1182 'playlist_id': ie_result.get('id'),
1183 'playlist_title': ie_result.get('title'),
1184 'playlist_uploader': ie_result.get('uploader'),
1185 'playlist_uploader_id': ie_result.get('uploader_id'),
1186 'playlist_index': 0
1187 }
1188 ie_copy.update(dict(ie_result))
02fd60d3 1189
cac96421 1190 def ensure_dir_exists(path):
1191 return make_dir(path, self.report_error)
1192
1193 if self.params.get('writeinfojson', False):
de6000d9 1194 infofn = self.prepare_filename(ie_copy, 'pl_infojson')
cac96421 1195 if not ensure_dir_exists(encodeFilename(infofn)):
02fd60d3 1196 return
b9d973be 1197 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
cac96421 1198 self.to_screen('[info] Playlist metadata is already present')
1199 else:
cac96421 1200 playlist_info = dict(ie_result)
18590cec 1201 # playlist_info['entries'] = list(playlist_info['entries']) # Entries is a generator which should not be resolved here
1202 del playlist_info['entries']
1203 self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
cac96421 1204 try:
1205 write_json_file(self.filter_requested_info(playlist_info), infofn)
1206 except (OSError, IOError):
1207 self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
1208
1209 if self.params.get('writedescription', False):
de6000d9 1210 descfn = self.prepare_filename(ie_copy, 'pl_description')
cac96421 1211 if not ensure_dir_exists(encodeFilename(descfn)):
1212 return
1213 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
1214 self.to_screen('[info] Playlist description is already present')
1215 elif ie_result.get('description') is None:
1216 self.report_warning('There\'s no playlist description to write.')
1217 else:
1218 try:
1219 self.to_screen('[info] Writing playlist description to: ' + descfn)
1220 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1221 descfile.write(ie_result['description'])
1222 except (OSError, IOError):
1223 self.report_error('Cannot write playlist description file ' + descfn)
1224 return
02fd60d3 1225
30a074c2 1226 playlist_results = []
1227
1228 playliststart = self.params.get('playliststart', 1) - 1
1229 playlistend = self.params.get('playlistend')
1230 # For backwards compatibility, interpret -1 as whole list
1231 if playlistend == -1:
1232 playlistend = None
1233
1234 playlistitems_str = self.params.get('playlist_items')
1235 playlistitems = None
1236 if playlistitems_str is not None:
1237 def iter_playlistitems(format):
1238 for string_segment in format.split(','):
1239 if '-' in string_segment:
1240 start, end = string_segment.split('-')
1241 for item in range(int(start), int(end) + 1):
1242 yield int(item)
1243 else:
1244 yield int(string_segment)
1245 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
1246
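        # Illustrative note (added): for example, a playlist_items spec of
        # '1-3,7' is expanded by iter_playlistitems() above to 1, 2, 3, 7,
        # and orderedSet() then drops duplicates while preserving order.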
1247 ie_entries = ie_result['entries']
1248
1249 def make_playlistitems_entries(list_ie_entries):
1250 num_entries = len(list_ie_entries)
1251 return [
1252 list_ie_entries[i - 1] for i in playlistitems
1253 if -num_entries <= i - 1 < num_entries]
1254
1255 def report_download(num_entries):
1256 self.to_screen(
1257 '[%s] playlist %s: Downloading %d videos' %
1258 (ie_result['extractor'], playlist, num_entries))
1259
1260 if isinstance(ie_entries, list):
1261 n_all_entries = len(ie_entries)
1262 if playlistitems:
1263 entries = make_playlistitems_entries(ie_entries)
1264 else:
1265 entries = ie_entries[playliststart:playlistend]
1266 n_entries = len(entries)
1267 self.to_screen(
1268 '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
1269 (ie_result['extractor'], playlist, n_all_entries, n_entries))
1270 elif isinstance(ie_entries, PagedList):
1271 if playlistitems:
1272 entries = []
1273 for item in playlistitems:
1274 entries.extend(ie_entries.getslice(
1275 item - 1, item
1276 ))
1277 else:
1278 entries = ie_entries.getslice(
1279 playliststart, playlistend)
1280 n_entries = len(entries)
1281 report_download(n_entries)
1282 else: # iterable
1283 if playlistitems:
1284 entries = make_playlistitems_entries(list(itertools.islice(
1285 ie_entries, 0, max(playlistitems))))
1286 else:
1287 entries = list(itertools.islice(
1288 ie_entries, playliststart, playlistend))
1289 n_entries = len(entries)
1290 report_download(n_entries)
1291
1292 if self.params.get('playlistreverse', False):
1293 entries = entries[::-1]
1294
1295 if self.params.get('playlistrandom', False):
1296 random.shuffle(entries)
1297
1298 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1299
1300 for i, entry in enumerate(entries, 1):
1301 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1302 # This __x_forwarded_for_ip thing is a bit ugly but requires
1303 # minimal changes
1304 if x_forwarded_for:
1305 entry['__x_forwarded_for_ip'] = x_forwarded_for
1306 extra = {
1307 'n_entries': n_entries,
1308 'playlist': playlist,
1309 'playlist_id': ie_result.get('id'),
1310 'playlist_title': ie_result.get('title'),
1311 'playlist_uploader': ie_result.get('uploader'),
1312 'playlist_uploader_id': ie_result.get('uploader_id'),
1313 'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
1314 'extractor': ie_result['extractor'],
1315 'webpage_url': ie_result['webpage_url'],
1316 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1317 'extractor_key': ie_result['extractor_key'],
1318 }
1319
1320 if self._match_entry(entry, incomplete=True) is not None:
1321 continue
1322
1323 entry_result = self.__process_iterable_entry(entry, download, extra)
1324 # TODO: skip failed (empty) entries?
1325 playlist_results.append(entry_result)
1326 ie_result['entries'] = playlist_results
1327 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1328 return ie_result
1329
a0566bbf 1330 @__handle_extraction_exceptions
1331 def __process_iterable_entry(self, entry, download, extra_info):
1332 return self.process_ie_result(
1333 entry, download=download, extra_info=extra_info)
1334
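# A minimal, self-contained sketch (illustrative only; the helper below is
# not part of this module) of the '--playlist-items' expansion performed by
# iter_playlistitems() and orderedSet() above:
def expand_playlist_items(spec):
    result = []
    for segment in spec.split(','):
        if '-' in segment:
            start, end = segment.split('-')
            items = range(int(start), int(end) + 1)
        else:
            items = [int(segment)]
        for item in items:
            if item not in result:  # mimic orderedSet(): dedupe, keep first-seen order
                result.append(item)
    return result

# expand_playlist_items('1-3,7,3') == [1, 2, 3, 7]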
67134eab
JMF
1335 def _build_format_filter(self, filter_spec):
1336 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1337
1338 OPERATORS = {
1339 '<': operator.lt,
1340 '<=': operator.le,
1341 '>': operator.gt,
1342 '>=': operator.ge,
1343 '=': operator.eq,
1344 '!=': operator.ne,
1345 }
67134eab 1346 operator_rex = re.compile(r'''(?x)\s*
a03a3c80 1347 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
083c9df9
PH
1348 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1349 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
67134eab 1350 $
083c9df9 1351 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
67134eab 1352 m = operator_rex.search(filter_spec)
9ddb6925
S
1353 if m:
1354 try:
1355 comparison_value = int(m.group('value'))
1356 except ValueError:
1357 comparison_value = parse_filesize(m.group('value'))
1358 if comparison_value is None:
1359 comparison_value = parse_filesize(m.group('value') + 'B')
1360 if comparison_value is None:
1361 raise ValueError(
1362 'Invalid value %r in format specification %r' % (
67134eab 1363 m.group('value'), filter_spec))
9ddb6925
S
1364 op = OPERATORS[m.group('op')]
1365
083c9df9 1366 if not m:
9ddb6925
S
1367 STR_OPERATORS = {
1368 '=': operator.eq,
10d33b34
YCH
1369 '^=': lambda attr, value: attr.startswith(value),
1370 '$=': lambda attr, value: attr.endswith(value),
1371 '*=': lambda attr, value: value in attr,
9ddb6925 1372 }
67134eab 1373 str_operator_rex = re.compile(r'''(?x)
f96bff99 1374 \s*(?P<key>[a-zA-Z0-9._-]+)
2cc779f4 1375 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
b0df5223 1376 \s*(?P<value>[a-zA-Z0-9._-]+)
67134eab 1377 \s*$
9ddb6925 1378 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
67134eab 1379 m = str_operator_rex.search(filter_spec)
9ddb6925
S
1380 if m:
1381 comparison_value = m.group('value')
2cc779f4
S
1382 str_op = STR_OPERATORS[m.group('op')]
1383 if m.group('negation'):
e118a879 1384 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1385 else:
1386 op = str_op
083c9df9 1387
9ddb6925 1388 if not m:
67134eab 1389 raise ValueError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1390
1391 def _filter(f):
1392 actual_value = f.get(m.group('key'))
1393 if actual_value is None:
1394 return m.group('none_inclusive')
1395 return op(actual_value, comparison_value)
67134eab
JMF
1396 return _filter
1397
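# A minimal usage sketch (illustrative only; the helper below is not part of
# this module) of the filter specs parsed above:
def _example_format_filter(ydl):
    # `ydl` is assumed to be a YoutubeDL instance; the format dicts are invented.
    accept = ydl._build_format_filter('height<=720')
    formats = [{'height': 480}, {'height': 1080}, {'vcodec': 'none'}]
    # The audio-only dict has no 'height' and is therefore dropped; appending
    # '?' to the spec ('height<=720?') would also keep formats missing the field.
    return [f for f in formats if accept(f)]  # -> [{'height': 480}]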
0017d9ad 1398 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1399
af0f7428
S
1400 def can_merge():
1401 merger = FFmpegMergerPP(self)
1402 return merger.available and merger.can_merge()
1403
91ebc640 1404 prefer_best = (
1405 not self.params.get('simulate', False)
1406 and download
1407 and (
1408 not can_merge()
19807826 1409 or info_dict.get('is_live', False)
de6000d9 1410 or self.outtmpl_dict['default'] == '-'))
91ebc640 1411
1412 return (
1413 'best/bestvideo+bestaudio'
1414 if prefer_best
1415 else 'bestvideo*+bestaudio/best'
19807826 1416 if not self.params.get('allow_multiple_audio_streams', False)
91ebc640 1417 else 'bestvideo+bestaudio/best')
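# In other words: prefer a single pre-merged file ('best/...') only when we
# are actually downloading and merging is impractical (no working ffmpeg
# merger, a live stream, or output to stdout); otherwise request separate
# bestvideo/bestaudio streams, with 'bestvideo*' unless multiple audio
# streams are explicitly allowed.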
0017d9ad 1418
67134eab
JMF
1419 def build_format_selector(self, format_spec):
1420 def syntax_error(note, start):
1421 message = (
1422 'Invalid format specification: '
1423 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1424 return SyntaxError(message)
1425
1426 PICKFIRST = 'PICKFIRST'
1427 MERGE = 'MERGE'
1428 SINGLE = 'SINGLE'
0130afb7 1429 GROUP = 'GROUP'
67134eab
JMF
1430 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1431
91ebc640 1432 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1433 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1434
67134eab
JMF
1435 def _parse_filter(tokens):
1436 filter_parts = []
1437 for type, string, start, _, _ in tokens:
1438 if type == tokenize.OP and string == ']':
1439 return ''.join(filter_parts)
1440 else:
1441 filter_parts.append(string)
1442
232541df 1443 def _remove_unused_ops(tokens):
17cc1534 1444 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1445 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1446 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1447 last_string, last_start, last_end, last_line = None, None, None, None
1448 for type, string, start, end, line in tokens:
1449 if type == tokenize.OP and string == '[':
1450 if last_string:
1451 yield tokenize.NAME, last_string, last_start, last_end, last_line
1452 last_string = None
1453 yield type, string, start, end, line
1454 # everything inside brackets will be handled by _parse_filter
1455 for type, string, start, end, line in tokens:
1456 yield type, string, start, end, line
1457 if type == tokenize.OP and string == ']':
1458 break
1459 elif type == tokenize.OP and string in ALLOWED_OPS:
1460 if last_string:
1461 yield tokenize.NAME, last_string, last_start, last_end, last_line
1462 last_string = None
1463 yield type, string, start, end, line
1464 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1465 if not last_string:
1466 last_string = string
1467 last_start = start
1468 last_end = end
1469 else:
1470 last_string += string
1471 if last_string:
1472 yield tokenize.NAME, last_string, last_start, last_end, last_line
1473
cf2ac6df 1474 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1475 selectors = []
1476 current_selector = None
1477 for type, string, start, _, _ in tokens:
1478 # ENCODING is only defined in python 3.x
1479 if type == getattr(tokenize, 'ENCODING', None):
1480 continue
1481 elif type in [tokenize.NAME, tokenize.NUMBER]:
1482 current_selector = FormatSelector(SINGLE, string, [])
1483 elif type == tokenize.OP:
cf2ac6df
JMF
1484 if string == ')':
1485 if not inside_group:
1486 # ')' will be handled by the parentheses group
1487 tokens.restore_last_token()
67134eab 1488 break
cf2ac6df 1489 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1490 tokens.restore_last_token()
1491 break
cf2ac6df
JMF
1492 elif inside_choice and string == ',':
1493 tokens.restore_last_token()
1494 break
1495 elif string == ',':
0a31a350
JMF
1496 if not current_selector:
1497 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1498 selectors.append(current_selector)
1499 current_selector = None
1500 elif string == '/':
d96d604e
JMF
1501 if not current_selector:
1502 raise syntax_error('"/" must follow a format selector', start)
67134eab 1503 first_choice = current_selector
cf2ac6df 1504 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1505 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1506 elif string == '[':
1507 if not current_selector:
1508 current_selector = FormatSelector(SINGLE, 'best', [])
1509 format_filter = _parse_filter(tokens)
1510 current_selector.filters.append(format_filter)
0130afb7
JMF
1511 elif string == '(':
1512 if current_selector:
1513 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1514 group = _parse_format_selection(tokens, inside_group=True)
1515 current_selector = FormatSelector(GROUP, group, [])
67134eab 1516 elif string == '+':
d03cfdce 1517 if not current_selector:
1518 raise syntax_error('Unexpected "+"', start)
1519 selector_1 = current_selector
1520 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1521 if not selector_2:
1522 raise syntax_error('Expected a selector', start)
1523 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab
JMF
1524 else:
1525 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1526 elif type == tokenize.ENDMARKER:
1527 break
1528 if current_selector:
1529 selectors.append(current_selector)
1530 return selectors
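# For example, 'bestvideo[height<=720]+bestaudio/best' is parsed into a
# single PICKFIRST selector: its first choice is a MERGE of two SINGLE
# selectors ('bestvideo' carrying the '[height<=720]' filter and
# 'bestaudio'), and its second choice is the SINGLE selector 'best'.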
1531
1532 def _build_selector_function(selector):
909d24dd 1533 if isinstance(selector, list): # ,
67134eab
JMF
1534 fs = [_build_selector_function(s) for s in selector]
1535
317f7ab6 1536 def selector_function(ctx):
67134eab 1537 for f in fs:
317f7ab6 1538 for format in f(ctx):
67134eab
JMF
1539 yield format
1540 return selector_function
909d24dd 1541
1542 elif selector.type == GROUP: # ()
0130afb7 1543 selector_function = _build_selector_function(selector.selector)
909d24dd 1544
1545 elif selector.type == PICKFIRST: # /
67134eab
JMF
1546 fs = [_build_selector_function(s) for s in selector.selector]
1547
317f7ab6 1548 def selector_function(ctx):
67134eab 1549 for f in fs:
317f7ab6 1550 picked_formats = list(f(ctx))
67134eab
JMF
1551 if picked_formats:
1552 return picked_formats
1553 return []
67134eab 1554
909d24dd 1555 elif selector.type == SINGLE: # atom
1556 format_spec = selector.selector if selector.selector is not None else 'best'
1557
1558 if format_spec == 'all':
1559 def selector_function(ctx):
1560 formats = list(ctx['formats'])
1561 if formats:
1562 for f in formats:
1563 yield f
1564
1565 else:
1566 format_fallback = False
1567 format_spec_obj = re.match(r'(best|worst|b|w)(video|audio|v|a)?(\*)?$', format_spec)
1568 if format_spec_obj is not None:
1569 format_idx = 0 if format_spec_obj.group(1)[0] == 'w' else -1
1570 format_type = format_spec_obj.group(2)[0] if format_spec_obj.group(2) else False
1571 not_format_type = 'v' if format_type == 'a' else 'a'
1572 format_modified = format_spec_obj.group(3) is not None
1573
1574 format_fallback = not format_type and not format_modified # for b, w
1575 filter_f = ((lambda f: f.get(format_type + 'codec') != 'none')
1576 if format_type and format_modified # bv*, ba*, wv*, wa*
1577 else (lambda f: f.get(not_format_type + 'codec') == 'none')
1578 if format_type # bv, ba, wv, wa
1579 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1580 if not format_modified # b, w
1581 else None) # b*, w*
67134eab 1582 else:
909d24dd 1583 format_idx = -1
1584 filter_f = ((lambda f: f.get('ext') == format_spec)
1585 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1586 else (lambda f: f.get('format_id') == format_spec)) # id
1587
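# To summarize the shorthands handled above: 'b'/'best' and 'w'/'worst'
# require both audio and video (with the incomplete-formats fallback applied
# below), 'bv'/'ba' ('wv'/'wa') pick video-only/audio-only formats,
# 'bv*'/'ba*' only require that the format contains video/audio, and any
# other atom is matched first as a known extension and otherwise as a
# format_id.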
1588 def selector_function(ctx):
1589 formats = list(ctx['formats'])
1590 if not formats:
1591 return
1592 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
67134eab 1593 if matches:
909d24dd 1594 yield matches[format_idx]
1595 elif format_fallback == 'force' or (format_fallback and ctx['incomplete_formats']):
 1596 # For extractors with incomplete formats (audio-only, e.g. SoundCloud,
 1597 # or video-only, e.g. Imgur), best/worst will fall back to the
 1598 # best/worst {video,audio}-only format
1599 yield formats[format_idx]
1600
1601 elif selector.type == MERGE: # +
d03cfdce 1602 def _merge(formats_pair):
1603 format_1, format_2 = formats_pair
1604
1605 formats_info = []
1606 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1607 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1608
909d24dd 1609 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1610 get_no_more = {"video": False, "audio": False}
1611 for (i, fmt_info) in enumerate(formats_info):
1612 for aud_vid in ["audio", "video"]:
1613 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1614 if get_no_more[aud_vid]:
1615 formats_info.pop(i)
1616 get_no_more[aud_vid] = True
1617
1618 if len(formats_info) == 1:
1619 return formats_info[0]
1620
d03cfdce 1621 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1622 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1623
1624 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1625 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1626
1627 output_ext = self.params.get('merge_output_format')
1628 if not output_ext:
1629 if the_only_video:
1630 output_ext = the_only_video['ext']
1631 elif the_only_audio and not video_fmts:
1632 output_ext = the_only_audio['ext']
1633 else:
1634 output_ext = 'mkv'
1635
1636 new_dict = {
67134eab 1637 'requested_formats': formats_info,
d03cfdce 1638 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1639 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
67134eab
JMF
1640 'ext': output_ext,
1641 }
d03cfdce 1642
1643 if the_only_video:
1644 new_dict.update({
1645 'width': the_only_video.get('width'),
1646 'height': the_only_video.get('height'),
1647 'resolution': the_only_video.get('resolution'),
1648 'fps': the_only_video.get('fps'),
1649 'vcodec': the_only_video.get('vcodec'),
1650 'vbr': the_only_video.get('vbr'),
1651 'stretched_ratio': the_only_video.get('stretched_ratio'),
1652 })
1653
1654 if the_only_audio:
1655 new_dict.update({
1656 'acodec': the_only_audio.get('acodec'),
1657 'abr': the_only_audio.get('abr'),
1658 })
1659
1660 return new_dict
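# Note: without --merge-output-format the merged entry keeps the single
# video track's extension (or the single audio track's when there is no
# video); any other combination falls back to 'mkv'.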
1661
1662 selector_1, selector_2 = map(_build_selector_function, selector.selector)
083c9df9 1663
317f7ab6
S
1664 def selector_function(ctx):
1665 for pair in itertools.product(
d03cfdce 1666 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
67134eab 1667 yield _merge(pair)
083c9df9 1668
67134eab 1669 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1670
317f7ab6
S
1671 def final_selector(ctx):
1672 ctx_copy = copy.deepcopy(ctx)
67134eab 1673 for _filter in filters:
317f7ab6
S
1674 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1675 return selector_function(ctx_copy)
67134eab 1676 return final_selector
083c9df9 1677
67134eab 1678 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1679 try:
232541df 1680 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7
JMF
1681 except tokenize.TokenError:
1682 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1683
1684 class TokenIterator(object):
1685 def __init__(self, tokens):
1686 self.tokens = tokens
1687 self.counter = 0
1688
1689 def __iter__(self):
1690 return self
1691
1692 def __next__(self):
1693 if self.counter >= len(self.tokens):
1694 raise StopIteration()
1695 value = self.tokens[self.counter]
1696 self.counter += 1
1697 return value
1698
1699 next = __next__
1700
1701 def restore_last_token(self):
1702 self.counter -= 1
1703
1704 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1705 return _build_selector_function(parsed_selector)
a9c58ad9 1706
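# A minimal usage sketch (illustrative only; the helper name and format dicts
# below are invented) of applying a selector built by build_format_selector().
# In normal operation process_video_result() prepares this context.
def _example_format_selection(ydl):
    selector = ydl.build_format_selector('bestvideo+bestaudio/best')
    formats = [
        {'format_id': '140', 'format': '140 - audio only', 'ext': 'm4a',
         'vcodec': 'none', 'acodec': 'mp4a.40.2'},
        {'format_id': '248', 'format': '248 - 1920x1080', 'ext': 'webm',
         'vcodec': 'vp9', 'acodec': 'none'},
    ]
    ctx = {'formats': formats, 'incomplete_formats': False}
    # Yields one merged pseudo-format with format_id '248+140' and ext 'webm'
    return list(selector(ctx))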
e5660ee6
JMF
1707 def _calc_headers(self, info_dict):
1708 res = std_headers.copy()
1709
1710 add_headers = info_dict.get('http_headers')
1711 if add_headers:
1712 res.update(add_headers)
1713
1714 cookies = self._calc_cookies(info_dict)
1715 if cookies:
1716 res['Cookie'] = cookies
1717
0016b84e
S
1718 if 'X-Forwarded-For' not in res:
1719 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1720 if x_forwarded_for_ip:
1721 res['X-Forwarded-For'] = x_forwarded_for_ip
1722
e5660ee6
JMF
1723 return res
1724
1725 def _calc_cookies(self, info_dict):
5c2266df 1726 pr = sanitized_Request(info_dict['url'])
e5660ee6 1727 self.cookiejar.add_cookie_header(pr)
662435f7 1728 return pr.get_header('Cookie')
e5660ee6 1729
dd82ffea
JMF
1730 def process_video_result(self, info_dict, download=True):
1731 assert info_dict.get('_type', 'video') == 'video'
1732
bec1fad2
PH
1733 if 'id' not in info_dict:
1734 raise ExtractorError('Missing "id" field in extractor result')
1735 if 'title' not in info_dict:
1736 raise ExtractorError('Missing "title" field in extractor result')
1737
c9969434
S
1738 def report_force_conversion(field, field_not, conversion):
1739 self.report_warning(
1740 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1741 % (field, field_not, conversion))
1742
1743 def sanitize_string_field(info, string_field):
1744 field = info.get(string_field)
1745 if field is None or isinstance(field, compat_str):
1746 return
1747 report_force_conversion(string_field, 'a string', 'string')
1748 info[string_field] = compat_str(field)
1749
1750 def sanitize_numeric_fields(info):
1751 for numeric_field in self._NUMERIC_FIELDS:
1752 field = info.get(numeric_field)
1753 if field is None or isinstance(field, compat_numeric_types):
1754 continue
1755 report_force_conversion(numeric_field, 'numeric', 'int')
1756 info[numeric_field] = int_or_none(field)
1757
1758 sanitize_string_field(info_dict, 'id')
1759 sanitize_numeric_fields(info_dict)
be6217b2 1760
dd82ffea
JMF
1761 if 'playlist' not in info_dict:
1762 # It isn't part of a playlist
1763 info_dict['playlist'] = None
1764 info_dict['playlist_index'] = None
1765
d5519808 1766 thumbnails = info_dict.get('thumbnails')
cfb56d1a
PH
1767 if thumbnails is None:
1768 thumbnail = info_dict.get('thumbnail')
1769 if thumbnail:
a7a14d95 1770 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
d5519808 1771 if thumbnails:
be6d7229 1772 thumbnails.sort(key=lambda t: (
d37708fc
RA
1773 t.get('preference') if t.get('preference') is not None else -1,
1774 t.get('width') if t.get('width') is not None else -1,
1775 t.get('height') if t.get('height') is not None else -1,
1776 t.get('id') if t.get('id') is not None else '', t.get('url')))
f6c24009 1777 for i, t in enumerate(thumbnails):
dcf77cf1 1778 t['url'] = sanitize_url(t['url'])
9603e8a7 1779 if t.get('width') and t.get('height'):
d5519808 1780 t['resolution'] = '%dx%d' % (t['width'], t['height'])
f6c24009
PH
1781 if t.get('id') is None:
1782 t['id'] = '%d' % i
d5519808 1783
b7b72db9 1784 if self.params.get('list_thumbnails'):
1785 self.list_thumbnails(info_dict)
1786 return
1787
536a55da
S
1788 thumbnail = info_dict.get('thumbnail')
1789 if thumbnail:
1790 info_dict['thumbnail'] = sanitize_url(thumbnail)
1791 elif thumbnails:
d5519808
PH
1792 info_dict['thumbnail'] = thumbnails[-1]['url']
1793
c9ae7b95 1794 if 'display_id' not in info_dict and 'id' in info_dict:
0afef30b
PH
1795 info_dict['display_id'] = info_dict['id']
1796
955c4514 1797 if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
a55e36f4
S
1798 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1799 # see http://bugs.python.org/issue1646728)
1800 try:
1801 upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
1802 info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
1803 except (ValueError, OverflowError, OSError):
1804 pass
9d2ecdbc 1805
33d2fc2f
S
1806 # Auto generate title fields corresponding to the *_number fields when missing
1807 # in order to always have clean titles. This is very common for TV series.
1808 for field in ('chapter', 'season', 'episode'):
1809 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1810 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1811
05108a49
S
1812 for cc_kind in ('subtitles', 'automatic_captions'):
1813 cc = info_dict.get(cc_kind)
1814 if cc:
1815 for _, subtitle in cc.items():
1816 for subtitle_format in subtitle:
1817 if subtitle_format.get('url'):
1818 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1819 if subtitle_format.get('ext') is None:
1820 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1821
1822 automatic_captions = info_dict.get('automatic_captions')
4bba3716 1823 subtitles = info_dict.get('subtitles')
4bba3716 1824
a504ced0 1825 if self.params.get('listsubtitles', False):
360e1ca5 1826 if 'automatic_captions' in info_dict:
05108a49
S
1827 self.list_subtitles(
1828 info_dict['id'], automatic_captions, 'automatic captions')
4bba3716 1829 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
a504ced0 1830 return
05108a49 1831
360e1ca5 1832 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 1833 info_dict['id'], subtitles, automatic_captions)
a504ced0 1834
dd82ffea
JMF
1835 # We now pick which formats have to be downloaded
1836 if info_dict.get('formats') is None:
1837 # There's only one format available
1838 formats = [info_dict]
1839 else:
1840 formats = info_dict['formats']
1841
db95dc13
PH
1842 if not formats:
1843 raise ExtractorError('No video formats found!')
1844
73af5cc8
S
1845 def is_wellformed(f):
1846 url = f.get('url')
a5ac0c47 1847 if not url:
73af5cc8
S
1848 self.report_warning(
1849 '"url" field is missing or empty - skipping format, '
1850 'there is an error in extractor')
a5ac0c47
S
1851 return False
1852 if isinstance(url, bytes):
1853 sanitize_string_field(f, 'url')
1854 return True
73af5cc8
S
1855
1856 # Filter out malformed formats for better extraction robustness
1857 formats = list(filter(is_wellformed, formats))
1858
181c7053
S
1859 formats_dict = {}
1860
dd82ffea 1861 # We check that all the formats have the format and format_id fields
db95dc13 1862 for i, format in enumerate(formats):
c9969434
S
1863 sanitize_string_field(format, 'format_id')
1864 sanitize_numeric_fields(format)
dcf77cf1 1865 format['url'] = sanitize_url(format['url'])
e74e3b63 1866 if not format.get('format_id'):
8016c922 1867 format['format_id'] = compat_str(i)
e2effb08
S
1868 else:
1869 # Sanitize format_id from characters used in format selector expression
ec85ded8 1870 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
1871 format_id = format['format_id']
1872 if format_id not in formats_dict:
1873 formats_dict[format_id] = []
1874 formats_dict[format_id].append(format)
1875
1876 # Make sure all formats have unique format_id
1877 for format_id, ambiguous_formats in formats_dict.items():
1878 if len(ambiguous_formats) > 1:
1879 for i, format in enumerate(ambiguous_formats):
1880 format['format_id'] = '%s-%d' % (format_id, i)
1881
1882 for i, format in enumerate(formats):
8c51aa65 1883 if format.get('format') is None:
6febd1c1 1884 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
1885 id=format['format_id'],
1886 res=self.format_resolution(format),
6febd1c1 1887 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
8c51aa65 1888 )
c1002e96 1889 # Automatically determine file extension if missing
5b1d8575 1890 if format.get('ext') is None:
cce929ea 1891 format['ext'] = determine_ext(format['url']).lower()
b5559424
S
1892 # Automatically determine protocol if missing (useful for format
1893 # selection purposes)
6f0be937 1894 if format.get('protocol') is None:
b5559424 1895 format['protocol'] = determine_protocol(format)
e5660ee6
JMF
1896 # Add HTTP headers, so that external programs can use them from the
1897 # json output
1898 full_format_info = info_dict.copy()
1899 full_format_info.update(format)
1900 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
1901 # Remove private housekeeping stuff
1902 if '__x_forwarded_for_ip' in info_dict:
1903 del info_dict['__x_forwarded_for_ip']
dd82ffea 1904
4bcc7bd1 1905 # TODO Central sorting goes here
99e206d5 1906
f89197d7 1907 if formats[0] is not info_dict:
b3d9ef88
JMF
 1908 # only set the 'formats' field if the original info_dict lists them
1909 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 1910 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 1911 # which can't be exported to json
b3d9ef88 1912 info_dict['formats'] = formats
cfb56d1a 1913 if self.params.get('listformats'):
bfaae0a7 1914 self.list_formats(info_dict)
1915 return
1916
de3ef3ed 1917 req_format = self.params.get('format')
a9c58ad9 1918 if req_format is None:
0017d9ad
S
1919 req_format = self._default_format_spec(info_dict, download=download)
1920 if self.params.get('verbose'):
e8be92f9 1921 self.to_screen('[debug] Default format spec: %s' % req_format)
0017d9ad 1922
5acfa126 1923 format_selector = self.build_format_selector(req_format)
317f7ab6
S
1924
 1925 # While in format selection we may need access to the original
1926 # format set in order to calculate some metrics or do some processing.
1927 # For now we need to be able to guess whether original formats provided
1928 # by extractor are incomplete or not (i.e. whether extractor provides only
1929 # video-only or audio-only formats) for proper formats selection for
1930 # extractors with such incomplete formats (see
067aa17e 1931 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6
S
1932 # Since formats may be filtered during format selection and may not match
1933 # the original formats the results may be incorrect. Thus original formats
1934 # or pre-calculated metrics should be passed to format selection routines
1935 # as well.
1936 # We will pass a context object containing all necessary additional data
1937 # instead of just formats.
1938 # This fixes incorrect format selection issue (see
067aa17e 1939 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 1940 incomplete_formats = (
317f7ab6 1941 # All formats are video-only or
3089bc74 1942 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 1943 # all formats are audio-only
3089bc74 1944 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
317f7ab6
S
1945
1946 ctx = {
1947 'formats': formats,
1948 'incomplete_formats': incomplete_formats,
1949 }
1950
1951 formats_to_download = list(format_selector(ctx))
dd82ffea 1952 if not formats_to_download:
6febd1c1 1953 raise ExtractorError('requested format not available',
78a3a9f8 1954 expected=True)
dd82ffea
JMF
1955
1956 if download:
909d24dd 1957 self.to_screen('[info] Downloading format(s) %s' % ", ".join([f['format_id'] for f in formats_to_download]))
dd82ffea 1958 if len(formats_to_download) > 1:
6febd1c1 1959 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
dd82ffea
JMF
1960 for format in formats_to_download:
1961 new_info = dict(info_dict)
1962 new_info.update(format)
1963 self.process_info(new_info)
1964 # We update the info dict with the best quality format (backwards compatibility)
1965 info_dict.update(formats_to_download[-1])
1966 return info_dict
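# In short, process_video_result() sanitizes the extractor output, resolves
# thumbnails and subtitles, normalizes every format dict (ids, ext, protocol,
# HTTP headers), applies the requested --format selector over the surviving
# formats and finally hands each selected format to process_info().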
1967
98c70d6f 1968 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 1969 """Select the requested subtitles and their format"""
98c70d6f
JMF
1970 available_subs = {}
1971 if normal_subtitles and self.params.get('writesubtitles'):
1972 available_subs.update(normal_subtitles)
1973 if automatic_captions and self.params.get('writeautomaticsub'):
1974 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
1975 if lang not in available_subs:
1976 available_subs[lang] = cap_info
1977
4d171848
JMF
1978 if (not self.params.get('writesubtitles') and not
1979 self.params.get('writeautomaticsub') or not
1980 available_subs):
1981 return None
a504ced0
JMF
1982
1983 if self.params.get('allsubtitles', False):
1984 requested_langs = available_subs.keys()
1985 else:
1986 if self.params.get('subtitleslangs', False):
1987 requested_langs = self.params.get('subtitleslangs')
1988 elif 'en' in available_subs:
1989 requested_langs = ['en']
1990 else:
1991 requested_langs = [list(available_subs.keys())[0]]
1992
1993 formats_query = self.params.get('subtitlesformat', 'best')
1994 formats_preference = formats_query.split('/') if formats_query else []
1995 subs = {}
1996 for lang in requested_langs:
1997 formats = available_subs.get(lang)
1998 if formats is None:
1999 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2000 continue
a504ced0
JMF
2001 for ext in formats_preference:
2002 if ext == 'best':
2003 f = formats[-1]
2004 break
2005 matches = list(filter(lambda f: f['ext'] == ext, formats))
2006 if matches:
2007 f = matches[-1]
2008 break
2009 else:
2010 f = formats[-1]
2011 self.report_warning(
2012 'No subtitle format found matching "%s" for language %s, '
2013 'using %s' % (formats_query, lang, f['ext']))
2014 subs[lang] = f
2015 return subs
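# Example: with --sub-format 'vtt/srt/best' and an 'en' track offered as
# [{'ext': 'srv1'}, {'ext': 'vtt'}], the loop above picks the 'vtt' entry;
# a literal 'best' in the preference list means "take the last format
# offered", and if nothing in the list matches at all the last offered
# format is used with a warning.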
2016
d06daf23
S
2017 def __forced_printings(self, info_dict, filename, incomplete):
2018 def print_mandatory(field):
2019 if (self.params.get('force%s' % field, False)
2020 and (not incomplete or info_dict.get(field) is not None)):
2021 self.to_stdout(info_dict[field])
2022
2023 def print_optional(field):
2024 if (self.params.get('force%s' % field, False)
2025 and info_dict.get(field) is not None):
2026 self.to_stdout(info_dict[field])
2027
2028 print_mandatory('title')
2029 print_mandatory('id')
2030 if self.params.get('forceurl', False) and not incomplete:
2031 if info_dict.get('requested_formats') is not None:
2032 for f in info_dict['requested_formats']:
2033 self.to_stdout(f['url'] + f.get('play_path', ''))
2034 else:
2035 # For RTMP URLs, also include the playpath
2036 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2037 print_optional('thumbnail')
2038 print_optional('description')
2039 if self.params.get('forcefilename', False) and filename is not None:
2040 self.to_stdout(filename)
2041 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2042 self.to_stdout(formatSeconds(info_dict['duration']))
2043 print_mandatory('format')
2044 if self.params.get('forcejson', False):
277d6ff5 2045 self.post_extract(info_dict)
d06daf23
S
2046 self.to_stdout(json.dumps(info_dict))
2047
8222d8de
JMF
2048 def process_info(self, info_dict):
2049 """Process a single resolved IE result."""
2050
2051 assert info_dict.get('_type', 'video') == 'video'
fd288278 2052
0202b52a 2053 info_dict.setdefault('__postprocessors', [])
2054
fd288278
PH
2055 max_downloads = self.params.get('max_downloads')
2056 if max_downloads is not None:
2057 if self._num_downloads >= int(max_downloads):
2058 raise MaxDownloadsReached()
8222d8de 2059
d06daf23 2060 # TODO: backward compatibility, to be removed
8222d8de 2061 info_dict['fulltitle'] = info_dict['title']
8222d8de 2062
11b85ce6 2063 if 'format' not in info_dict:
8222d8de
JMF
2064 info_dict['format'] = info_dict['ext']
2065
8b0d7497 2066 if self._match_entry(info_dict, incomplete=False) is not None:
8222d8de
JMF
2067 return
2068
277d6ff5 2069 self.post_extract(info_dict)
fd288278 2070 self._num_downloads += 1
8222d8de 2071
5bfa4862 2072 info_dict = self.pre_process(info_dict)
2073
de6000d9 2074 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2075 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2076 files_to_move = {}
de6000d9 2077 skip_dl = self.params.get('skip_download', False)
8222d8de
JMF
2078
2079 # Forced printings
0202b52a 2080 self.__forced_printings(info_dict, full_filename, incomplete=False)
8222d8de 2081
8222d8de 2082 if self.params.get('simulate', False):
2d30509f 2083 if self.params.get('force_write_download_archive', False):
2084 self.record_download_archive(info_dict)
2085
2086 # Do nothing else if in simulate mode
8222d8de
JMF
2087 return
2088
de6000d9 2089 if full_filename is None:
8222d8de
JMF
2090 return
2091
c5c9bf0c 2092 def ensure_dir_exists(path):
0202b52a 2093 return make_dir(path, self.report_error)
c5c9bf0c 2094
0202b52a 2095 if not ensure_dir_exists(encodeFilename(full_filename)):
2096 return
2097 if not ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2098 return
2099
2100 if self.params.get('writedescription', False):
de6000d9 2101 descfn = self.prepare_filename(info_dict, 'description')
0202b52a 2102 if not ensure_dir_exists(encodeFilename(descfn)):
2103 return
0c3d0f51 2104 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2105 self.to_screen('[info] Video description is already present')
f00fd51d
JMF
2106 elif info_dict.get('description') is None:
2107 self.report_warning('There\'s no description to write.')
7b6fefc9
PH
2108 else:
2109 try:
6febd1c1 2110 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9
PH
2111 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2112 descfile.write(info_dict['description'])
7b6fefc9 2113 except (OSError, IOError):
6febd1c1 2114 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2115 return
8222d8de 2116
1fb07d10 2117 if self.params.get('writeannotations', False):
de6000d9 2118 annofn = self.prepare_filename(info_dict, 'annotation')
0202b52a 2119 if not ensure_dir_exists(encodeFilename(annofn)):
2120 return
0c3d0f51 2121 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2122 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2123 elif not info_dict.get('annotations'):
2124 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2125 else:
2126 try:
6febd1c1 2127 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9
PH
2128 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2129 annofile.write(info_dict['annotations'])
2130 except (KeyError, TypeError):
6febd1c1 2131 self.report_warning('There are no annotations to write.')
7b6fefc9 2132 except (OSError, IOError):
6febd1c1 2133 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2134 return
1fb07d10 2135
9f448fcb 2136 def dl(name, info, subtitle=False):
98b69821 2137 fd = get_suitable_downloader(info, self.params)(self, self.params)
2138 for ph in self._progress_hooks:
2139 fd.add_progress_hook(ph)
2140 if self.params.get('verbose'):
29f7c58a 2141 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
9f448fcb 2142 return fd.download(name, info, subtitle)
98b69821 2143
c4a91be7 2144 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2145 self.params.get('writeautomaticsub')])
c4a91be7 2146
c84dd8a9 2147 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de
JMF
 2148 # Subtitle download errors are already handled in the relevant IE,
 2149 # so it will silently continue when used with an IE that does not support them
c84dd8a9 2150 subtitles = info_dict['requested_subtitles']
fa57af1e 2151 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0
JMF
2152 for sub_lang, sub_info in subtitles.items():
2153 sub_format = sub_info['ext']
de6000d9 2154 sub_fn = self.prepare_filename(info_dict, 'subtitle')
2155 sub_filename = subtitles_filename(
0fd1a2b0 2156 temp_filename if not skip_dl else sub_fn,
0202b52a 2157 sub_lang, sub_format, info_dict.get('ext'))
de6000d9 2158 sub_filename_final = subtitles_filename(sub_fn, sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2159 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2160 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
0202b52a 2161 files_to_move[sub_filename] = sub_filename_final
a504ced0 2162 else:
0c9df79e 2163 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c
RA
2164 if sub_info.get('data') is not None:
2165 try:
2166 # Use newline='' to prevent conversion of newline characters
067aa17e 2167 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c
RA
2168 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2169 subfile.write(sub_info['data'])
0202b52a 2170 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c
RA
2171 except (OSError, IOError):
2172 self.report_error('Cannot write subtitles file ' + sub_filename)
2173 return
7b6fefc9 2174 else:
5ff1bc0c 2175 try:
9f448fcb 2176 dl(sub_filename, sub_info, subtitle=True)
0202b52a 2177 files_to_move[sub_filename] = sub_filename_final
0c9df79e 2178 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
5ff1bc0c
RA
2179 self.report_warning('Unable to download subtitle for "%s": %s' %
2180 (sub_lang, error_to_compat_str(err)))
2181 continue
8222d8de 2182
de6000d9 2183 if skip_dl:
57df9f53 2184 if self.params.get('convertsubtitles', False):
0202b52a 2185 # subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
de6000d9 2186 filename_real_ext = os.path.splitext(full_filename)[1][1:]
57df9f53 2187 filename_wo_ext = (
0202b52a 2188 os.path.splitext(full_filename)[0]
57df9f53 2189 if filename_real_ext == info_dict['ext']
0202b52a 2190 else full_filename)
57df9f53 2191 afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
0202b52a 2192 # if subconv.available:
2193 # info_dict['__postprocessors'].append(subconv)
57df9f53 2194 if os.path.exists(encodeFilename(afilename)):
f791b419
U
2195 self.to_screen(
2196 '[download] %s has already been downloaded and '
2197 'converted' % afilename)
57df9f53
U
2198 else:
2199 try:
0202b52a 2200 self.post_process(full_filename, info_dict, files_to_move)
af819c21 2201 except PostProcessingError as err:
2202 self.report_error('Postprocessing: %s' % str(err))
57df9f53
U
2203 return
2204
8222d8de 2205 if self.params.get('writeinfojson', False):
de6000d9 2206 infofn = self.prepare_filename(info_dict, 'infojson')
0202b52a 2207 if not ensure_dir_exists(encodeFilename(infofn)):
2208 return
0c3d0f51 2209 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2210 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2211 else:
66c935fb 2212 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2213 try:
cb202fd2 2214 write_json_file(self.filter_requested_info(info_dict), infofn)
7b6fefc9 2215 except (OSError, IOError):
66c935fb 2216 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2217 return
de6000d9 2218 info_dict['__infojson_filename'] = infofn
8222d8de 2219
de6000d9 2220 thumbfn = self.prepare_filename(info_dict, 'thumbnail')
2221 thumb_fn_temp = temp_filename if not skip_dl else thumbfn
2222 for thumb_ext in self._write_thumbnails(info_dict, thumb_fn_temp):
2223 thumb_filename_temp = replace_extension(thumb_fn_temp, thumb_ext, info_dict.get('ext'))
2224 thumb_filename = replace_extension(thumbfn, thumb_ext, info_dict.get('ext'))
2225 files_to_move[thumb_filename_temp] = info_dict['__thumbnail_filename'] = thumb_filename
8222d8de 2226
732044af 2227 # Write internet shortcut files
2228 url_link = webloc_link = desktop_link = False
2229 if self.params.get('writelink', False):
2230 if sys.platform == "darwin": # macOS.
2231 webloc_link = True
2232 elif sys.platform.startswith("linux"):
2233 desktop_link = True
2234 else: # if sys.platform in ['win32', 'cygwin']:
2235 url_link = True
2236 if self.params.get('writeurllink', False):
2237 url_link = True
2238 if self.params.get('writewebloclink', False):
2239 webloc_link = True
2240 if self.params.get('writedesktoplink', False):
2241 desktop_link = True
2242
2243 if url_link or webloc_link or desktop_link:
2244 if 'webpage_url' not in info_dict:
2245 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2246 return
2247 ascii_url = iri_to_uri(info_dict['webpage_url'])
2248
2249 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2250 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2251 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2252 self.to_screen('[info] Internet shortcut is already present')
2253 else:
2254 try:
2255 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2256 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2257 template_vars = {'url': ascii_url}
2258 if embed_filename:
2259 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2260 linkfile.write(template % template_vars)
2261 except (OSError, IOError):
2262 self.report_error('Cannot write internet shortcut ' + linkfn)
2263 return False
2264 return True
2265
2266 if url_link:
2267 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2268 return
2269 if webloc_link:
2270 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2271 return
2272 if desktop_link:
2273 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2274 return
2275
2276 # Download
2277 must_record_download_archive = False
de6000d9 2278 if not skip_dl:
4340deca 2279 try:
0202b52a 2280
6b591b29 2281 def existing_file(*filepaths):
2282 ext = info_dict.get('ext')
2283 final_ext = self.params.get('final_ext', ext)
2284 existing_files = []
2285 for file in orderedSet(filepaths):
2286 if final_ext != ext:
2287 converted = replace_extension(file, final_ext, ext)
2288 if os.path.exists(encodeFilename(converted)):
2289 existing_files.append(converted)
2290 if os.path.exists(encodeFilename(file)):
2291 existing_files.append(file)
2292
2293 if not existing_files or self.params.get('overwrites', False):
2294 for file in orderedSet(existing_files):
2295 self.report_file_delete(file)
2296 os.remove(encodeFilename(file))
2297 return None
2298
2299 self.report_file_already_downloaded(existing_files[0])
2300 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2301 return existing_files[0]
0202b52a 2302
2303 success = True
4340deca
P
2304 if info_dict.get('requested_formats') is not None:
2305 downloaded = []
d47aeb22 2306 merger = FFmpegMergerPP(self)
63ad4d43 2307 if self.params.get('allow_unplayable_formats'):
2308 self.report_warning(
2309 'You have requested merging of multiple formats '
2310 'while also allowing unplayable formats to be downloaded. '
2311 'The formats won\'t be merged to prevent data corruption.')
2312 elif not merger.available:
2313 self.report_warning(
2314 'You have requested merging of multiple formats but ffmpeg is not installed. '
2315 'The formats won\'t be merged.')
81cd954a
S
2316
2317 def compatible_formats(formats):
d03cfdce 2318 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2319 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2320 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2321 if len(video_formats) > 2 or len(audio_formats) > 2:
2322 return False
2323
81cd954a 2324 # Check extension
d03cfdce 2325 exts = set(format.get('ext') for format in formats)
2326 COMPATIBLE_EXTS = (
2327 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2328 set(('webm',)),
2329 )
2330 for ext_sets in COMPATIBLE_EXTS:
2331 if ext_sets.issuperset(exts):
2332 return True
81cd954a
S
2333 # TODO: Check acodec/vcodec
2334 return False
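# e.g. an mp4 video plus an m4a audio track counts as compatible and
# keeps its extension, while webm video plus m4a audio does not and
# triggers the mkv fallback below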
2335
2336 requested_formats = info_dict['requested_formats']
0202b52a 2337 old_ext = info_dict['ext']
c0dea0a7 2338 if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
38c6902b 2339 info_dict['ext'] = 'mkv'
4a5a898a
S
2340 self.report_warning(
2341 'Requested formats are incompatible for merge and will be merged into mkv.')
0202b52a 2342
2343 def correct_ext(filename):
2344 filename_real_ext = os.path.splitext(filename)[1][1:]
2345 filename_wo_ext = (
2346 os.path.splitext(filename)[0]
2347 if filename_real_ext == old_ext
2348 else filename)
2349 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2350
38c6902b 2351 # Ensure filename always has a correct extension for successful merge
0202b52a 2352 full_filename = correct_ext(full_filename)
2353 temp_filename = correct_ext(temp_filename)
2354 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2355 info_dict['__real_download'] = False
0202b52a 2356 if dl_filename is None:
81cd954a 2357 for f in requested_formats:
5b5fbc08
JMF
2358 new_info = dict(info_dict)
2359 new_info.update(f)
c5c9bf0c 2360 fname = prepend_extension(
de6000d9 2361 self.prepare_filename(new_info, 'temp'),
c5c9bf0c
S
2362 'f%s' % f['format_id'], new_info['ext'])
2363 if not ensure_dir_exists(fname):
2364 return
5b5fbc08 2365 downloaded.append(fname)
a9e7f546 2366 partial_success, real_download = dl(fname, new_info)
1ea24129 2367 info_dict['__real_download'] = info_dict['__real_download'] or real_download
5b5fbc08 2368 success = success and partial_success
63ad4d43 2369 if merger.available and not self.params.get('allow_unplayable_formats'):
efabc161 2370 info_dict['__postprocessors'].append(merger)
1ea24129 2371 info_dict['__files_to_merge'] = downloaded
2372 # Even if there were no downloads, it is being merged only now
2373 info_dict['__real_download'] = True
42bb0c59 2374 else:
2375 for file in downloaded:
2376 files_to_move[file] = None
4340deca
P
2377 else:
2378 # Just a single file
0202b52a 2379 dl_filename = existing_file(full_filename, temp_filename)
2380 if dl_filename is None:
2381 success, real_download = dl(temp_filename, info_dict)
2382 info_dict['__real_download'] = real_download
2383
0202b52a 2384 dl_filename = dl_filename or temp_filename
c571435f 2385 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2386
4340deca 2387 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
7960b056 2388 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca
P
2389 return
2390 except (OSError, IOError) as err:
2391 raise UnavailableVideoError(err)
2392 except (ContentTooShortError, ) as err:
2393 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2394 return
8222d8de 2395
de6000d9 2396 if success and full_filename != '-':
6271f1ca 2397 # Fixup content
62cd676c
PH
2398 fixup_policy = self.params.get('fixup')
2399 if fixup_policy is None:
2400 fixup_policy = 'detect_or_warn'
2401
e4172ac9 2402 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
d1e4a464 2403
6271f1ca
PH
2404 stretched_ratio = info_dict.get('stretched_ratio')
2405 if stretched_ratio is not None and stretched_ratio != 1:
6271f1ca
PH
2406 if fixup_policy == 'warn':
2407 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2408 info_dict['id'], stretched_ratio))
2409 elif fixup_policy == 'detect_or_warn':
2410 stretched_pp = FFmpegFixupStretchedPP(self)
2411 if stretched_pp.available:
6271f1ca
PH
2412 info_dict['__postprocessors'].append(stretched_pp)
2413 else:
2414 self.report_warning(
d1e4a464
S
2415 '%s: Non-uniform pixel ratio (%s). %s'
2416 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
6271f1ca 2417 else:
62cd676c
PH
2418 assert fixup_policy in ('ignore', 'never')
2419
3089bc74 2420 if (info_dict.get('requested_formats') is None
6b591b29 2421 and info_dict.get('container') == 'm4a_dash'
2422 and info_dict.get('ext') == 'm4a'):
62cd676c 2423 if fixup_policy == 'warn':
d1e4a464
S
2424 self.report_warning(
2425 '%s: writing DASH m4a. '
2426 'Only some players support this container.'
2427 % info_dict['id'])
62cd676c
PH
2428 elif fixup_policy == 'detect_or_warn':
2429 fixup_pp = FFmpegFixupM4aPP(self)
2430 if fixup_pp.available:
62cd676c
PH
2431 info_dict['__postprocessors'].append(fixup_pp)
2432 else:
2433 self.report_warning(
d1e4a464
S
2434 '%s: writing DASH m4a. '
2435 'Only some players support this container. %s'
2436 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
62cd676c
PH
2437 else:
2438 assert fixup_policy in ('ignore', 'never')
6271f1ca 2439
3089bc74
S
2440 if (info_dict.get('protocol') == 'm3u8_native'
2441 or info_dict.get('protocol') == 'm3u8'
2442 and self.params.get('hls_prefer_native')):
f17f8651 2443 if fixup_policy == 'warn':
a02682fd 2444 self.report_warning('%s: malformed AAC bitstream detected.' % (
f17f8651 2445 info_dict['id']))
2446 elif fixup_policy == 'detect_or_warn':
2447 fixup_pp = FFmpegFixupM3u8PP(self)
2448 if fixup_pp.available:
f17f8651 2449 info_dict['__postprocessors'].append(fixup_pp)
2450 else:
2451 self.report_warning(
a02682fd 2452 '%s: malformed AAC bitstream detected. %s'
d1e4a464 2453 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
f17f8651 2454 else:
2455 assert fixup_policy in ('ignore', 'never')
2456
8222d8de 2457 try:
0202b52a 2458 self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2459 except PostProcessingError as err:
2460 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2461 return
ab8e5e51
AM
2462 try:
2463 for ph in self._post_hooks:
0202b52a 2464 ph(full_filename)
ab8e5e51
AM
2465 except Exception as err:
2466 self.report_error('post hooks: %s' % str(err))
2467 return
2d30509f 2468 must_record_download_archive = True
2469
2470 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2471 self.record_download_archive(info_dict)
c3e6ffba 2472 max_downloads = self.params.get('max_downloads')
2473 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2474 raise MaxDownloadsReached()
8222d8de
JMF
2475
2476 def download(self, url_list):
2477 """Download a given list of URLs."""
de6000d9 2478 outtmpl = self.outtmpl_dict['default']
3089bc74
S
2479 if (len(url_list) > 1
2480 and outtmpl != '-'
2481 and '%' not in outtmpl
2482 and self.params.get('max_downloads') != 1):
acd69589 2483 raise SameFileError(outtmpl)
8222d8de
JMF
2484
2485 for url in url_list:
2486 try:
5f6a1245 2487 # It also downloads the videos
61aa5ba3
S
2488 res = self.extract_info(
2489 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2490 except UnavailableVideoError:
6febd1c1 2491 self.report_error('unable to download video')
8222d8de 2492 except MaxDownloadsReached:
8b0d7497 2493 self.to_screen('[info] Maximum number of downloaded files reached')
2494 raise
2495 except ExistingVideoReached:
d83cb531 2496 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2497 raise
2498 except RejectedVideoReached:
d83cb531 2499 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
8222d8de 2500 raise
63e0be34
PH
2501 else:
2502 if self.params.get('dump_single_json', False):
277d6ff5 2503 self.post_extract(res)
63e0be34 2504 self.to_stdout(json.dumps(res))
8222d8de
JMF
2505
2506 return self._download_retcode
2507
1dcc4c0c 2508 def download_with_info_file(self, info_filename):
31bd3925
JMF
2509 with contextlib.closing(fileinput.FileInput(
2510 [info_filename], mode='r',
2511 openhook=fileinput.hook_encoded('utf-8'))) as f:
2512 # FileInput doesn't have a read method, we can't call json.load
cb202fd2 2513 info = self.filter_requested_info(json.loads('\n'.join(f)))
d4943898
JMF
2514 try:
2515 self.process_ie_result(info, download=True)
2516 except DownloadError:
2517 webpage_url = info.get('webpage_url')
2518 if webpage_url is not None:
6febd1c1 2519 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898
JMF
2520 return self.download([webpage_url])
2521 else:
2522 raise
2523 return self._download_retcode
1dcc4c0c 2524
cb202fd2
S
2525 @staticmethod
2526 def filter_requested_info(info_dict):
18590cec 2527 fields_to_remove = ('requested_formats', 'requested_subtitles')
cb202fd2
S
2528 return dict(
2529 (k, v) for k, v in info_dict.items()
18590cec 2530 if (k[0] != '_' or k == '_type') and k not in fields_to_remove)
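# i.e. every key starting with '_' (except '_type') plus 'requested_formats'
# and 'requested_subtitles' is stripped before the info dict is written out
# as JSON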
cb202fd2 2531
5bfa4862 2532 def run_pp(self, pp, infodict, files_to_move={}):
2533 files_to_delete = []
af819c21 2534 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2535 if not files_to_delete:
2536 return files_to_move, infodict
2537
2538 if self.params.get('keepvideo', False):
2539 for f in files_to_delete:
2540 files_to_move.setdefault(f, '')
2541 else:
2542 for old_filename in set(files_to_delete):
2543 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2544 try:
2545 os.remove(encodeFilename(old_filename))
2546 except (IOError, OSError):
2547 self.report_warning('Unable to remove downloaded original file')
2548 if old_filename in files_to_move:
2549 del files_to_move[old_filename]
2550 return files_to_move, infodict
2551
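# A minimal sketch (illustrative only; this class is not part of the module)
# of the contract run_pp() relies on. Real postprocessors normally derive from
# PostProcessor in yt_dlp/postprocessor/common.py.
class _ExampleNoOpPP(object):
    def run(self, info):
        files_to_delete = []          # paths run_pp() may delete (or keep with -k)
        return files_to_delete, info  # the info dict may be returned modified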
277d6ff5 2552 @staticmethod
2553 def post_extract(info_dict):
2554 def actual_post_extract(info_dict):
2555 if info_dict.get('_type') in ('playlist', 'multi_video'):
2556 for video_dict in info_dict.get('entries', {}):
2557 actual_post_extract(video_dict)
2558 return
2559
2560 if '__post_extractor' not in info_dict:
2561 return
2562 post_extractor = info_dict['__post_extractor']
2563 if post_extractor:
2564 info_dict.update(post_extractor().items())
2565 del info_dict['__post_extractor']
2566 return
2567
2568 actual_post_extract(info_dict)
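# '__post_extractor' is expected to be a callable set by the extractor; it is
# invoked lazily here, its returned mapping is merged into the info dict
# (recursing into playlist entries) and the key is then removed.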
2569
5bfa4862 2570 def pre_process(self, ie_info):
2571 info = dict(ie_info)
2572 for pp in self._pps['beforedl']:
2573 info = self.run_pp(pp, info)[1]
2574 return info
2575
0202b52a 2576 def post_process(self, filename, ie_info, files_to_move={}):
8222d8de
JMF
2577 """Run all the postprocessors on the given file."""
2578 info = dict(ie_info)
2579 info['filepath'] = filename
de6000d9 2580 info['__files_to_move'] = {}
0202b52a 2581
5bfa4862 2582 for pp in ie_info.get('__postprocessors', []) + self._pps['normal']:
2583 files_to_move, info = self.run_pp(pp, info, files_to_move)
de6000d9 2584 info = self.run_pp(MoveFilesAfterDownloadPP(self, files_to_move), info)[1]
5bfa4862 2585 for pp in self._pps['aftermove']:
de6000d9 2586 info = self.run_pp(pp, info, {})[1]
c1c9a79c 2587
5db07df6 2588 def _make_archive_id(self, info_dict):
e9fef7ee
S
2589 video_id = info_dict.get('id')
2590 if not video_id:
2591 return
5db07df6
PH
2592 # Future-proof against any change in case
2593 # and backwards compatibility with prior versions
e9fef7ee 2594 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 2595 if extractor is None:
1211bb6d
S
2596 url = str_or_none(info_dict.get('url'))
2597 if not url:
2598 return
e9fef7ee
S
2599 # Try to find matching extractor for the URL and take its ie_key
2600 for ie in self._ies:
1211bb6d 2601 if ie.suitable(url):
e9fef7ee
S
2602 extractor = ie.ie_key()
2603 break
2604 else:
2605 return
d0757229 2606 return '%s %s' % (extractor.lower(), video_id)
5db07df6
PH
2607
2608 def in_download_archive(self, info_dict):
2609 fn = self.params.get('download_archive')
2610 if fn is None:
2611 return False
2612
2613 vid_id = self._make_archive_id(info_dict)
e9fef7ee 2614 if not vid_id:
7012b23c 2615 return False # Incomplete video information
5db07df6 2616
a45e8619 2617 return vid_id in self.archive
c1c9a79c
PH
2618
2619 def record_download_archive(self, info_dict):
2620 fn = self.params.get('download_archive')
2621 if fn is None:
2622 return
5db07df6
PH
2623 vid_id = self._make_archive_id(info_dict)
2624 assert vid_id
c1c9a79c 2625 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 2626 archive_file.write(vid_id + '\n')
a45e8619 2627 self.archive.add(vid_id)
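# The archive file thus accumulates one '<extractor> <id>' line per
# download, e.g. 'youtube abcdefghijk'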
dd82ffea 2628
8c51aa65 2629 @staticmethod
8abeeb94 2630 def format_resolution(format, default='unknown'):
fb04e403
PH
2631 if format.get('vcodec') == 'none':
2632 return 'audio only'
f49d89ee
PH
2633 if format.get('resolution') is not None:
2634 return format['resolution']
8c51aa65
JMF
2635 if format.get('height') is not None:
2636 if format.get('width') is not None:
6febd1c1 2637 res = '%sx%s' % (format['width'], format['height'])
8c51aa65 2638 else:
6febd1c1 2639 res = '%sp' % format['height']
f49d89ee 2640 elif format.get('width') is not None:
388ae76b 2641 res = '%dx?' % format['width']
8c51aa65 2642 else:
8abeeb94 2643 res = default
8c51aa65
JMF
2644 return res
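# Examples of the strings produced above:
#   {'vcodec': 'none'}             -> 'audio only'
#   {'width': 1280, 'height': 720} -> '1280x720'
#   {'height': 720}                -> '720p'
#   {'width': 1280}                -> '1280x?'
#   {}                             -> 'unknown' (the default)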
2645
c57f7757
PH
2646 def _format_note(self, fdict):
2647 res = ''
2648 if fdict.get('ext') in ['f4f', 'f4m']:
2649 res += '(unsupported) '
32f90364
PH
2650 if fdict.get('language'):
2651 if res:
2652 res += ' '
9016d76f 2653 res += '[%s] ' % fdict['language']
c57f7757
PH
2654 if fdict.get('format_note') is not None:
2655 res += fdict['format_note'] + ' '
2656 if fdict.get('tbr') is not None:
2657 res += '%4dk ' % fdict['tbr']
2658 if fdict.get('container') is not None:
2659 if res:
2660 res += ', '
2661 res += '%s container' % fdict['container']
3089bc74
S
2662 if (fdict.get('vcodec') is not None
2663 and fdict.get('vcodec') != 'none'):
c57f7757
PH
2664 if res:
2665 res += ', '
2666 res += fdict['vcodec']
91c7271a 2667 if fdict.get('vbr') is not None:
c57f7757
PH
2668 res += '@'
2669 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2670 res += 'video@'
2671 if fdict.get('vbr') is not None:
2672 res += '%4dk' % fdict['vbr']
fbb21cf5 2673 if fdict.get('fps') is not None:
5d583bdf
S
2674 if res:
2675 res += ', '
2676 res += '%sfps' % fdict['fps']
c57f7757
PH
2677 if fdict.get('acodec') is not None:
2678 if res:
2679 res += ', '
2680 if fdict['acodec'] == 'none':
2681 res += 'video only'
2682 else:
2683 res += '%-5s' % fdict['acodec']
2684 elif fdict.get('abr') is not None:
2685 if res:
2686 res += ', '
2687 res += 'audio'
2688 if fdict.get('abr') is not None:
2689 res += '@%3dk' % fdict['abr']
2690 if fdict.get('asr') is not None:
2691 res += ' (%5dHz)' % fdict['asr']
2692 if fdict.get('filesize') is not None:
2693 if res:
2694 res += ', '
2695 res += format_bytes(fdict['filesize'])
9732d77e
PH
2696 elif fdict.get('filesize_approx') is not None:
2697 if res:
2698 res += ', '
2699 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 2700 return res
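# Example note assembled above for a hypothetical video-only fdict
# {'tbr': 2500, 'vcodec': 'vp9', 'fps': 30, 'acodec': 'none'}:
#   '2500k , vp9, 30fps, video only'
# (the stray space before the first comma comes from the trailing space in '%4dk ')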
91c7271a 2701
76d321f6 2702 def _format_note_table(self, f):
2703 def join_fields(*vargs):
2704 return ', '.join((val for val in vargs if val != ''))
2705
2706 return join_fields(
2707 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2708 format_field(f, 'language', '[%s]'),
2709 format_field(f, 'format_note'),
2710 format_field(f, 'container', ignore=(None, f.get('ext'))),
2711 format_field(f, 'asr', '%5dHz'))
2712
c57f7757 2713 def list_formats(self, info_dict):
94badb25 2714 formats = info_dict.get('formats', [info_dict])
76d321f6 2715 new_format = self.params.get('listformats_table', False)
2716 if new_format:
2717 table = [
2718 [
2719 format_field(f, 'format_id'),
2720 format_field(f, 'ext'),
2721 self.format_resolution(f),
2722 format_field(f, 'fps', '%d'),
2723 '|',
2724 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2725 format_field(f, 'tbr', '%4dk'),
fb198a8a 2726 (f.get('protocol') or '').replace('http_dash_segments', 'dash').replace('native', 'n').replace('niconico_', ''),
76d321f6 2727 '|',
2728 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2729 format_field(f, 'vbr', '%4dk'),
2730 format_field(f, 'acodec', default='unknown').replace('none', ''),
2731 format_field(f, 'abr', '%3dk'),
2732 format_field(f, 'asr', '%5dHz'),
2733 self._format_note_table(f)]
2734 for f in formats
2735 if f.get('preference') is None or f['preference'] >= -1000]
2736 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2737 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2738 else:
2739 table = [
2740 [
2741 format_field(f, 'format_id'),
2742 format_field(f, 'ext'),
2743 self.format_resolution(f),
2744 self._format_note(f)]
2745 for f in formats
2746 if f.get('preference') is None or f['preference'] >= -1000]
2747 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 2748
cfb56d1a 2749 self.to_screen(
76d321f6 2750 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2751 header_line,
2752 table,
2753 delim=new_format,
2754 extraGap=(0 if new_format else 1),
2755 hideEmpty=new_format)))
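# Note on the call above: with 'listformats_table' enabled, the newer multi-column header
# ('ID', 'EXT', 'RESOLUTION', ..., 'NOTE') is used and delim/extraGap/hideEmpty are set
# for the compact rendering; otherwise the legacy four-column header ('format code',
# 'extension', 'resolution', 'note') is printed.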
cfb56d1a
PH
2756
2757 def list_thumbnails(self, info_dict):
2758 thumbnails = info_dict.get('thumbnails')
2759 if not thumbnails:
b7b72db9 2760 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2761 return
cfb56d1a
PH
2762
2763 self.to_screen(
2764 '[info] Thumbnails for %s:' % info_dict['id'])
2765 self.to_screen(render_table(
2766 ['ID', 'width', 'height', 'URL'],
2767 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 2768
360e1ca5 2769 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 2770 if not subtitles:
360e1ca5 2771 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 2772 return
a504ced0 2773 self.to_screen(
edab9dbf
JMF
2774 'Available %s for %s:' % (name, video_id))
2775 self.to_screen(render_table(
2776 ['Language', 'formats'],
2777 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2778 for lang, formats in subtitles.items()]))
a504ced0 2779
dca08720
PH
2780 def urlopen(self, req):
2781 """ Start an HTTP download """
82d8a8b6 2782 if isinstance(req, compat_basestring):
67dda517 2783 req = sanitized_Request(req)
19a41fc6 2784 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
2785
2786 def print_debug_header(self):
2787 if not self.params.get('verbose'):
2788 return
62fec3b2 2789
4192b51c 2790 if type('') is not compat_str:
067aa17e 2791 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
4192b51c
PH
2792 self.report_warning(
2793 'Your Python is broken! Update to a newer and supported version')
2794
c6afed48
PH
2795 stdout_encoding = getattr(
2796 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 2797 encoding_str = (
734f90bb
PH
2798 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2799 locale.getpreferredencoding(),
2800 sys.getfilesystemencoding(),
c6afed48 2801 stdout_encoding,
b0472057 2802 self.get_encoding()))
4192b51c 2803 write_string(encoding_str, encoding=None)
734f90bb 2804
e5813e53 2805 source = (
2806 '(exe)' if hasattr(sys, 'frozen')
2807 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2808 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2809 else '')
2810 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 2811 if _LAZY_LOADER:
f74980cb 2812 self._write_string('[debug] Lazy loading extractors enabled\n')
2813 if _PLUGIN_CLASSES:
2814 self._write_string(
2815 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
dca08720
PH
2816 try:
2817 sp = subprocess.Popen(
2818 ['git', 'rev-parse', '--short', 'HEAD'],
2819 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2820 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 2821 out, err = process_communicate_or_kill(sp)
dca08720
PH
2822 out = out.decode().strip()
2823 if re.match('[0-9a-f]+', out):
f74980cb 2824 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 2825 except Exception:
dca08720
PH
2826 try:
2827 sys.exc_clear()
70a1165b 2828 except Exception:
dca08720 2829 pass
b300cda4
S
2830
2831 def python_implementation():
2832 impl_name = platform.python_implementation()
2833 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2834 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2835 return impl_name
2836
e5813e53 2837 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2838 platform.python_version(),
2839 python_implementation(),
2840 platform.architecture()[0],
b300cda4 2841 platform_name()))
d28b5171 2842
73fac4e9 2843 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 2844 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 2845 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171
PH
2846 exe_str = ', '.join(
2847 '%s %s' % (exe, v)
2848 for exe, v in sorted(exe_versions.items())
2849 if v
2850 )
2851 if not exe_str:
2852 exe_str = 'none'
2853 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720
PH
2854
2855 proxy_map = {}
2856 for handler in self._opener.handlers:
2857 if hasattr(handler, 'proxies'):
2858 proxy_map.update(handler.proxies)
734f90bb 2859 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 2860
58b1f00d
PH
2861 if self.params.get('call_home', False):
2862 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2863 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 2864 return
58b1f00d
PH
2865 latest_version = self.urlopen(
2866 'https://yt-dl.org/latest/version').read().decode('utf-8')
2867 if version_tuple(latest_version) > version_tuple(__version__):
2868 self.report_warning(
2869 'You are using an outdated version (newest version: %s)! '
2870 'See https://yt-dl.org/update if you need help updating.' %
2871 latest_version)
2872
e344693b 2873 def _setup_opener(self):
6ad14cab 2874 timeout_val = self.params.get('socket_timeout')
19a41fc6 2875 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 2876
dca08720
PH
2877 opts_cookiefile = self.params.get('cookiefile')
2878 opts_proxy = self.params.get('proxy')
2879
2880 if opts_cookiefile is None:
2881 self.cookiejar = compat_cookiejar.CookieJar()
2882 else:
590bc6f6 2883 opts_cookiefile = expand_path(opts_cookiefile)
1bab3437 2884 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
dca08720 2885 if os.access(opts_cookiefile, os.R_OK):
1d88b3e6 2886 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
dca08720 2887
6a3f4c3f 2888 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
2889 if opts_proxy is not None:
2890 if opts_proxy == '':
2891 proxies = {}
2892 else:
2893 proxies = {'http': opts_proxy, 'https': opts_proxy}
2894 else:
2895 proxies = compat_urllib_request.getproxies()
067aa17e 2896 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
2897 if 'http' in proxies and 'https' not in proxies:
2898 proxies['https'] = proxies['http']
91410c9b 2899 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
2900
2901 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
2902 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2903 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 2904 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 2905 data_handler = compat_urllib_request_DataHandler()
6240b0a2
JMF
2906
2907 # When passing our own FileHandler instance, build_opener won't add the
2908 # default FileHandler and allows us to disable the file protocol, which
2909 # can be used for malicious purposes (see
067aa17e 2910 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2
JMF
2911 file_handler = compat_urllib_request.FileHandler()
2912
2913 def file_open(*args, **kwargs):
7a5c1cfe 2914 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
2915 file_handler.file_open = file_open
2916
2917 opener = compat_urllib_request.build_opener(
fca6dba8 2918 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 2919
dca08720
PH
2920 # Delete the default user-agent header, which would otherwise apply in
2921 # cases where our custom HTTP handler doesn't come into play
067aa17e 2922 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
2923 opener.addheaders = []
2924 self._opener = opener
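# Effect of the file_open override installed above (hypothetical call): once the opener
# is in place, something like
#   ydl.urlopen('file:///etc/passwd')
# raises URLError instead of letting the urllib FileHandler read a local file.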
62fec3b2
PH
2925
2926 def encode(self, s):
2927 if isinstance(s, bytes):
2928 return s # Already encoded
2929
2930 try:
2931 return s.encode(self.get_encoding())
2932 except UnicodeEncodeError as err:
2933 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2934 raise
2935
2936 def get_encoding(self):
2937 encoding = self.params.get('encoding')
2938 if encoding is None:
2939 encoding = preferredencoding()
2940 return encoding
ec82d85a 2941
de6000d9 2942 def _write_thumbnails(self, info_dict, filename): # return the suffix+extension of each thumbnail written
6c4fd172 2943 write_all = self.params.get('write_all_thumbnails', False)
2944 thumbnails = []
2945 if write_all or self.params.get('writethumbnail', False):
0202b52a 2946 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 2947 multiple = write_all and len(thumbnails) > 1
ec82d85a 2948
0202b52a 2949 ret = []
6c4fd172 2950 for t in thumbnails[::1 if write_all else -1]:
ec82d85a 2951 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 2952 suffix = '%s.' % t['id'] if multiple else ''
2953 thumb_display_id = '%s ' % t['id'] if multiple else ''
de6000d9 2954 t['filename'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 2955
0c3d0f51 2956 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 2957 ret.append(suffix + thumb_ext)
ec82d85a
PH
2958 self.to_screen('[%s] %s: Thumbnail %sis already present' %
2959 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2960 else:
5ef7d9bd 2961 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
ec82d85a
PH
2962 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2963 try:
2964 uf = self.urlopen(t['url'])
d3d89c32 2965 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 2966 shutil.copyfileobj(uf, thumbf)
de6000d9 2967 ret.append(suffix + thumb_ext)
ec82d85a
PH
2968 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
2969 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
2970 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2971 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 2972 (t['url'], error_to_compat_str(err)))
6c4fd172 2973 if ret and not write_all:
2974 break
0202b52a 2975 return ret
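# Example return value of _write_thumbnails() for a hypothetical video with two
# thumbnails, ids '1' (a .jpg URL) and '2' (a .webp URL), when 'write_all_thumbnails'
# is set:
#   ['1.jpg', '2.webp']  # suffix + extension of every thumbnail now on disk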