]> jfr.im git - yt-dlp.git/blame - yt_dlp/YoutubeDL.py
Fix `preload_download_archive` writing verbose message to `stdout`
[yt-dlp.git] / yt_dlp / YoutubeDL.py
CommitLineData
8222d8de 1#!/usr/bin/env python
dcdb292f 2# coding: utf-8
8222d8de 3
6febd1c1 4from __future__ import absolute_import, unicode_literals
8222d8de 5
26e63931 6import collections
31bd3925 7import contextlib
317f7ab6 8import copy
9d2ecdbc 9import datetime
c1c9a79c 10import errno
31bd3925 11import fileinput
8222d8de 12import io
b82f815f 13import itertools
8694c600 14import json
62fec3b2 15import locale
083c9df9 16import operator
8222d8de 17import os
dca08720 18import platform
8222d8de
JMF
19import re
20import shutil
dca08720 21import subprocess
8222d8de
JMF
22import socket
23import sys
24import time
67134eab 25import tokenize
8222d8de 26import traceback
75822ca7 27import random
8222d8de 28
961ea474 29from string import ascii_letters
e5813e53 30from zipimport import zipimporter
961ea474 31
8c25f81b 32from .compat import (
82d8a8b6 33 compat_basestring,
dca08720 34 compat_cookiejar,
003c69a8 35 compat_get_terminal_size,
ce02ed60 36 compat_http_client,
4f026faf 37 compat_kwargs,
d0d9ade4 38 compat_numeric_types,
e9c0cdd3 39 compat_os_name,
ce02ed60 40 compat_str,
67134eab 41 compat_tokenize_tokenize,
ce02ed60
PH
42 compat_urllib_error,
43 compat_urllib_request,
8b172c2e 44 compat_urllib_request_DataHandler,
8c25f81b
PH
45)
46from .utils import (
eedb7ba5
S
47 age_restricted,
48 args_to_str,
ce02ed60
PH
49 ContentTooShortError,
50 date_from_str,
51 DateRange,
acd69589 52 DEFAULT_OUTTMPL,
de6000d9 53 OUTTMPL_TYPES,
ce02ed60 54 determine_ext,
b5559424 55 determine_protocol,
732044af 56 DOT_DESKTOP_LINK_TEMPLATE,
57 DOT_URL_LINK_TEMPLATE,
58 DOT_WEBLOC_LINK_TEMPLATE,
ce02ed60 59 DownloadError,
c0384f22 60 encode_compat_str,
ce02ed60 61 encodeFilename,
9b9c5355 62 error_to_compat_str,
498f5606 63 EntryNotInPlaylist,
8b0d7497 64 ExistingVideoReached,
590bc6f6 65 expand_path,
ce02ed60 66 ExtractorError,
e29663c6 67 float_or_none,
02dbf93f 68 format_bytes,
76d321f6 69 format_field,
143db31d 70 FORMAT_RE,
525ef922 71 formatSeconds,
773f291d 72 GeoRestrictedError,
c9969434 73 int_or_none,
732044af 74 iri_to_uri,
773f291d 75 ISO3166Utils,
ce02ed60 76 locked_file,
0202b52a 77 make_dir,
dca08720 78 make_HTTPS_handler,
ce02ed60 79 MaxDownloadsReached,
cd6fc19e 80 orderedSet,
b7ab0590 81 PagedList,
083c9df9 82 parse_filesize,
91410c9b 83 PerRequestProxyHandler,
dca08720 84 platform_name,
eedb7ba5 85 PostProcessingError,
ce02ed60 86 preferredencoding,
eedb7ba5 87 prepend_extension,
51fb4995 88 register_socks_protocols,
cfb56d1a 89 render_table,
eedb7ba5 90 replace_extension,
8b0d7497 91 RejectedVideoReached,
ce02ed60
PH
92 SameFileError,
93 sanitize_filename,
1bb5c511 94 sanitize_path,
dcf77cf1 95 sanitize_url,
67dda517 96 sanitized_Request,
e5660ee6 97 std_headers,
1211bb6d 98 str_or_none,
e29663c6 99 strftime_or_none,
ce02ed60 100 subtitles_filename,
732044af 101 to_high_limit_path,
a439a3a4 102 traverse_dict,
ce02ed60 103 UnavailableVideoError,
29eb5174 104 url_basename,
58b1f00d 105 version_tuple,
ce02ed60
PH
106 write_json_file,
107 write_string,
1bab3437 108 YoutubeDLCookieJar,
6a3f4c3f 109 YoutubeDLCookieProcessor,
dca08720 110 YoutubeDLHandler,
fca6dba8 111 YoutubeDLRedirectHandler,
f5b1bca9 112 process_communicate_or_kill,
ce02ed60 113)
a0e07d31 114from .cache import Cache
52a8a1e1 115from .extractor import (
116 gen_extractor_classes,
117 get_info_extractor,
118 _LAZY_LOADER,
119 _PLUGIN_CLASSES
120)
4c54b89e 121from .extractor.openload import PhantomJSwrapper
52a8a1e1 122from .downloader import (
123 get_suitable_downloader,
124 shorten_protocol_name
125)
4c83c967 126from .downloader.rtmp import rtmpdump_version
4f026faf 127from .postprocessor import (
f17f8651 128 FFmpegFixupM3u8PP,
62cd676c 129 FFmpegFixupM4aPP,
6271f1ca 130 FFmpegFixupStretchedPP,
4f026faf
PH
131 FFmpegMergerPP,
132 FFmpegPostProcessor,
0202b52a 133 # FFmpegSubtitlesConvertorPP,
4f026faf 134 get_postprocessor,
0202b52a 135 MoveFilesAfterDownloadPP,
4f026faf 136)
dca08720 137from .version import __version__
8222d8de 138
e9c0cdd3
YCH
139if compat_os_name == 'nt':
140 import ctypes
141
2459b6e1 142
8222d8de
JMF
143class YoutubeDL(object):
144 """YoutubeDL class.
145
146 YoutubeDL objects are the ones responsible of downloading the
147 actual video file and writing it to disk if the user has requested
148 it, among some other tasks. In most cases there should be one per
149 program. As, given a video URL, the downloader doesn't know how to
150 extract all the needed information, task that InfoExtractors do, it
151 has to pass the URL to one of them.
152
153 For this, YoutubeDL objects have a method that allows
154 InfoExtractors to be registered in a given order. When it is passed
155 a URL, the YoutubeDL object handles it to the first InfoExtractor it
156 finds that reports being able to handle it. The InfoExtractor extracts
157 all the information about the video or videos the URL refers to, and
158 YoutubeDL process the extracted information, possibly using a File
159 Downloader to download the video.
160
161 YoutubeDL objects accept a lot of parameters. In order not to saturate
162 the object constructor with arguments, it receives a dictionary of
163 options instead. These options are available through the params
164 attribute for the InfoExtractors to use. The YoutubeDL also
165 registers itself as the downloader in charge for the InfoExtractors
166 that are added to it, so this is a "mutual registration".
167
168 Available options:
169
170 username: Username for authentication purposes.
171 password: Password for authentication purposes.
180940e0 172 videopassword: Password for accessing a video.
1da50aa3
S
173 ap_mso: Adobe Pass multiple-system operator identifier.
174 ap_username: Multiple-system operator account username.
175 ap_password: Multiple-system operator account password.
8222d8de
JMF
176 usenetrc: Use netrc for authentication instead.
177 verbose: Print additional info to stdout.
178 quiet: Do not print messages to stdout.
ad8915b7 179 no_warnings: Do not print out anything for warnings.
8222d8de
JMF
180 forceurl: Force printing final URL.
181 forcetitle: Force printing title.
182 forceid: Force printing ID.
183 forcethumbnail: Force printing thumbnail URL.
184 forcedescription: Force printing description.
185 forcefilename: Force printing final filename.
525ef922 186 forceduration: Force printing duration.
8694c600 187 forcejson: Force printing info_dict as JSON.
63e0be34
PH
188 dump_single_json: Force printing the info_dict of the whole playlist
189 (or video) as a single JSON line.
c25228e5 190 force_write_download_archive: Force writing download archive regardless
191 of 'skip_download' or 'simulate'.
8222d8de 192 simulate: Do not download the video files.
eb8a4433 193 format: Video format code. see "FORMAT SELECTION" for more details.
63ad4d43 194 allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
b7da73eb 195 ignore_no_formats_error: Ignore "No video formats" error. Usefull for
196 extracting metadata even if the video is not actually
197 available for download (experimental)
c25228e5 198 format_sort: How to sort the video formats. see "Sorting Formats"
199 for more details.
200 format_sort_force: Force the given format_sort. see "Sorting Formats"
201 for more details.
202 allow_multiple_video_streams: Allow multiple video streams to be merged
203 into a single file
204 allow_multiple_audio_streams: Allow multiple audio streams to be merged
205 into a single file
4524baf0 206 paths: Dictionary of output paths. The allowed keys are 'home'
207 'temp' and the keys of OUTTMPL_TYPES (in utils.py)
de6000d9 208 outtmpl: Dictionary of templates for output names. Allowed keys
4524baf0 209 are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
210 A string a also accepted for backward compatibility
a820dc72
RA
211 outtmpl_na_placeholder: Placeholder for unavailable meta fields.
212 restrictfilenames: Do not allow "&" and spaces in file names
213 trim_file_name: Limit length of filename (extension excluded)
4524baf0 214 windowsfilenames: Force the filenames to be windows compatible
a820dc72 215 ignoreerrors: Do not stop on download errors
7a5c1cfe 216 (Default True when running yt-dlp,
a820dc72 217 but False when directly accessing YoutubeDL class)
26e2805c 218 skip_playlist_after_errors: Number of allowed failures until the rest of
219 the playlist is skipped
d22dec74 220 force_generic_extractor: Force downloader to use the generic extractor
0c3d0f51 221 overwrites: Overwrite all video and metadata files if True,
222 overwrite only non-video files if None
223 and don't overwrite any file if False
8222d8de
JMF
224 playliststart: Playlist item to start at.
225 playlistend: Playlist item to end at.
c14e88f0 226 playlist_items: Specific indices of playlist to download.
ff815fe6 227 playlistreverse: Download playlist items in reverse order.
75822ca7 228 playlistrandom: Download playlist items in random order.
8222d8de
JMF
229 matchtitle: Download only matching titles.
230 rejecttitle: Reject downloads for matching titles.
8bf9319e 231 logger: Log messages to a logging.Logger instance.
8222d8de
JMF
232 logtostderr: Log messages to stderr instead of stdout.
233 writedescription: Write the video description to a .description file
234 writeinfojson: Write the video description to a .info.json file
75d43ca0 235 clean_infojson: Remove private fields from the infojson
06167fbb 236 writecomments: Extract video comments. This will not be written to disk
237 unless writeinfojson is also given
1fb07d10 238 writeannotations: Write the video annotations to a .annotations.xml file
8222d8de 239 writethumbnail: Write the thumbnail image to a file
c25228e5 240 allow_playlist_files: Whether to write playlists' description, infojson etc
241 also to disk when using the 'write*' options
ec82d85a 242 write_all_thumbnails: Write all thumbnail formats to files
732044af 243 writelink: Write an internet shortcut file, depending on the
244 current platform (.url/.webloc/.desktop)
245 writeurllink: Write a Windows internet shortcut file (.url)
246 writewebloclink: Write a macOS internet shortcut file (.webloc)
247 writedesktoplink: Write a Linux internet shortcut file (.desktop)
8222d8de 248 writesubtitles: Write the video subtitles to a file
741dd8ea 249 writeautomaticsub: Write the automatically generated subtitles to a file
c32b0aab 250 allsubtitles: Deprecated - Use subtitlelangs = ['all']
251 Downloads all the subtitles of the video
0b7f3118 252 (requires writesubtitles or writeautomaticsub)
8222d8de 253 listsubtitles: Lists all available subtitles for the video
a504ced0 254 subtitlesformat: The format code for subtitles
c32b0aab 255 subtitleslangs: List of languages of the subtitles to download (can be regex).
256 The list may contain "all" to refer to all the available
257 subtitles. The language can be prefixed with a "-" to
258 exclude it from the requested languages. Eg: ['all', '-live_chat']
8222d8de
JMF
259 keepvideo: Keep the video file after post-processing
260 daterange: A DateRange object, download only if the upload_date is in the range.
261 skip_download: Skip the actual download of the video file
c35f9e72 262 cachedir: Location of the cache files in the filesystem.
a0e07d31 263 False to disable filesystem cache.
47192f92 264 noplaylist: Download single video instead of a playlist if in doubt.
8dbe9899
PH
265 age_limit: An integer representing the user's age in years.
266 Unsuitable videos for the given age are skipped.
5fe18bdb
PH
267 min_views: An integer representing the minimum view count the video
268 must have in order to not be skipped.
269 Videos without view count information are always
270 downloaded. None for no limit.
271 max_views: An integer representing the maximum view count.
272 Videos that are more popular than that are not
273 downloaded.
274 Videos without view count information are always
275 downloaded. None for no limit.
276 download_archive: File name of a file where all downloads are recorded.
c1c9a79c
PH
277 Videos already present in the file are not downloaded
278 again.
8a51f564 279 break_on_existing: Stop the download process after attempting to download a
280 file that is in the archive.
281 break_on_reject: Stop the download process when encountering a video that
282 has been filtered out.
283 cookiefile: File name where cookies should be read from and dumped to
a1ee09e8 284 nocheckcertificate:Do not verify SSL certificates
7e8c0af0
PH
285 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
286 At the moment, this is only supported by YouTube.
a1ee09e8 287 proxy: URL of the proxy server to use
38cce791 288 geo_verification_proxy: URL of the proxy to use for IP address verification
504f20dd 289 on geo-restricted sites.
e344693b 290 socket_timeout: Time to wait for unresponsive hosts, in seconds
0783b09b
PH
291 bidi_workaround: Work around buggy terminals without bidirectional text
292 support, using fridibi
a0ddb8a2 293 debug_printtraffic:Print out sent and received HTTP traffic
7b0817e8 294 include_ads: Download ads as well
04b4d394
PH
295 default_search: Prepend this string if an input url is not valid.
296 'auto' for elaborate guessing
62fec3b2 297 encoding: Use this encoding instead of the system-specified.
e8ee972c 298 extract_flat: Do not resolve URLs, return the immediate result.
057a5206
PH
299 Pass in 'in_playlist' to only show this behavior for
300 playlist items.
4f026faf 301 postprocessors: A list of dictionaries, each with an entry
71b640cc 302 * key: The name of the postprocessor. See
7a5c1cfe 303 yt_dlp/postprocessor/__init__.py for a list.
56d868db 304 * when: When to run the postprocessor. Can be one of
305 pre_process|before_dl|post_process|after_move.
306 Assumed to be 'post_process' if not given
ab8e5e51
AM
307 post_hooks: A list of functions that get called as the final step
308 for each video file, after all postprocessors have been
309 called. The filename will be passed as the only argument.
71b640cc
PH
310 progress_hooks: A list of functions that get called on download
311 progress, with a dictionary with the entries
5cda4eda 312 * status: One of "downloading", "error", or "finished".
ee69b99a 313 Check this first and ignore unknown values.
71b640cc 314
5cda4eda 315 If status is one of "downloading", or "finished", the
ee69b99a
PH
316 following properties may also be present:
317 * filename: The final filename (always present)
5cda4eda 318 * tmpfilename: The filename we're currently writing to
71b640cc
PH
319 * downloaded_bytes: Bytes on disk
320 * total_bytes: Size of the whole file, None if unknown
5cda4eda
PH
321 * total_bytes_estimate: Guess of the eventual file size,
322 None if unavailable.
323 * elapsed: The number of seconds since download started.
71b640cc
PH
324 * eta: The estimated time in seconds, None if unknown
325 * speed: The download speed in bytes/second, None if
326 unknown
5cda4eda
PH
327 * fragment_index: The counter of the currently
328 downloaded video fragment.
329 * fragment_count: The number of fragments (= individual
330 files that will be merged)
71b640cc
PH
331
332 Progress hooks are guaranteed to be called at least once
333 (with status "finished") if the download is successful.
45598f15 334 merge_output_format: Extension to use when merging formats.
6b591b29 335 final_ext: Expected final extension; used to detect when the file was
336 already downloaded and converted. "merge_output_format" is
337 replaced by this extension when given
6271f1ca
PH
338 fixup: Automatically correct known faults of the file.
339 One of:
340 - "never": do nothing
341 - "warn": only emit a warning
342 - "detect_or_warn": check whether we can do anything
62cd676c 343 about it, warn otherwise (default)
504f20dd 344 source_address: Client-side IP address to bind to.
6ec6cb4e 345 call_home: Boolean, true iff we are allowed to contact the
7a5c1cfe 346 yt-dlp servers for debugging. (BROKEN)
1cf376f5 347 sleep_interval_requests: Number of seconds to sleep between requests
348 during extraction
7aa589a5
S
349 sleep_interval: Number of seconds to sleep before each download when
350 used alone or a lower bound of a range for randomized
351 sleep before each download (minimum possible number
352 of seconds to sleep) when used along with
353 max_sleep_interval.
354 max_sleep_interval:Upper bound of a range for randomized sleep before each
355 download (maximum possible number of seconds to sleep).
356 Must only be used along with sleep_interval.
357 Actual sleep time will be a random float from range
358 [sleep_interval; max_sleep_interval].
1cf376f5 359 sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
cfb56d1a
PH
360 listformats: Print an overview of available video formats and exit.
361 list_thumbnails: Print a table of all thumbnails and exit.
347de493
PH
362 match_filter: A function that gets called with the info_dict of
363 every video.
364 If it returns a message, the video is ignored.
365 If it returns None, the video is downloaded.
366 match_filter_func in utils.py is one example for this.
7e5db8c9 367 no_color: Do not emit color codes in output.
0a840f58 368 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
504f20dd 369 HTTP header
0a840f58 370 geo_bypass_country:
773f291d
S
371 Two-letter ISO 3166-2 country code that will be used for
372 explicit geographic restriction bypassing via faking
504f20dd 373 X-Forwarded-For HTTP header
5f95927a
S
374 geo_bypass_ip_block:
375 IP range in CIDR notation that will be used similarly to
504f20dd 376 geo_bypass_country
71b640cc 377
85729c51 378 The following options determine which downloader is picked:
52a8a1e1 379 external_downloader: A dictionary of protocol keys and the executable of the
380 external downloader to use for it. The allowed protocols
381 are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
382 Set the value to 'native' to use the native downloader
383 hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
384 or {'m3u8': 'ffmpeg'} instead.
385 Use the native HLS downloader instead of ffmpeg/avconv
bf09af3a
S
386 if True, otherwise use ffmpeg/avconv if False, otherwise
387 use downloader suggested by extractor if None.
fe7e0c98 388
8222d8de 389 The following parameters are not used by YoutubeDL itself, they are used by
7a5c1cfe 390 the downloader (see yt_dlp/downloader/common.py):
8222d8de 391 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
881e6a1f 392 noresizebuffer, retries, continuedl, noprogress, consoletitle,
b54d4a5c 393 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
e409895f 394 http_chunk_size.
76b1bd67
JMF
395
396 The following options are used by the post processors:
d4a24f40 397 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
e4172ac9 398 otherwise prefer ffmpeg. (avconv support is deprecated)
c0b7d117
S
399 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
400 to the binary or its containing directory.
43820c03 401 postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
402 and a list of additional command-line arguments for the
403 postprocessor/executable. The dict can also have "PP+EXE" keys
404 which are used when the given exe is used by the given PP.
405 Use 'default' as the name for arguments to passed to all PP
e409895f 406
407 The following options are used by the extractors:
62bff2c1 408 extractor_retries: Number of times to retry for known errors
409 dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
e409895f 410 hls_split_discontinuity: Split HLS playlists to different formats at
62bff2c1 411 discontinuities such as ad breaks (default: False)
3600fd59 412 youtube_include_dash_manifest: If True (default), DASH manifests and related
62bff2c1 413 data will be downloaded and processed by extractor.
414 You can reduce network I/O by disabling it if you don't
415 care about DASH. (only for youtube)
e409895f 416 youtube_include_hls_manifest: If True (default), HLS manifests and related
62bff2c1 417 data will be downloaded and processed by extractor.
418 You can reduce network I/O by disabling it if you don't
419 care about HLS. (only for youtube)
8222d8de
JMF
420 """
421
c9969434
S
422 _NUMERIC_FIELDS = set((
423 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
424 'timestamp', 'upload_year', 'upload_month', 'upload_day',
425 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
426 'average_rating', 'comment_count', 'age_limit',
427 'start_time', 'end_time',
428 'chapter_number', 'season_number', 'episode_number',
429 'track_number', 'disc_number', 'release_year',
430 'playlist_index',
431 ))
432
8222d8de
JMF
433 params = None
434 _ies = []
56d868db 435 _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
0202b52a 436 __prepare_filename_warned = False
1cf376f5 437 _first_webpage_request = True
8222d8de
JMF
438 _download_retcode = None
439 _num_downloads = None
30a074c2 440 _playlist_level = 0
441 _playlist_urls = set()
8222d8de
JMF
442 _screen_file = None
443
3511266b 444 def __init__(self, params=None, auto_init=True):
8222d8de 445 """Create a FileDownloader object with the given options."""
e9f9a10f
JMF
446 if params is None:
447 params = {}
8222d8de 448 self._ies = []
56c73665 449 self._ies_instances = {}
56d868db 450 self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
0202b52a 451 self.__prepare_filename_warned = False
1cf376f5 452 self._first_webpage_request = True
ab8e5e51 453 self._post_hooks = []
933605d7 454 self._progress_hooks = []
8222d8de
JMF
455 self._download_retcode = 0
456 self._num_downloads = 0
457 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
0783b09b 458 self._err_file = sys.stderr
4abf617b
S
459 self.params = {
460 # Default parameters
461 'nocheckcertificate': False,
462 }
463 self.params.update(params)
a0e07d31 464 self.cache = Cache(self)
34308b30 465
be5df5ee
S
466 def check_deprecated(param, option, suggestion):
467 if self.params.get(param) is not None:
468 self.report_warning(
4cd0a709 469 '%s is deprecated. Use %s instead' % (option, suggestion))
be5df5ee
S
470 return True
471 return False
472
473 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
38cce791
YCH
474 if self.params.get('geo_verification_proxy') is None:
475 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
476
6b591b29 477 if self.params.get('final_ext'):
478 if self.params.get('merge_output_format'):
479 self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
480 self.params['merge_output_format'] = self.params['final_ext']
481
b9d973be 482 if 'overwrites' in self.params and self.params['overwrites'] is None:
483 del self.params['overwrites']
484
be5df5ee
S
485 check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
486 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
487 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
488
0783b09b 489 if params.get('bidi_workaround', False):
1c088fa8
PH
490 try:
491 import pty
492 master, slave = pty.openpty()
003c69a8 493 width = compat_get_terminal_size().columns
1c088fa8
PH
494 if width is None:
495 width_args = []
496 else:
497 width_args = ['-w', str(width)]
5d681e96 498 sp_kwargs = dict(
1c088fa8
PH
499 stdin=subprocess.PIPE,
500 stdout=slave,
501 stderr=self._err_file)
5d681e96
PH
502 try:
503 self._output_process = subprocess.Popen(
504 ['bidiv'] + width_args, **sp_kwargs
505 )
506 except OSError:
5d681e96
PH
507 self._output_process = subprocess.Popen(
508 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
509 self._output_channel = os.fdopen(master, 'rb')
1c088fa8 510 except OSError as ose:
66e7ace1 511 if ose.errno == errno.ENOENT:
6febd1c1 512 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
1c088fa8
PH
513 else:
514 raise
0783b09b 515
3089bc74
S
516 if (sys.platform != 'win32'
517 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
518 and not params.get('restrictfilenames', False)):
e9137224 519 # Unicode filesystem API will throw errors (#1474, #13027)
34308b30 520 self.report_warning(
6febd1c1 521 'Assuming --restrict-filenames since file system encoding '
1b725173 522 'cannot encode all characters. '
6febd1c1 523 'Set the LC_ALL environment variable to fix this.')
4a98cdbf 524 self.params['restrictfilenames'] = True
34308b30 525
de6000d9 526 self.outtmpl_dict = self.parse_outtmpl()
486dd09e 527
dca08720
PH
528 self._setup_opener()
529
4cd0a709 530 """Preload the archive, if any is specified"""
531 def preload_download_archive(fn):
532 if fn is None:
533 return False
534 if self.params.get('verbose'):
535 self._write_string('[debug] Loading archive file %r\n' % fn)
536 try:
537 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
538 for line in archive_file:
539 self.archive.add(line.strip())
540 except IOError as ioe:
541 if ioe.errno != errno.ENOENT:
542 raise
543 return False
544 return True
545
546 self.archive = set()
547 preload_download_archive(self.params.get('download_archive'))
548
3511266b
PH
549 if auto_init:
550 self.print_debug_header()
551 self.add_default_info_extractors()
552
4f026faf
PH
553 for pp_def_raw in self.params.get('postprocessors', []):
554 pp_class = get_postprocessor(pp_def_raw['key'])
555 pp_def = dict(pp_def_raw)
556 del pp_def['key']
5bfa4862 557 if 'when' in pp_def:
558 when = pp_def['when']
559 del pp_def['when']
560 else:
56d868db 561 when = 'post_process'
4f026faf 562 pp = pp_class(self, **compat_kwargs(pp_def))
5bfa4862 563 self.add_post_processor(pp, when=when)
4f026faf 564
ab8e5e51
AM
565 for ph in self.params.get('post_hooks', []):
566 self.add_post_hook(ph)
567
71b640cc
PH
568 for ph in self.params.get('progress_hooks', []):
569 self.add_progress_hook(ph)
570
51fb4995
YCH
571 register_socks_protocols()
572
7d4111ed
PH
573 def warn_if_short_id(self, argv):
574 # short YouTube ID starting with dash?
575 idxs = [
576 i for i, a in enumerate(argv)
577 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
578 if idxs:
579 correct_argv = (
7a5c1cfe 580 ['yt-dlp']
3089bc74
S
581 + [a for i, a in enumerate(argv) if i not in idxs]
582 + ['--'] + [argv[i] for i in idxs]
7d4111ed
PH
583 )
584 self.report_warning(
585 'Long argument string detected. '
586 'Use -- to separate parameters and URLs, like this:\n%s\n' %
587 args_to_str(correct_argv))
588
8222d8de
JMF
589 def add_info_extractor(self, ie):
590 """Add an InfoExtractor object to the end of the list."""
591 self._ies.append(ie)
e52d7f85
JMF
592 if not isinstance(ie, type):
593 self._ies_instances[ie.ie_key()] = ie
594 ie.set_downloader(self)
8222d8de 595
56c73665
JMF
596 def get_info_extractor(self, ie_key):
597 """
598 Get an instance of an IE with name ie_key, it will try to get one from
599 the _ies list, if there's no instance it will create a new one and add
600 it to the extractor list.
601 """
602 ie = self._ies_instances.get(ie_key)
603 if ie is None:
604 ie = get_info_extractor(ie_key)()
605 self.add_info_extractor(ie)
606 return ie
607
023fa8c4
JMF
608 def add_default_info_extractors(self):
609 """
610 Add the InfoExtractors returned by gen_extractors to the end of the list
611 """
e52d7f85 612 for ie in gen_extractor_classes():
023fa8c4
JMF
613 self.add_info_extractor(ie)
614
56d868db 615 def add_post_processor(self, pp, when='post_process'):
8222d8de 616 """Add a PostProcessor object to the end of the chain."""
5bfa4862 617 self._pps[when].append(pp)
8222d8de
JMF
618 pp.set_downloader(self)
619
ab8e5e51
AM
620 def add_post_hook(self, ph):
621 """Add the post hook"""
622 self._post_hooks.append(ph)
623
933605d7
JMF
624 def add_progress_hook(self, ph):
625 """Add the progress hook (currently only for the file downloader)"""
626 self._progress_hooks.append(ph)
8ab470f1 627
1c088fa8 628 def _bidi_workaround(self, message):
5d681e96 629 if not hasattr(self, '_output_channel'):
1c088fa8
PH
630 return message
631
5d681e96 632 assert hasattr(self, '_output_process')
11b85ce6 633 assert isinstance(message, compat_str)
6febd1c1
PH
634 line_count = message.count('\n') + 1
635 self._output_process.stdin.write((message + '\n').encode('utf-8'))
5d681e96 636 self._output_process.stdin.flush()
6febd1c1 637 res = ''.join(self._output_channel.readline().decode('utf-8')
9e1a5b84 638 for _ in range(line_count))
6febd1c1 639 return res[:-len('\n')]
1c088fa8 640
8222d8de 641 def to_screen(self, message, skip_eol=False):
0783b09b
PH
642 """Print message to stdout if not in quiet mode."""
643 return self.to_stdout(message, skip_eol, check_quiet=True)
644
734f90bb 645 def _write_string(self, s, out=None):
b58ddb32 646 write_string(s, out=out, encoding=self.params.get('encoding'))
734f90bb 647
0783b09b 648 def to_stdout(self, message, skip_eol=False, check_quiet=False):
8222d8de 649 """Print message to stdout if not in quiet mode."""
8bf9319e 650 if self.params.get('logger'):
43afe285 651 self.params['logger'].debug(message)
0783b09b 652 elif not check_quiet or not self.params.get('quiet', False):
1c088fa8 653 message = self._bidi_workaround(message)
6febd1c1 654 terminator = ['\n', ''][skip_eol]
8222d8de 655 output = message + terminator
1c088fa8 656
734f90bb 657 self._write_string(output, self._screen_file)
8222d8de
JMF
658
659 def to_stderr(self, message):
660 """Print message to stderr."""
11b85ce6 661 assert isinstance(message, compat_str)
8bf9319e 662 if self.params.get('logger'):
43afe285
IB
663 self.params['logger'].error(message)
664 else:
1c088fa8 665 message = self._bidi_workaround(message)
6febd1c1 666 output = message + '\n'
734f90bb 667 self._write_string(output, self._err_file)
8222d8de 668
1e5b9a95
PH
669 def to_console_title(self, message):
670 if not self.params.get('consoletitle', False):
671 return
4bede0d8
C
672 if compat_os_name == 'nt':
673 if ctypes.windll.kernel32.GetConsoleWindow():
674 # c_wchar_p() might not be necessary if `message` is
675 # already of type unicode()
676 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
1e5b9a95 677 elif 'TERM' in os.environ:
b46696bd 678 self._write_string('\033]0;%s\007' % message, self._screen_file)
1e5b9a95 679
bdde425c
PH
680 def save_console_title(self):
681 if not self.params.get('consoletitle', False):
682 return
94c3442e
S
683 if self.params.get('simulate', False):
684 return
4bede0d8 685 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 686 # Save the title on stack
734f90bb 687 self._write_string('\033[22;0t', self._screen_file)
bdde425c
PH
688
689 def restore_console_title(self):
690 if not self.params.get('consoletitle', False):
691 return
94c3442e
S
692 if self.params.get('simulate', False):
693 return
4bede0d8 694 if compat_os_name != 'nt' and 'TERM' in os.environ:
efd6c574 695 # Restore the title from stack
734f90bb 696 self._write_string('\033[23;0t', self._screen_file)
bdde425c
PH
697
698 def __enter__(self):
699 self.save_console_title()
700 return self
701
702 def __exit__(self, *args):
703 self.restore_console_title()
f89197d7 704
dca08720 705 if self.params.get('cookiefile') is not None:
1bab3437 706 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
bdde425c 707
8222d8de
JMF
708 def trouble(self, message=None, tb=None):
709 """Determine action to take when a download problem appears.
710
711 Depending on if the downloader has been configured to ignore
712 download errors or not, this method may throw an exception or
713 not when errors are found, after printing the message.
714
715 tb, if given, is additional traceback information.
716 """
717 if message is not None:
718 self.to_stderr(message)
719 if self.params.get('verbose'):
720 if tb is None:
721 if sys.exc_info()[0]: # if .trouble has been called from an except block
6febd1c1 722 tb = ''
8222d8de 723 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
6febd1c1 724 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
c0384f22 725 tb += encode_compat_str(traceback.format_exc())
8222d8de
JMF
726 else:
727 tb_data = traceback.format_list(traceback.extract_stack())
6febd1c1 728 tb = ''.join(tb_data)
8222d8de
JMF
729 self.to_stderr(tb)
730 if not self.params.get('ignoreerrors', False):
731 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
732 exc_info = sys.exc_info()[1].exc_info
733 else:
734 exc_info = sys.exc_info()
735 raise DownloadError(message, exc_info)
736 self._download_retcode = 1
737
738 def report_warning(self, message):
739 '''
740 Print the message to stderr, it will be prefixed with 'WARNING:'
741 If stderr is a tty file the 'WARNING:' will be colored
742 '''
6d07ce01
JMF
743 if self.params.get('logger') is not None:
744 self.params['logger'].warning(message)
8222d8de 745 else:
ad8915b7
PH
746 if self.params.get('no_warnings'):
747 return
e9c0cdd3 748 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6d07ce01
JMF
749 _msg_header = '\033[0;33mWARNING:\033[0m'
750 else:
751 _msg_header = 'WARNING:'
752 warning_message = '%s %s' % (_msg_header, message)
753 self.to_stderr(warning_message)
8222d8de
JMF
754
755 def report_error(self, message, tb=None):
756 '''
757 Do the same as trouble, but prefixes the message with 'ERROR:', colored
758 in red if stderr is a tty file.
759 '''
e9c0cdd3 760 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
6febd1c1 761 _msg_header = '\033[0;31mERROR:\033[0m'
8222d8de 762 else:
6febd1c1
PH
763 _msg_header = 'ERROR:'
764 error_message = '%s %s' % (_msg_header, message)
8222d8de
JMF
765 self.trouble(error_message, tb)
766
8222d8de
JMF
767 def report_file_already_downloaded(self, file_name):
768 """Report file has already been fully downloaded."""
769 try:
6febd1c1 770 self.to_screen('[download] %s has already been downloaded' % file_name)
ce02ed60 771 except UnicodeEncodeError:
6febd1c1 772 self.to_screen('[download] The file has already been downloaded')
8222d8de 773
0c3d0f51 774 def report_file_delete(self, file_name):
775 """Report that existing file will be deleted."""
776 try:
c25228e5 777 self.to_screen('Deleting existing file %s' % file_name)
0c3d0f51 778 except UnicodeEncodeError:
c25228e5 779 self.to_screen('Deleting existing file')
0c3d0f51 780
de6000d9 781 def parse_outtmpl(self):
782 outtmpl_dict = self.params.get('outtmpl', {})
783 if not isinstance(outtmpl_dict, dict):
784 outtmpl_dict = {'default': outtmpl_dict}
785 outtmpl_dict.update({
786 k: v for k, v in DEFAULT_OUTTMPL.items()
787 if not outtmpl_dict.get(k)})
788 for key, val in outtmpl_dict.items():
789 if isinstance(val, bytes):
790 self.report_warning(
791 'Parameter outtmpl is bytes, but should be a unicode string. '
792 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
793 return outtmpl_dict
794
143db31d 795 def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
796 """ Make the template and info_dict suitable for substitution (outtmpl % info_dict)"""
797 template_dict = dict(info_dict)
a439a3a4 798 na = self.params.get('outtmpl_na_placeholder', 'NA')
143db31d 799
800 # duration_string
801 template_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
802 formatSeconds(info_dict['duration'], '-')
803 if info_dict.get('duration', None) is not None
804 else None)
805
806 # epoch
807 template_dict['epoch'] = int(time.time())
808
809 # autonumber
810 autonumber_size = self.params.get('autonumber_size')
811 if autonumber_size is None:
812 autonumber_size = 5
813 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
814
815 # resolution if not defined
816 if template_dict.get('resolution') is None:
817 if template_dict.get('width') and template_dict.get('height'):
818 template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
819 elif template_dict.get('height'):
820 template_dict['resolution'] = '%sp' % template_dict['height']
821 elif template_dict.get('width'):
822 template_dict['resolution'] = '%dx?' % template_dict['width']
823
143db31d 824 # For fields playlist_index and autonumber convert all occurrences
825 # of %(field)s to %(field)0Nd for backward compatibility
826 field_size_compat_map = {
a439a3a4 827 'playlist_index': len(str(template_dict.get('n_entries', na))),
143db31d 828 'autonumber': autonumber_size,
829 }
830 FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
831 mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
832 if mobj:
833 outtmpl = re.sub(
834 FIELD_SIZE_COMPAT_RE,
835 r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
836 outtmpl)
837
838 numeric_fields = list(self._NUMERIC_FIELDS)
a439a3a4 839 if sanitize is None:
840 sanitize = lambda k, v: v
143db31d 841
a439a3a4 842 # Internal Formatting = name.key1.key2+number>strf
843 INTERNAL_FORMAT_RE = FORMAT_RE.format(
844 r'''(?P<final_key>
845 (?P<fields>\w+(?:\.[-\w]+)*)
846 (?:\+(?P<add>-?\d+(?:\.\d+)?))?
847 (?:>(?P<strf_format>.+?))?
848 )''')
849 for mobj in re.finditer(INTERNAL_FORMAT_RE, outtmpl):
850 mobj = mobj.groupdict()
851 # Object traversal
852 fields = mobj['fields'].split('.')
853 final_key = mobj['final_key']
854 value = traverse_dict(template_dict, fields)
855 # Offset the value
856 if mobj['add']:
857 value = float_or_none(value)
858 if value is not None:
859 value = value + float(mobj['add'])
860 # Datetime formatting
861 if mobj['strf_format']:
862 value = strftime_or_none(value, mobj['strf_format'])
863 if mobj['type'] in 'crs' and value is not None: # string
864 value = sanitize('%{}'.format(mobj['type']) % fields[-1], value)
865 else: # numeric
866 numeric_fields.append(final_key)
867 value = float_or_none(value)
143db31d 868 if value is not None:
a439a3a4 869 template_dict[final_key] = value
143db31d 870
871 # Missing numeric fields used together with integer presentation types
872 # in format specification will break the argument substitution since
873 # string NA placeholder is returned for missing fields. We will patch
874 # output template for missing fields to meet string presentation type.
875 for numeric_field in numeric_fields:
a439a3a4 876 if template_dict.get(numeric_field) is None:
143db31d 877 outtmpl = re.sub(
878 FORMAT_RE.format(re.escape(numeric_field)),
879 r'%({0})s'.format(numeric_field), outtmpl)
880
a439a3a4 881 template_dict = collections.defaultdict(lambda: na, (
882 (k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
883 for k, v in template_dict.items() if v is not None))
143db31d 884 return outtmpl, template_dict
885
de6000d9 886 def _prepare_filename(self, info_dict, tmpl_type='default'):
8222d8de 887 try:
586a91b6 888 sanitize = lambda k, v: sanitize_filename(
45598aab 889 compat_str(v),
1bb5c511 890 restricted=self.params.get('restrictfilenames'),
40df485f 891 is_id=(k == 'id' or k.endswith('_id')))
de6000d9 892 outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
143db31d 893 outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)
d0d9ade4 894
15da37c7
S
895 # expand_path translates '%%' into '%' and '$$' into '$'
896 # correspondingly that is not what we want since we need to keep
897 # '%%' intact for template dict substitution step. Working around
898 # with boundary-alike separator hack.
961ea474 899 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
15da37c7
S
900 outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
901
902 # outtmpl should be expand_path'ed before template dict substitution
903 # because meta fields may contain env variables we don't want to
904 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
905 # title "Hello $PATH", we don't want `$PATH` to be expanded.
906 filename = expand_path(outtmpl).replace(sep, '') % template_dict
907
143db31d 908 force_ext = OUTTMPL_TYPES.get(tmpl_type)
de6000d9 909 if force_ext is not None:
910 filename = replace_extension(filename, force_ext, template_dict.get('ext'))
911
bdc3fd2f
U
912 # https://github.com/blackjack4494/youtube-dlc/issues/85
913 trim_file_name = self.params.get('trim_file_name', False)
914 if trim_file_name:
915 fn_groups = filename.rsplit('.')
916 ext = fn_groups[-1]
917 sub_ext = ''
918 if len(fn_groups) > 2:
919 sub_ext = fn_groups[-2]
920 filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))
921
0202b52a 922 return filename
8222d8de 923 except ValueError as err:
6febd1c1 924 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
8222d8de
JMF
925 return None
926
de6000d9 927 def prepare_filename(self, info_dict, dir_type='', warn=False):
928 """Generate the output filename."""
0202b52a 929 paths = self.params.get('paths', {})
930 assert isinstance(paths, dict)
de6000d9 931 filename = self._prepare_filename(info_dict, dir_type or 'default')
932
933 if warn and not self.__prepare_filename_warned:
934 if not paths:
935 pass
936 elif filename == '-':
937 self.report_warning('--paths is ignored when an outputting to stdout')
938 elif os.path.isabs(filename):
939 self.report_warning('--paths is ignored since an absolute path is given in output template')
940 self.__prepare_filename_warned = True
941 if filename == '-' or not filename:
942 return filename
943
0202b52a 944 homepath = expand_path(paths.get('home', '').strip())
945 assert isinstance(homepath, compat_str)
946 subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
947 assert isinstance(subdir, compat_str)
c2934512 948 path = os.path.join(homepath, subdir, filename)
949
950 # Temporary fix for #4787
951 # 'Treat' all problem characters by passing filename through preferredencoding
952 # to workaround encoding issues with subprocess on python2 @ Windows
953 if sys.version_info < (3, 0) and sys.platform == 'win32':
954 path = encodeFilename(path, True).decode(preferredencoding())
955 return sanitize_path(path, force=self.params.get('windowsfilenames'))
0202b52a 956
442c37b7 957 def _match_entry(self, info_dict, incomplete):
ecdec191 958 """ Returns None if the file should be downloaded """
8222d8de 959
8b0d7497 960 def check_filter():
961 video_title = info_dict.get('title', info_dict.get('id', 'video'))
962 if 'title' in info_dict:
963 # This can happen when we're just evaluating the playlist
964 title = info_dict['title']
965 matchtitle = self.params.get('matchtitle', False)
966 if matchtitle:
967 if not re.search(matchtitle, title, re.IGNORECASE):
968 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
969 rejecttitle = self.params.get('rejecttitle', False)
970 if rejecttitle:
971 if re.search(rejecttitle, title, re.IGNORECASE):
972 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
973 date = info_dict.get('upload_date')
974 if date is not None:
975 dateRange = self.params.get('daterange', DateRange())
976 if date not in dateRange:
977 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
978 view_count = info_dict.get('view_count')
979 if view_count is not None:
980 min_views = self.params.get('min_views')
981 if min_views is not None and view_count < min_views:
982 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
983 max_views = self.params.get('max_views')
984 if max_views is not None and view_count > max_views:
985 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
986 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
987 return 'Skipping "%s" because it is age restricted' % video_title
988 if self.in_download_archive(info_dict):
989 return '%s has already been recorded in archive' % video_title
990
991 if not incomplete:
992 match_filter = self.params.get('match_filter')
993 if match_filter is not None:
994 ret = match_filter(info_dict)
995 if ret is not None:
996 return ret
997 return None
998
999 reason = check_filter()
1000 if reason is not None:
1001 self.to_screen('[download] ' + reason)
d83cb531 1002 if reason.endswith('has already been recorded in the archive') and self.params.get('break_on_existing', False):
8b0d7497 1003 raise ExistingVideoReached()
d83cb531 1004 elif self.params.get('break_on_reject', False):
8b0d7497 1005 raise RejectedVideoReached()
1006 return reason
fe7e0c98 1007
b6c45014
JMF
1008 @staticmethod
1009 def add_extra_info(info_dict, extra_info):
1010 '''Set the keys from extra_info in info dict if they are missing'''
1011 for key, value in extra_info.items():
1012 info_dict.setdefault(key, value)
1013
58f197b7 1014 def extract_info(self, url, download=True, ie_key=None, extra_info={},
61aa5ba3 1015 process=True, force_generic_extractor=False):
8222d8de
JMF
1016 '''
1017 Returns a list with a dictionary for each video we find.
1018 If 'download', also downloads the videos.
1019 extra_info is a dict containing the extra values to add to each result
613b2d9d 1020 '''
fe7e0c98 1021
61aa5ba3 1022 if not ie_key and force_generic_extractor:
d22dec74
S
1023 ie_key = 'Generic'
1024
8222d8de 1025 if ie_key:
56c73665 1026 ies = [self.get_info_extractor(ie_key)]
8222d8de
JMF
1027 else:
1028 ies = self._ies
1029
1030 for ie in ies:
1031 if not ie.suitable(url):
1032 continue
1033
9a68de12 1034 ie_key = ie.ie_key()
1035 ie = self.get_info_extractor(ie_key)
8222d8de 1036 if not ie.working():
6febd1c1
PH
1037 self.report_warning('The program functionality for this site has been marked as broken, '
1038 'and will probably not work.')
8222d8de
JMF
1039
1040 try:
d0757229 1041 temp_id = str_or_none(
63be1aab 1042 ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
1043 else ie._match_id(url))
a0566bbf 1044 except (AssertionError, IndexError, AttributeError):
1045 temp_id = None
1046 if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
1047 self.to_screen("[%s] %s: has already been recorded in archive" % (
1048 ie_key, temp_id))
1049 break
58f197b7 1050 return self.__extract_info(url, ie, download, extra_info, process)
a0566bbf 1051 else:
1052 self.report_error('no suitable InfoExtractor for URL %s' % url)
1053
1054 def __handle_extraction_exceptions(func):
1055 def wrapper(self, *args, **kwargs):
1056 try:
1057 return func(self, *args, **kwargs)
773f291d
S
1058 except GeoRestrictedError as e:
1059 msg = e.msg
1060 if e.countries:
1061 msg += '\nThis video is available in %s.' % ', '.join(
1062 map(ISO3166Utils.short2full, e.countries))
1063 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
1064 self.report_error(msg)
fb043a6e 1065 except ExtractorError as e: # An error we somewhat expected
2c74e6fa 1066 self.report_error(compat_str(e), e.format_traceback())
8b0d7497 1067 except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
d3e5bbf4 1068 raise
8222d8de
JMF
1069 except Exception as e:
1070 if self.params.get('ignoreerrors', False):
9b9c5355 1071 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
8222d8de
JMF
1072 else:
1073 raise
a0566bbf 1074 return wrapper
1075
1076 @__handle_extraction_exceptions
58f197b7 1077 def __extract_info(self, url, ie, download, extra_info, process):
a0566bbf 1078 ie_result = ie.extract(url)
1079 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1080 return
1081 if isinstance(ie_result, list):
1082 # Backwards compatibility: old IE result format
1083 ie_result = {
1084 '_type': 'compat_list',
1085 'entries': ie_result,
1086 }
a0566bbf 1087 self.add_default_extra_info(ie_result, ie, url)
1088 if process:
1089 return self.process_ie_result(ie_result, download, extra_info)
8222d8de 1090 else:
a0566bbf 1091 return ie_result
fe7e0c98 1092
ea38e55f
PH
1093 def add_default_extra_info(self, ie_result, ie, url):
1094 self.add_extra_info(ie_result, {
1095 'extractor': ie.IE_NAME,
1096 'webpage_url': url,
1097 'webpage_url_basename': url_basename(url),
1098 'extractor_key': ie.ie_key(),
1099 })
1100
8222d8de
JMF
1101 def process_ie_result(self, ie_result, download=True, extra_info={}):
1102 """
1103 Take the result of the ie(may be modified) and resolve all unresolved
1104 references (URLs, playlist items).
1105
1106 It will also download the videos if 'download'.
1107 Returns the resolved ie_result.
1108 """
e8ee972c
PH
1109 result_type = ie_result.get('_type', 'video')
1110
057a5206 1111 if result_type in ('url', 'url_transparent'):
134c6ea8 1112 ie_result['url'] = sanitize_url(ie_result['url'])
057a5206 1113 extract_flat = self.params.get('extract_flat', False)
3089bc74
S
1114 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
1115 or extract_flat is True):
de6000d9 1116 self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
e8ee972c
PH
1117 return ie_result
1118
8222d8de 1119 if result_type == 'video':
b6c45014 1120 self.add_extra_info(ie_result, extra_info)
feee2ecf 1121 return self.process_video_result(ie_result, download=download)
8222d8de
JMF
1122 elif result_type == 'url':
1123 # We have to add extra_info to the results because it may be
1124 # contained in a playlist
1125 return self.extract_info(ie_result['url'],
58f197b7 1126 download,
8222d8de
JMF
1127 ie_key=ie_result.get('ie_key'),
1128 extra_info=extra_info)
7fc3fa05
PH
1129 elif result_type == 'url_transparent':
1130 # Use the information from the embedding page
1131 info = self.extract_info(
1132 ie_result['url'], ie_key=ie_result.get('ie_key'),
1133 extra_info=extra_info, download=False, process=False)
1134
1640eb09
S
1135 # extract_info may return None when ignoreerrors is enabled and
1136 # extraction failed with an error, don't crash and return early
1137 # in this case
1138 if not info:
1139 return info
1140
412c617d
PH
1141 force_properties = dict(
1142 (k, v) for k, v in ie_result.items() if v is not None)
0396806f 1143 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
412c617d
PH
1144 if f in force_properties:
1145 del force_properties[f]
1146 new_result = info.copy()
1147 new_result.update(force_properties)
7fc3fa05 1148
0563f7ac
S
1149 # Extracted info may not be a video result (i.e.
1150 # info.get('_type', 'video') != video) but rather an url or
1151 # url_transparent. In such cases outer metadata (from ie_result)
1152 # should be propagated to inner one (info). For this to happen
1153 # _type of info should be overridden with url_transparent. This
067aa17e 1154 # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
0563f7ac
S
1155 if new_result.get('_type') == 'url':
1156 new_result['_type'] = 'url_transparent'
7fc3fa05
PH
1157
1158 return self.process_ie_result(
1159 new_result, download=download, extra_info=extra_info)
40fcba5e 1160 elif result_type in ('playlist', 'multi_video'):
30a074c2 1161 # Protect from infinite recursion due to recursively nested playlists
1162 # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
1163 webpage_url = ie_result['webpage_url']
1164 if webpage_url in self._playlist_urls:
7e85e872 1165 self.to_screen(
30a074c2 1166 '[download] Skipping already downloaded playlist: %s'
1167 % ie_result.get('title') or ie_result.get('id'))
1168 return
7e85e872 1169
30a074c2 1170 self._playlist_level += 1
1171 self._playlist_urls.add(webpage_url)
1172 try:
1173 return self.__process_playlist(ie_result, download)
1174 finally:
1175 self._playlist_level -= 1
1176 if not self._playlist_level:
1177 self._playlist_urls.clear()
8222d8de 1178 elif result_type == 'compat_list':
c9bf4114
PH
1179 self.report_warning(
1180 'Extractor %s returned a compat_list result. '
1181 'It needs to be updated.' % ie_result.get('extractor'))
5f6a1245 1182
8222d8de 1183 def _fixup(r):
9e1a5b84
JW
1184 self.add_extra_info(
1185 r,
9103bbc5
JMF
1186 {
1187 'extractor': ie_result['extractor'],
1188 'webpage_url': ie_result['webpage_url'],
29eb5174 1189 'webpage_url_basename': url_basename(ie_result['webpage_url']),
be97abc2 1190 'extractor_key': ie_result['extractor_key'],
9e1a5b84
JW
1191 }
1192 )
8222d8de
JMF
1193 return r
1194 ie_result['entries'] = [
b6c45014 1195 self.process_ie_result(_fixup(r), download, extra_info)
8222d8de
JMF
1196 for r in ie_result['entries']
1197 ]
1198 return ie_result
1199 else:
1200 raise Exception('Invalid result type: %s' % result_type)
1201
e92caff5 1202 def _ensure_dir_exists(self, path):
1203 return make_dir(path, self.report_error)
1204
30a074c2 1205 def __process_playlist(self, ie_result, download):
1206 # We process each entry in the playlist
1207 playlist = ie_result.get('title') or ie_result.get('id')
1208 self.to_screen('[download] Downloading playlist: %s' % playlist)
1209
498f5606 1210 if 'entries' not in ie_result:
1211 raise EntryNotInPlaylist()
1212 incomplete_entries = bool(ie_result.get('requested_entries'))
1213 if incomplete_entries:
1214 def fill_missing_entries(entries, indexes):
1215 ret = [None] * max(*indexes)
1216 for i, entry in zip(indexes, entries):
1217 ret[i - 1] = entry
1218 return ret
1219 ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
02fd60d3 1220
30a074c2 1221 playlist_results = []
1222
1223 playliststart = self.params.get('playliststart', 1) - 1
1224 playlistend = self.params.get('playlistend')
1225 # For backwards compatibility, interpret -1 as whole list
1226 if playlistend == -1:
1227 playlistend = None
1228
1229 playlistitems_str = self.params.get('playlist_items')
1230 playlistitems = None
1231 if playlistitems_str is not None:
1232 def iter_playlistitems(format):
1233 for string_segment in format.split(','):
1234 if '-' in string_segment:
1235 start, end = string_segment.split('-')
1236 for item in range(int(start), int(end) + 1):
1237 yield int(item)
1238 else:
1239 yield int(string_segment)
1240 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
1241
1242 ie_entries = ie_result['entries']
1243
1244 def make_playlistitems_entries(list_ie_entries):
1245 num_entries = len(list_ie_entries)
498f5606 1246 for i in playlistitems:
1247 if -num_entries < i <= num_entries:
1248 yield list_ie_entries[i - 1]
1249 elif incomplete_entries:
1250 raise EntryNotInPlaylist()
30a074c2 1251
1252 if isinstance(ie_entries, list):
1253 n_all_entries = len(ie_entries)
1254 if playlistitems:
498f5606 1255 entries = list(make_playlistitems_entries(ie_entries))
30a074c2 1256 else:
1257 entries = ie_entries[playliststart:playlistend]
1258 n_entries = len(entries)
498f5606 1259 msg = 'Collected %d videos; downloading %d of them' % (n_all_entries, n_entries)
30a074c2 1260 elif isinstance(ie_entries, PagedList):
1261 if playlistitems:
1262 entries = []
1263 for item in playlistitems:
1264 entries.extend(ie_entries.getslice(
1265 item - 1, item
1266 ))
1267 else:
1268 entries = ie_entries.getslice(
1269 playliststart, playlistend)
1270 n_entries = len(entries)
498f5606 1271 msg = 'Downloading %d videos' % n_entries
30a074c2 1272 else: # iterable
1273 if playlistitems:
498f5606 1274 entries = list(make_playlistitems_entries(list(itertools.islice(
1275 ie_entries, 0, max(playlistitems)))))
30a074c2 1276 else:
1277 entries = list(itertools.islice(
1278 ie_entries, playliststart, playlistend))
1279 n_entries = len(entries)
498f5606 1280 msg = 'Downloading %d videos' % n_entries
1281
1282 if any((entry is None for entry in entries)):
1283 raise EntryNotInPlaylist()
1284 if not playlistitems and (playliststart or playlistend):
1285 playlistitems = list(range(1 + playliststart, 1 + playliststart + len(entries)))
1286 ie_result['entries'] = entries
1287 ie_result['requested_entries'] = playlistitems
1288
1289 if self.params.get('allow_playlist_files', True):
1290 ie_copy = {
1291 'playlist': playlist,
1292 'playlist_id': ie_result.get('id'),
1293 'playlist_title': ie_result.get('title'),
1294 'playlist_uploader': ie_result.get('uploader'),
1295 'playlist_uploader_id': ie_result.get('uploader_id'),
1296 'playlist_index': 0
1297 }
1298 ie_copy.update(dict(ie_result))
1299
1300 if self.params.get('writeinfojson', False):
1301 infofn = self.prepare_filename(ie_copy, 'pl_infojson')
1302 if not self._ensure_dir_exists(encodeFilename(infofn)):
1303 return
1304 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
1305 self.to_screen('[info] Playlist metadata is already present')
1306 else:
1307 self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
1308 try:
1309 write_json_file(self.filter_requested_info(ie_result, self.params.get('clean_infojson', True)), infofn)
1310 except (OSError, IOError):
1311 self.report_error('Cannot write playlist metadata to JSON file ' + infofn)
1312
1313 if self.params.get('writedescription', False):
1314 descfn = self.prepare_filename(ie_copy, 'pl_description')
1315 if not self._ensure_dir_exists(encodeFilename(descfn)):
1316 return
1317 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
1318 self.to_screen('[info] Playlist description is already present')
1319 elif ie_result.get('description') is None:
1320 self.report_warning('There\'s no playlist description to write.')
1321 else:
1322 try:
1323 self.to_screen('[info] Writing playlist description to: ' + descfn)
1324 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1325 descfile.write(ie_result['description'])
1326 except (OSError, IOError):
1327 self.report_error('Cannot write playlist description file ' + descfn)
1328 return
30a074c2 1329
1330 if self.params.get('playlistreverse', False):
1331 entries = entries[::-1]
30a074c2 1332 if self.params.get('playlistrandom', False):
1333 random.shuffle(entries)
1334
1335 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
1336
498f5606 1337 self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg))
26e2805c 1338 failures = 0
1339 max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
30a074c2 1340 for i, entry in enumerate(entries, 1):
1341 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
1342 # This __x_forwarded_for_ip thing is a bit ugly but requires
1343 # minimal changes
1344 if x_forwarded_for:
1345 entry['__x_forwarded_for_ip'] = x_forwarded_for
1346 extra = {
1347 'n_entries': n_entries,
1348 'playlist': playlist,
1349 'playlist_id': ie_result.get('id'),
1350 'playlist_title': ie_result.get('title'),
1351 'playlist_uploader': ie_result.get('uploader'),
1352 'playlist_uploader_id': ie_result.get('uploader_id'),
498f5606 1353 'playlist_index': playlistitems[i - 1] if playlistitems else i,
30a074c2 1354 'extractor': ie_result['extractor'],
1355 'webpage_url': ie_result['webpage_url'],
1356 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1357 'extractor_key': ie_result['extractor_key'],
1358 }
1359
1360 if self._match_entry(entry, incomplete=True) is not None:
1361 continue
1362
1363 entry_result = self.__process_iterable_entry(entry, download, extra)
26e2805c 1364 if not entry_result:
1365 failures += 1
1366 if failures >= max_failures:
1367 self.report_error(
1368 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
1369 break
30a074c2 1370 # TODO: skip failed (empty) entries?
1371 playlist_results.append(entry_result)
1372 ie_result['entries'] = playlist_results
1373 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1374 return ie_result
1375
a0566bbf 1376 @__handle_extraction_exceptions
1377 def __process_iterable_entry(self, entry, download, extra_info):
1378 return self.process_ie_result(
1379 entry, download=download, extra_info=extra_info)
1380
67134eab
JMF
1381 def _build_format_filter(self, filter_spec):
1382 " Returns a function to filter the formats according to the filter_spec "
083c9df9
PH
1383
1384 OPERATORS = {
1385 '<': operator.lt,
1386 '<=': operator.le,
1387 '>': operator.gt,
1388 '>=': operator.ge,
1389 '=': operator.eq,
1390 '!=': operator.ne,
1391 }
67134eab 1392 operator_rex = re.compile(r'''(?x)\s*
a03a3c80 1393 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
083c9df9
PH
1394 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1395 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
67134eab 1396 $
083c9df9 1397 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
67134eab 1398 m = operator_rex.search(filter_spec)
9ddb6925
S
1399 if m:
1400 try:
1401 comparison_value = int(m.group('value'))
1402 except ValueError:
1403 comparison_value = parse_filesize(m.group('value'))
1404 if comparison_value is None:
1405 comparison_value = parse_filesize(m.group('value') + 'B')
1406 if comparison_value is None:
1407 raise ValueError(
1408 'Invalid value %r in format specification %r' % (
67134eab 1409 m.group('value'), filter_spec))
9ddb6925
S
1410 op = OPERATORS[m.group('op')]
1411
083c9df9 1412 if not m:
9ddb6925
S
1413 STR_OPERATORS = {
1414 '=': operator.eq,
10d33b34
YCH
1415 '^=': lambda attr, value: attr.startswith(value),
1416 '$=': lambda attr, value: attr.endswith(value),
1417 '*=': lambda attr, value: value in attr,
9ddb6925 1418 }
67134eab 1419 str_operator_rex = re.compile(r'''(?x)
f96bff99 1420 \s*(?P<key>[a-zA-Z0-9._-]+)
2cc779f4 1421 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
b0df5223 1422 \s*(?P<value>[a-zA-Z0-9._-]+)
67134eab 1423 \s*$
9ddb6925 1424 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
67134eab 1425 m = str_operator_rex.search(filter_spec)
9ddb6925
S
1426 if m:
1427 comparison_value = m.group('value')
2cc779f4
S
1428 str_op = STR_OPERATORS[m.group('op')]
1429 if m.group('negation'):
e118a879 1430 op = lambda attr, value: not str_op(attr, value)
2cc779f4
S
1431 else:
1432 op = str_op
083c9df9 1433
9ddb6925 1434 if not m:
67134eab 1435 raise ValueError('Invalid filter specification %r' % filter_spec)
083c9df9
PH
1436
1437 def _filter(f):
1438 actual_value = f.get(m.group('key'))
1439 if actual_value is None:
1440 return m.group('none_inclusive')
1441 return op(actual_value, comparison_value)
67134eab
JMF
1442 return _filter
1443
0017d9ad 1444 def _default_format_spec(self, info_dict, download=True):
0017d9ad 1445
af0f7428
S
1446 def can_merge():
1447 merger = FFmpegMergerPP(self)
1448 return merger.available and merger.can_merge()
1449
91ebc640 1450 prefer_best = (
1451 not self.params.get('simulate', False)
1452 and download
1453 and (
1454 not can_merge()
19807826 1455 or info_dict.get('is_live', False)
de6000d9 1456 or self.outtmpl_dict['default'] == '-'))
91ebc640 1457
1458 return (
1459 'best/bestvideo+bestaudio'
1460 if prefer_best
1461 else 'bestvideo*+bestaudio/best'
19807826 1462 if not self.params.get('allow_multiple_audio_streams', False)
91ebc640 1463 else 'bestvideo+bestaudio/best')
0017d9ad 1464
67134eab
JMF
1465 def build_format_selector(self, format_spec):
1466 def syntax_error(note, start):
1467 message = (
1468 'Invalid format specification: '
1469 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
1470 return SyntaxError(message)
1471
1472 PICKFIRST = 'PICKFIRST'
1473 MERGE = 'MERGE'
1474 SINGLE = 'SINGLE'
0130afb7 1475 GROUP = 'GROUP'
67134eab
JMF
1476 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
1477
91ebc640 1478 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1479 'video': self.params.get('allow_multiple_video_streams', False)}
909d24dd 1480
67134eab
JMF
1481 def _parse_filter(tokens):
1482 filter_parts = []
1483 for type, string, start, _, _ in tokens:
1484 if type == tokenize.OP and string == ']':
1485 return ''.join(filter_parts)
1486 else:
1487 filter_parts.append(string)
1488
232541df 1489 def _remove_unused_ops(tokens):
17cc1534 1490 # Remove operators that we don't use and join them with the surrounding strings
232541df
JMF
1491 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1492 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1493 last_string, last_start, last_end, last_line = None, None, None, None
1494 for type, string, start, end, line in tokens:
1495 if type == tokenize.OP and string == '[':
1496 if last_string:
1497 yield tokenize.NAME, last_string, last_start, last_end, last_line
1498 last_string = None
1499 yield type, string, start, end, line
1500 # everything inside brackets will be handled by _parse_filter
1501 for type, string, start, end, line in tokens:
1502 yield type, string, start, end, line
1503 if type == tokenize.OP and string == ']':
1504 break
1505 elif type == tokenize.OP and string in ALLOWED_OPS:
1506 if last_string:
1507 yield tokenize.NAME, last_string, last_start, last_end, last_line
1508 last_string = None
1509 yield type, string, start, end, line
1510 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1511 if not last_string:
1512 last_string = string
1513 last_start = start
1514 last_end = end
1515 else:
1516 last_string += string
1517 if last_string:
1518 yield tokenize.NAME, last_string, last_start, last_end, last_line
1519
cf2ac6df 1520 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
67134eab
JMF
1521 selectors = []
1522 current_selector = None
1523 for type, string, start, _, _ in tokens:
1524 # ENCODING is only defined in python 3.x
1525 if type == getattr(tokenize, 'ENCODING', None):
1526 continue
1527 elif type in [tokenize.NAME, tokenize.NUMBER]:
1528 current_selector = FormatSelector(SINGLE, string, [])
1529 elif type == tokenize.OP:
cf2ac6df
JMF
1530 if string == ')':
1531 if not inside_group:
1532 # ')' will be handled by the parentheses group
1533 tokens.restore_last_token()
67134eab 1534 break
cf2ac6df 1535 elif inside_merge and string in ['/', ',']:
0130afb7
JMF
1536 tokens.restore_last_token()
1537 break
cf2ac6df
JMF
1538 elif inside_choice and string == ',':
1539 tokens.restore_last_token()
1540 break
1541 elif string == ',':
0a31a350
JMF
1542 if not current_selector:
1543 raise syntax_error('"," must follow a format selector', start)
67134eab
JMF
1544 selectors.append(current_selector)
1545 current_selector = None
1546 elif string == '/':
d96d604e
JMF
1547 if not current_selector:
1548 raise syntax_error('"/" must follow a format selector', start)
67134eab 1549 first_choice = current_selector
cf2ac6df 1550 second_choice = _parse_format_selection(tokens, inside_choice=True)
f5f4a27a 1551 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
67134eab
JMF
1552 elif string == '[':
1553 if not current_selector:
1554 current_selector = FormatSelector(SINGLE, 'best', [])
1555 format_filter = _parse_filter(tokens)
1556 current_selector.filters.append(format_filter)
0130afb7
JMF
1557 elif string == '(':
1558 if current_selector:
1559 raise syntax_error('Unexpected "("', start)
cf2ac6df
JMF
1560 group = _parse_format_selection(tokens, inside_group=True)
1561 current_selector = FormatSelector(GROUP, group, [])
67134eab 1562 elif string == '+':
d03cfdce 1563 if not current_selector:
1564 raise syntax_error('Unexpected "+"', start)
1565 selector_1 = current_selector
1566 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1567 if not selector_2:
1568 raise syntax_error('Expected a selector', start)
1569 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
67134eab
JMF
1570 else:
1571 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1572 elif type == tokenize.ENDMARKER:
1573 break
1574 if current_selector:
1575 selectors.append(current_selector)
1576 return selectors
1577
f8d4ad9a 1578 def _merge(formats_pair):
1579 format_1, format_2 = formats_pair
1580
1581 formats_info = []
1582 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1583 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1584
1585 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1586 get_no_more = {"video": False, "audio": False}
1587 for (i, fmt_info) in enumerate(formats_info):
1588 for aud_vid in ["audio", "video"]:
1589 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1590 if get_no_more[aud_vid]:
1591 formats_info.pop(i)
1592 get_no_more[aud_vid] = True
1593
1594 if len(formats_info) == 1:
1595 return formats_info[0]
1596
1597 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1598 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1599
1600 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1601 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1602
1603 output_ext = self.params.get('merge_output_format')
1604 if not output_ext:
1605 if the_only_video:
1606 output_ext = the_only_video['ext']
1607 elif the_only_audio and not video_fmts:
1608 output_ext = the_only_audio['ext']
1609 else:
1610 output_ext = 'mkv'
1611
1612 new_dict = {
1613 'requested_formats': formats_info,
1614 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1615 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
1616 'ext': output_ext,
1617 }
1618
1619 if the_only_video:
1620 new_dict.update({
1621 'width': the_only_video.get('width'),
1622 'height': the_only_video.get('height'),
1623 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1624 'fps': the_only_video.get('fps'),
1625 'vcodec': the_only_video.get('vcodec'),
1626 'vbr': the_only_video.get('vbr'),
1627 'stretched_ratio': the_only_video.get('stretched_ratio'),
1628 })
1629
1630 if the_only_audio:
1631 new_dict.update({
1632 'acodec': the_only_audio.get('acodec'),
1633 'abr': the_only_audio.get('abr'),
1634 })
1635
1636 return new_dict
1637
67134eab 1638 def _build_selector_function(selector):
909d24dd 1639 if isinstance(selector, list): # ,
67134eab
JMF
1640 fs = [_build_selector_function(s) for s in selector]
1641
317f7ab6 1642 def selector_function(ctx):
67134eab 1643 for f in fs:
317f7ab6 1644 for format in f(ctx):
67134eab
JMF
1645 yield format
1646 return selector_function
909d24dd 1647
1648 elif selector.type == GROUP: # ()
0130afb7 1649 selector_function = _build_selector_function(selector.selector)
909d24dd 1650
1651 elif selector.type == PICKFIRST: # /
67134eab
JMF
1652 fs = [_build_selector_function(s) for s in selector.selector]
1653
317f7ab6 1654 def selector_function(ctx):
67134eab 1655 for f in fs:
317f7ab6 1656 picked_formats = list(f(ctx))
67134eab
JMF
1657 if picked_formats:
1658 return picked_formats
1659 return []
67134eab 1660
909d24dd 1661 elif selector.type == SINGLE: # atom
598d185d 1662 format_spec = selector.selector or 'best'
909d24dd 1663
f8d4ad9a 1664 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
909d24dd 1665 if format_spec == 'all':
1666 def selector_function(ctx):
1667 formats = list(ctx['formats'])
1668 if formats:
1669 for f in formats:
1670 yield f
f8d4ad9a 1671 elif format_spec == 'mergeall':
1672 def selector_function(ctx):
1673 formats = list(ctx['formats'])
e01d6aa4 1674 if not formats:
1675 return
921b76ca 1676 merged_format = formats[-1]
1677 for f in formats[-2::-1]:
f8d4ad9a 1678 merged_format = _merge((merged_format, f))
1679 yield merged_format
909d24dd 1680
1681 else:
1682 format_fallback = False
eff63539 1683 mobj = re.match(
1684 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1685 format_spec)
1686 if mobj is not None:
1687 format_idx = int_or_none(mobj.group('n'), default=1)
1688 format_idx = format_idx - 1 if mobj.group('bw')[0] == 'w' else -format_idx
1689 format_type = (mobj.group('type') or [None])[0]
1690 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1691 format_modified = mobj.group('mod') is not None
909d24dd 1692
1693 format_fallback = not format_type and not format_modified # for b, w
eff63539 1694 filter_f = (
1695 (lambda f: f.get('%scodec' % format_type) != 'none')
1696 if format_type and format_modified # bv*, ba*, wv*, wa*
1697 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1698 if format_type # bv, ba, wv, wa
1699 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1700 if not format_modified # b, w
1701 else None) # b*, w*
67134eab 1702 else:
909d24dd 1703 format_idx = -1
1704 filter_f = ((lambda f: f.get('ext') == format_spec)
1705 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1706 else (lambda f: f.get('format_id') == format_spec)) # id
1707
1708 def selector_function(ctx):
1709 formats = list(ctx['formats'])
1710 if not formats:
1711 return
1712 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
eff63539 1713 n = len(matches)
1714 if -n <= format_idx < n:
909d24dd 1715 yield matches[format_idx]
eff63539 1716 elif format_fallback and ctx['incomplete_formats']:
909d24dd 1717 # for extractors with incomplete formats (audio only (soundcloud)
1718 # or video only (imgur)) best/worst will fallback to
1719 # best/worst {video,audio}-only format
eff63539 1720 n = len(formats)
1721 if -n <= format_idx < n:
1722 yield formats[format_idx]
909d24dd 1723
1724 elif selector.type == MERGE: # +
d03cfdce 1725 selector_1, selector_2 = map(_build_selector_function, selector.selector)
083c9df9 1726
317f7ab6
S
1727 def selector_function(ctx):
1728 for pair in itertools.product(
d03cfdce 1729 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
67134eab 1730 yield _merge(pair)
083c9df9 1731
67134eab 1732 filters = [self._build_format_filter(f) for f in selector.filters]
083c9df9 1733
317f7ab6
S
1734 def final_selector(ctx):
1735 ctx_copy = copy.deepcopy(ctx)
67134eab 1736 for _filter in filters:
317f7ab6
S
1737 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1738 return selector_function(ctx_copy)
67134eab 1739 return final_selector
083c9df9 1740
67134eab 1741 stream = io.BytesIO(format_spec.encode('utf-8'))
0130afb7 1742 try:
232541df 1743 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
0130afb7
JMF
1744 except tokenize.TokenError:
1745 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1746
1747 class TokenIterator(object):
1748 def __init__(self, tokens):
1749 self.tokens = tokens
1750 self.counter = 0
1751
1752 def __iter__(self):
1753 return self
1754
1755 def __next__(self):
1756 if self.counter >= len(self.tokens):
1757 raise StopIteration()
1758 value = self.tokens[self.counter]
1759 self.counter += 1
1760 return value
1761
1762 next = __next__
1763
1764 def restore_last_token(self):
1765 self.counter -= 1
1766
1767 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
67134eab 1768 return _build_selector_function(parsed_selector)
a9c58ad9 1769
e5660ee6
JMF
1770 def _calc_headers(self, info_dict):
1771 res = std_headers.copy()
1772
1773 add_headers = info_dict.get('http_headers')
1774 if add_headers:
1775 res.update(add_headers)
1776
1777 cookies = self._calc_cookies(info_dict)
1778 if cookies:
1779 res['Cookie'] = cookies
1780
0016b84e
S
1781 if 'X-Forwarded-For' not in res:
1782 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1783 if x_forwarded_for_ip:
1784 res['X-Forwarded-For'] = x_forwarded_for_ip
1785
e5660ee6
JMF
1786 return res
1787
1788 def _calc_cookies(self, info_dict):
5c2266df 1789 pr = sanitized_Request(info_dict['url'])
e5660ee6 1790 self.cookiejar.add_cookie_header(pr)
662435f7 1791 return pr.get_header('Cookie')
e5660ee6 1792
dd82ffea
JMF
1793 def process_video_result(self, info_dict, download=True):
1794 assert info_dict.get('_type', 'video') == 'video'
1795
bec1fad2
PH
1796 if 'id' not in info_dict:
1797 raise ExtractorError('Missing "id" field in extractor result')
1798 if 'title' not in info_dict:
1799 raise ExtractorError('Missing "title" field in extractor result')
1800
c9969434
S
1801 def report_force_conversion(field, field_not, conversion):
1802 self.report_warning(
1803 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1804 % (field, field_not, conversion))
1805
1806 def sanitize_string_field(info, string_field):
1807 field = info.get(string_field)
1808 if field is None or isinstance(field, compat_str):
1809 return
1810 report_force_conversion(string_field, 'a string', 'string')
1811 info[string_field] = compat_str(field)
1812
1813 def sanitize_numeric_fields(info):
1814 for numeric_field in self._NUMERIC_FIELDS:
1815 field = info.get(numeric_field)
1816 if field is None or isinstance(field, compat_numeric_types):
1817 continue
1818 report_force_conversion(numeric_field, 'numeric', 'int')
1819 info[numeric_field] = int_or_none(field)
1820
1821 sanitize_string_field(info_dict, 'id')
1822 sanitize_numeric_fields(info_dict)
be6217b2 1823
dd82ffea
JMF
1824 if 'playlist' not in info_dict:
1825 # It isn't part of a playlist
1826 info_dict['playlist'] = None
1827 info_dict['playlist_index'] = None
1828
d5519808 1829 thumbnails = info_dict.get('thumbnails')
cfb56d1a
PH
1830 if thumbnails is None:
1831 thumbnail = info_dict.get('thumbnail')
1832 if thumbnail:
a7a14d95 1833 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
d5519808 1834 if thumbnails:
be6d7229 1835 thumbnails.sort(key=lambda t: (
d37708fc
RA
1836 t.get('preference') if t.get('preference') is not None else -1,
1837 t.get('width') if t.get('width') is not None else -1,
1838 t.get('height') if t.get('height') is not None else -1,
1839 t.get('id') if t.get('id') is not None else '', t.get('url')))
f6c24009 1840 for i, t in enumerate(thumbnails):
dcf77cf1 1841 t['url'] = sanitize_url(t['url'])
9603e8a7 1842 if t.get('width') and t.get('height'):
d5519808 1843 t['resolution'] = '%dx%d' % (t['width'], t['height'])
f6c24009
PH
1844 if t.get('id') is None:
1845 t['id'] = '%d' % i
d5519808 1846
b7b72db9 1847 if self.params.get('list_thumbnails'):
1848 self.list_thumbnails(info_dict)
1849 return
1850
536a55da
S
1851 thumbnail = info_dict.get('thumbnail')
1852 if thumbnail:
1853 info_dict['thumbnail'] = sanitize_url(thumbnail)
1854 elif thumbnails:
d5519808
PH
1855 info_dict['thumbnail'] = thumbnails[-1]['url']
1856
c9ae7b95 1857 if 'display_id' not in info_dict and 'id' in info_dict:
0afef30b
PH
1858 info_dict['display_id'] = info_dict['id']
1859
10db0d2f 1860 for ts_key, date_key in (
1861 ('timestamp', 'upload_date'),
1862 ('release_timestamp', 'release_date'),
1863 ):
1864 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
1865 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1866 # see http://bugs.python.org/issue1646728)
1867 try:
1868 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
1869 info_dict[date_key] = upload_date.strftime('%Y%m%d')
1870 except (ValueError, OverflowError, OSError):
1871 pass
9d2ecdbc 1872
33d2fc2f
S
1873 # Auto generate title fields corresponding to the *_number fields when missing
1874 # in order to always have clean titles. This is very common for TV series.
1875 for field in ('chapter', 'season', 'episode'):
1876 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1877 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1878
05108a49
S
1879 for cc_kind in ('subtitles', 'automatic_captions'):
1880 cc = info_dict.get(cc_kind)
1881 if cc:
1882 for _, subtitle in cc.items():
1883 for subtitle_format in subtitle:
1884 if subtitle_format.get('url'):
1885 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1886 if subtitle_format.get('ext') is None:
1887 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1888
1889 automatic_captions = info_dict.get('automatic_captions')
4bba3716 1890 subtitles = info_dict.get('subtitles')
4bba3716 1891
a504ced0 1892 if self.params.get('listsubtitles', False):
360e1ca5 1893 if 'automatic_captions' in info_dict:
05108a49
S
1894 self.list_subtitles(
1895 info_dict['id'], automatic_captions, 'automatic captions')
4bba3716 1896 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
a504ced0 1897 return
05108a49 1898
360e1ca5 1899 info_dict['requested_subtitles'] = self.process_subtitles(
05108a49 1900 info_dict['id'], subtitles, automatic_captions)
a504ced0 1901
dd82ffea
JMF
1902 # We now pick which formats have to be downloaded
1903 if info_dict.get('formats') is None:
1904 # There's only one format available
1905 formats = [info_dict]
1906 else:
1907 formats = info_dict['formats']
1908
db95dc13 1909 if not formats:
b7da73eb 1910 if not self.params.get('ignore_no_formats_error'):
1911 raise ExtractorError('No video formats found!')
1912 else:
1913 self.report_warning('No video formats found!')
db95dc13 1914
73af5cc8
S
1915 def is_wellformed(f):
1916 url = f.get('url')
a5ac0c47 1917 if not url:
73af5cc8
S
1918 self.report_warning(
1919 '"url" field is missing or empty - skipping format, '
1920 'there is an error in extractor')
a5ac0c47
S
1921 return False
1922 if isinstance(url, bytes):
1923 sanitize_string_field(f, 'url')
1924 return True
73af5cc8
S
1925
1926 # Filter out malformed formats for better extraction robustness
1927 formats = list(filter(is_wellformed, formats))
1928
181c7053
S
1929 formats_dict = {}
1930
dd82ffea 1931 # We check that all the formats have the format and format_id fields
db95dc13 1932 for i, format in enumerate(formats):
c9969434
S
1933 sanitize_string_field(format, 'format_id')
1934 sanitize_numeric_fields(format)
dcf77cf1 1935 format['url'] = sanitize_url(format['url'])
e74e3b63 1936 if not format.get('format_id'):
8016c922 1937 format['format_id'] = compat_str(i)
e2effb08
S
1938 else:
1939 # Sanitize format_id from characters used in format selector expression
ec85ded8 1940 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
181c7053
S
1941 format_id = format['format_id']
1942 if format_id not in formats_dict:
1943 formats_dict[format_id] = []
1944 formats_dict[format_id].append(format)
1945
1946 # Make sure all formats have unique format_id
1947 for format_id, ambiguous_formats in formats_dict.items():
1948 if len(ambiguous_formats) > 1:
1949 for i, format in enumerate(ambiguous_formats):
1950 format['format_id'] = '%s-%d' % (format_id, i)
1951
1952 for i, format in enumerate(formats):
8c51aa65 1953 if format.get('format') is None:
6febd1c1 1954 format['format'] = '{id} - {res}{note}'.format(
8c51aa65
JMF
1955 id=format['format_id'],
1956 res=self.format_resolution(format),
6febd1c1 1957 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
8c51aa65 1958 )
c1002e96 1959 # Automatically determine file extension if missing
5b1d8575 1960 if format.get('ext') is None:
cce929ea 1961 format['ext'] = determine_ext(format['url']).lower()
b5559424
S
1962 # Automatically determine protocol if missing (useful for format
1963 # selection purposes)
6f0be937 1964 if format.get('protocol') is None:
b5559424 1965 format['protocol'] = determine_protocol(format)
e5660ee6
JMF
1966 # Add HTTP headers, so that external programs can use them from the
1967 # json output
1968 full_format_info = info_dict.copy()
1969 full_format_info.update(format)
1970 format['http_headers'] = self._calc_headers(full_format_info)
0016b84e
S
1971 # Remove private housekeeping stuff
1972 if '__x_forwarded_for_ip' in info_dict:
1973 del info_dict['__x_forwarded_for_ip']
dd82ffea 1974
4bcc7bd1 1975 # TODO Central sorting goes here
99e206d5 1976
b7da73eb 1977 if formats and formats[0] is not info_dict:
b3d9ef88
JMF
1978 # only set the 'formats' fields if the original info_dict list them
1979 # otherwise we end up with a circular reference, the first (and unique)
f89197d7 1980 # element in the 'formats' field in info_dict is info_dict itself,
dfb1b146 1981 # which can't be exported to json
b3d9ef88 1982 info_dict['formats'] = formats
cfb56d1a 1983 if self.params.get('listformats'):
b7da73eb 1984 if not info_dict.get('formats'):
1985 raise ExtractorError('No video formats found', expected=True)
bfaae0a7 1986 self.list_formats(info_dict)
1987 return
1988
de3ef3ed 1989 req_format = self.params.get('format')
a9c58ad9 1990 if req_format is None:
0017d9ad
S
1991 req_format = self._default_format_spec(info_dict, download=download)
1992 if self.params.get('verbose'):
e8be92f9 1993 self.to_screen('[debug] Default format spec: %s' % req_format)
0017d9ad 1994
5acfa126 1995 format_selector = self.build_format_selector(req_format)
317f7ab6
S
1996
1997 # While in format selection we may need to have an access to the original
1998 # format set in order to calculate some metrics or do some processing.
1999 # For now we need to be able to guess whether original formats provided
2000 # by extractor are incomplete or not (i.e. whether extractor provides only
2001 # video-only or audio-only formats) for proper formats selection for
2002 # extractors with such incomplete formats (see
067aa17e 2003 # https://github.com/ytdl-org/youtube-dl/pull/5556).
317f7ab6
S
2004 # Since formats may be filtered during format selection and may not match
2005 # the original formats the results may be incorrect. Thus original formats
2006 # or pre-calculated metrics should be passed to format selection routines
2007 # as well.
2008 # We will pass a context object containing all necessary additional data
2009 # instead of just formats.
2010 # This fixes incorrect format selection issue (see
067aa17e 2011 # https://github.com/ytdl-org/youtube-dl/issues/10083).
2e221ca3 2012 incomplete_formats = (
317f7ab6 2013 # All formats are video-only or
3089bc74 2014 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
317f7ab6 2015 # all formats are audio-only
3089bc74 2016 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
317f7ab6
S
2017
2018 ctx = {
2019 'formats': formats,
2020 'incomplete_formats': incomplete_formats,
2021 }
2022
2023 formats_to_download = list(format_selector(ctx))
dd82ffea 2024 if not formats_to_download:
b7da73eb 2025 if not self.params.get('ignore_no_formats_error'):
2026 raise ExtractorError('Requested format is not available', expected=True)
2027 else:
2028 self.report_warning('Requested format is not available')
2029 elif download:
2030 self.to_screen(
2031 '[info] %s: Downloading format(s) %s'
2032 % (info_dict['id'], ", ".join([f['format_id'] for f in formats_to_download])))
dd82ffea 2033 if len(formats_to_download) > 1:
b7da73eb 2034 self.to_screen(
2035 '[info] %s: Downloading video in %s formats'
2036 % (info_dict['id'], len(formats_to_download)))
2037 for fmt in formats_to_download:
dd82ffea 2038 new_info = dict(info_dict)
b7da73eb 2039 new_info.update(fmt)
dd82ffea
JMF
2040 self.process_info(new_info)
2041 # We update the info dict with the best quality format (backwards compatibility)
b7da73eb 2042 if formats_to_download:
2043 info_dict.update(formats_to_download[-1])
dd82ffea
JMF
2044 return info_dict
2045
98c70d6f 2046 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
a504ced0 2047 """Select the requested subtitles and their format"""
98c70d6f
JMF
2048 available_subs = {}
2049 if normal_subtitles and self.params.get('writesubtitles'):
2050 available_subs.update(normal_subtitles)
2051 if automatic_captions and self.params.get('writeautomaticsub'):
2052 for lang, cap_info in automatic_captions.items():
360e1ca5
JMF
2053 if lang not in available_subs:
2054 available_subs[lang] = cap_info
2055
4d171848
JMF
2056 if (not self.params.get('writesubtitles') and not
2057 self.params.get('writeautomaticsub') or not
2058 available_subs):
2059 return None
a504ced0 2060
c32b0aab 2061 all_sub_langs = available_subs.keys()
a504ced0 2062 if self.params.get('allsubtitles', False):
c32b0aab 2063 requested_langs = all_sub_langs
2064 elif self.params.get('subtitleslangs', False):
2065 requested_langs = set()
2066 for lang in self.params.get('subtitleslangs'):
2067 if lang == 'all':
2068 requested_langs.update(all_sub_langs)
2069 continue
2070 discard = lang[0] == '-'
2071 if discard:
2072 lang = lang[1:]
2073 current_langs = filter(re.compile(lang + '$').match, all_sub_langs)
2074 if discard:
2075 for lang in current_langs:
2076 requested_langs.discard(lang)
2077 else:
2078 requested_langs.update(current_langs)
2079 elif 'en' in available_subs:
2080 requested_langs = ['en']
a504ced0 2081 else:
c32b0aab 2082 requested_langs = [list(all_sub_langs)[0]]
a504ced0
JMF
2083
2084 formats_query = self.params.get('subtitlesformat', 'best')
2085 formats_preference = formats_query.split('/') if formats_query else []
2086 subs = {}
2087 for lang in requested_langs:
2088 formats = available_subs.get(lang)
2089 if formats is None:
2090 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2091 continue
a504ced0
JMF
2092 for ext in formats_preference:
2093 if ext == 'best':
2094 f = formats[-1]
2095 break
2096 matches = list(filter(lambda f: f['ext'] == ext, formats))
2097 if matches:
2098 f = matches[-1]
2099 break
2100 else:
2101 f = formats[-1]
2102 self.report_warning(
2103 'No subtitle format found matching "%s" for language %s, '
2104 'using %s' % (formats_query, lang, f['ext']))
2105 subs[lang] = f
2106 return subs
2107
d06daf23
S
2108 def __forced_printings(self, info_dict, filename, incomplete):
2109 def print_mandatory(field):
2110 if (self.params.get('force%s' % field, False)
2111 and (not incomplete or info_dict.get(field) is not None)):
2112 self.to_stdout(info_dict[field])
2113
2114 def print_optional(field):
2115 if (self.params.get('force%s' % field, False)
2116 and info_dict.get(field) is not None):
2117 self.to_stdout(info_dict[field])
2118
2119 print_mandatory('title')
2120 print_mandatory('id')
2121 if self.params.get('forceurl', False) and not incomplete:
2122 if info_dict.get('requested_formats') is not None:
2123 for f in info_dict['requested_formats']:
2124 self.to_stdout(f['url'] + f.get('play_path', ''))
2125 else:
2126 # For RTMP URLs, also include the playpath
2127 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2128 print_optional('thumbnail')
2129 print_optional('description')
2130 if self.params.get('forcefilename', False) and filename is not None:
2131 self.to_stdout(filename)
2132 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2133 self.to_stdout(formatSeconds(info_dict['duration']))
2134 print_mandatory('format')
2135 if self.params.get('forcejson', False):
277d6ff5 2136 self.post_extract(info_dict)
75d43ca0 2137 self.to_stdout(json.dumps(info_dict, default=repr))
d06daf23 2138
8222d8de
JMF
2139 def process_info(self, info_dict):
2140 """Process a single resolved IE result."""
2141
2142 assert info_dict.get('_type', 'video') == 'video'
fd288278 2143
0202b52a 2144 info_dict.setdefault('__postprocessors', [])
2145
fd288278
PH
2146 max_downloads = self.params.get('max_downloads')
2147 if max_downloads is not None:
2148 if self._num_downloads >= int(max_downloads):
2149 raise MaxDownloadsReached()
8222d8de 2150
d06daf23 2151 # TODO: backward compatibility, to be removed
8222d8de 2152 info_dict['fulltitle'] = info_dict['title']
8222d8de 2153
11b85ce6 2154 if 'format' not in info_dict:
8222d8de
JMF
2155 info_dict['format'] = info_dict['ext']
2156
8b0d7497 2157 if self._match_entry(info_dict, incomplete=False) is not None:
8222d8de
JMF
2158 return
2159
277d6ff5 2160 self.post_extract(info_dict)
fd288278 2161 self._num_downloads += 1
8222d8de 2162
56d868db 2163 info_dict, _ = self.pre_process(info_dict)
5bfa4862 2164
dcf64d43 2165 # info_dict['_filename'] needs to be set for backward compatibility
de6000d9 2166 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2167 temp_filename = self.prepare_filename(info_dict, 'temp')
0202b52a 2168 files_to_move = {}
8222d8de
JMF
2169
2170 # Forced printings
0202b52a 2171 self.__forced_printings(info_dict, full_filename, incomplete=False)
8222d8de 2172
8222d8de 2173 if self.params.get('simulate', False):
2d30509f 2174 if self.params.get('force_write_download_archive', False):
2175 self.record_download_archive(info_dict)
2176
2177 # Do nothing else if in simulate mode
8222d8de
JMF
2178 return
2179
de6000d9 2180 if full_filename is None:
8222d8de
JMF
2181 return
2182
e92caff5 2183 if not self._ensure_dir_exists(encodeFilename(full_filename)):
0202b52a 2184 return
e92caff5 2185 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
8222d8de
JMF
2186 return
2187
2188 if self.params.get('writedescription', False):
de6000d9 2189 descfn = self.prepare_filename(info_dict, 'description')
e92caff5 2190 if not self._ensure_dir_exists(encodeFilename(descfn)):
0202b52a 2191 return
0c3d0f51 2192 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
6febd1c1 2193 self.to_screen('[info] Video description is already present')
f00fd51d
JMF
2194 elif info_dict.get('description') is None:
2195 self.report_warning('There\'s no description to write.')
7b6fefc9
PH
2196 else:
2197 try:
6febd1c1 2198 self.to_screen('[info] Writing video description to: ' + descfn)
7b6fefc9
PH
2199 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2200 descfile.write(info_dict['description'])
7b6fefc9 2201 except (OSError, IOError):
6febd1c1 2202 self.report_error('Cannot write description file ' + descfn)
7b6fefc9 2203 return
8222d8de 2204
1fb07d10 2205 if self.params.get('writeannotations', False):
de6000d9 2206 annofn = self.prepare_filename(info_dict, 'annotation')
e92caff5 2207 if not self._ensure_dir_exists(encodeFilename(annofn)):
0202b52a 2208 return
0c3d0f51 2209 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
6febd1c1 2210 self.to_screen('[info] Video annotations are already present')
ffddb112
RA
2211 elif not info_dict.get('annotations'):
2212 self.report_warning('There are no annotations to write.')
7b6fefc9
PH
2213 else:
2214 try:
6febd1c1 2215 self.to_screen('[info] Writing video annotations to: ' + annofn)
7b6fefc9
PH
2216 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2217 annofile.write(info_dict['annotations'])
2218 except (KeyError, TypeError):
6febd1c1 2219 self.report_warning('There are no annotations to write.')
7b6fefc9 2220 except (OSError, IOError):
6febd1c1 2221 self.report_error('Cannot write annotations file: ' + annofn)
7b6fefc9 2222 return
1fb07d10 2223
9f448fcb 2224 def dl(name, info, subtitle=False):
98b69821 2225 fd = get_suitable_downloader(info, self.params)(self, self.params)
2226 for ph in self._progress_hooks:
2227 fd.add_progress_hook(ph)
2228 if self.params.get('verbose'):
29f7c58a 2229 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
46906886
DA
2230 new_info = dict(info)
2231 if new_info.get('http_headers') is None:
2232 new_info['http_headers'] = self._calc_headers(new_info)
2233 return fd.download(name, new_info, subtitle)
98b69821 2234
c4a91be7 2235 subtitles_are_requested = any([self.params.get('writesubtitles', False),
0b7f3118 2236 self.params.get('writeautomaticsub')])
c4a91be7 2237
c84dd8a9 2238 if subtitles_are_requested and info_dict.get('requested_subtitles'):
8222d8de
JMF
2239 # subtitles download errors are already managed as troubles in relevant IE
2240 # that way it will silently go on when used with unsupporting IE
c84dd8a9 2241 subtitles = info_dict['requested_subtitles']
fa57af1e 2242 # ie = self.get_info_extractor(info_dict['extractor_key'])
a504ced0
JMF
2243 for sub_lang, sub_info in subtitles.items():
2244 sub_format = sub_info['ext']
56d868db 2245 sub_filename = subtitles_filename(temp_filename, sub_lang, sub_format, info_dict.get('ext'))
2246 sub_filename_final = subtitles_filename(
2247 self.prepare_filename(info_dict, 'subtitle'), sub_lang, sub_format, info_dict.get('ext'))
0c3d0f51 2248 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
5ff1bc0c 2249 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
dcf64d43 2250 sub_info['filepath'] = sub_filename
0202b52a 2251 files_to_move[sub_filename] = sub_filename_final
a504ced0 2252 else:
0c9df79e 2253 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
5ff1bc0c
RA
2254 if sub_info.get('data') is not None:
2255 try:
2256 # Use newline='' to prevent conversion of newline characters
067aa17e 2257 # See https://github.com/ytdl-org/youtube-dl/issues/10268
5ff1bc0c
RA
2258 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2259 subfile.write(sub_info['data'])
dcf64d43 2260 sub_info['filepath'] = sub_filename
0202b52a 2261 files_to_move[sub_filename] = sub_filename_final
5ff1bc0c
RA
2262 except (OSError, IOError):
2263 self.report_error('Cannot write subtitles file ' + sub_filename)
2264 return
7b6fefc9 2265 else:
5ff1bc0c 2266 try:
dcf64d43 2267 dl(sub_filename, sub_info.copy(), subtitle=True)
2268 sub_info['filepath'] = sub_filename
0202b52a 2269 files_to_move[sub_filename] = sub_filename_final
0c9df79e 2270 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
5ff1bc0c
RA
2271 self.report_warning('Unable to download subtitle for "%s": %s' %
2272 (sub_lang, error_to_compat_str(err)))
2273 continue
8222d8de 2274
8222d8de 2275 if self.params.get('writeinfojson', False):
de6000d9 2276 infofn = self.prepare_filename(info_dict, 'infojson')
e92caff5 2277 if not self._ensure_dir_exists(encodeFilename(infofn)):
0202b52a 2278 return
0c3d0f51 2279 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
66c935fb 2280 self.to_screen('[info] Video metadata is already present')
7b6fefc9 2281 else:
66c935fb 2282 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
7b6fefc9 2283 try:
75d43ca0 2284 write_json_file(self.filter_requested_info(info_dict, self.params.get('clean_infojson', True)), infofn)
7b6fefc9 2285 except (OSError, IOError):
66c935fb 2286 self.report_error('Cannot write video metadata to JSON file ' + infofn)
7b6fefc9 2287 return
de6000d9 2288 info_dict['__infojson_filename'] = infofn
8222d8de 2289
56d868db 2290 for thumb_ext in self._write_thumbnails(info_dict, temp_filename):
2291 thumb_filename_temp = replace_extension(temp_filename, thumb_ext, info_dict.get('ext'))
2292 thumb_filename = replace_extension(
2293 self.prepare_filename(info_dict, 'thumbnail'), thumb_ext, info_dict.get('ext'))
dcf64d43 2294 files_to_move[thumb_filename_temp] = thumb_filename
8222d8de 2295
732044af 2296 # Write internet shortcut files
2297 url_link = webloc_link = desktop_link = False
2298 if self.params.get('writelink', False):
2299 if sys.platform == "darwin": # macOS.
2300 webloc_link = True
2301 elif sys.platform.startswith("linux"):
2302 desktop_link = True
2303 else: # if sys.platform in ['win32', 'cygwin']:
2304 url_link = True
2305 if self.params.get('writeurllink', False):
2306 url_link = True
2307 if self.params.get('writewebloclink', False):
2308 webloc_link = True
2309 if self.params.get('writedesktoplink', False):
2310 desktop_link = True
2311
2312 if url_link or webloc_link or desktop_link:
2313 if 'webpage_url' not in info_dict:
2314 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2315 return
2316 ascii_url = iri_to_uri(info_dict['webpage_url'])
2317
2318 def _write_link_file(extension, template, newline, embed_filename):
0202b52a 2319 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
10e3742e 2320 if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
732044af 2321 self.to_screen('[info] Internet shortcut is already present')
2322 else:
2323 try:
2324 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2325 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2326 template_vars = {'url': ascii_url}
2327 if embed_filename:
2328 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2329 linkfile.write(template % template_vars)
2330 except (OSError, IOError):
2331 self.report_error('Cannot write internet shortcut ' + linkfn)
2332 return False
2333 return True
2334
2335 if url_link:
2336 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2337 return
2338 if webloc_link:
2339 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2340 return
2341 if desktop_link:
2342 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2343 return
2344
56d868db 2345 try:
2346 info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
2347 except PostProcessingError as err:
2348 self.report_error('Preprocessing: %s' % str(err))
2349 return
2350
732044af 2351 must_record_download_archive = False
56d868db 2352 if self.params.get('skip_download', False):
2353 info_dict['filepath'] = temp_filename
2354 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2355 info_dict['__files_to_move'] = files_to_move
2356 info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
2357 else:
2358 # Download
4340deca 2359 try:
0202b52a 2360
6b591b29 2361 def existing_file(*filepaths):
2362 ext = info_dict.get('ext')
2363 final_ext = self.params.get('final_ext', ext)
2364 existing_files = []
2365 for file in orderedSet(filepaths):
2366 if final_ext != ext:
2367 converted = replace_extension(file, final_ext, ext)
2368 if os.path.exists(encodeFilename(converted)):
2369 existing_files.append(converted)
2370 if os.path.exists(encodeFilename(file)):
2371 existing_files.append(file)
2372
2373 if not existing_files or self.params.get('overwrites', False):
2374 for file in orderedSet(existing_files):
2375 self.report_file_delete(file)
2376 os.remove(encodeFilename(file))
2377 return None
2378
2379 self.report_file_already_downloaded(existing_files[0])
2380 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2381 return existing_files[0]
0202b52a 2382
2383 success = True
4340deca
P
2384 if info_dict.get('requested_formats') is not None:
2385 downloaded = []
d47aeb22 2386 merger = FFmpegMergerPP(self)
63ad4d43 2387 if self.params.get('allow_unplayable_formats'):
2388 self.report_warning(
2389 'You have requested merging of multiple formats '
2390 'while also allowing unplayable formats to be downloaded. '
2391 'The formats won\'t be merged to prevent data corruption.')
2392 elif not merger.available:
2393 self.report_warning(
2394 'You have requested merging of multiple formats but ffmpeg is not installed. '
2395 'The formats won\'t be merged.')
81cd954a
S
2396
2397 def compatible_formats(formats):
d03cfdce 2398 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2399 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2400 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2401 if len(video_formats) > 2 or len(audio_formats) > 2:
2402 return False
2403
81cd954a 2404 # Check extension
d03cfdce 2405 exts = set(format.get('ext') for format in formats)
2406 COMPATIBLE_EXTS = (
2407 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2408 set(('webm',)),
2409 )
2410 for ext_sets in COMPATIBLE_EXTS:
2411 if ext_sets.issuperset(exts):
2412 return True
81cd954a
S
2413 # TODO: Check acodec/vcodec
2414 return False
2415
2416 requested_formats = info_dict['requested_formats']
0202b52a 2417 old_ext = info_dict['ext']
4d971a16 2418 if self.params.get('merge_output_format') is None:
2419 if not compatible_formats(requested_formats):
2420 info_dict['ext'] = 'mkv'
2421 self.report_warning(
2422 'Requested formats are incompatible for merge and will be merged into mkv.')
2423 if (info_dict['ext'] == 'webm'
2424 and self.params.get('writethumbnail', False)
2425 and info_dict.get('thumbnails')):
2426 info_dict['ext'] = 'mkv'
2427 self.report_warning(
2428 'webm doesn\'t support embedding a thumbnail, mkv will be used.')
0202b52a 2429
2430 def correct_ext(filename):
2431 filename_real_ext = os.path.splitext(filename)[1][1:]
2432 filename_wo_ext = (
2433 os.path.splitext(filename)[0]
2434 if filename_real_ext == old_ext
2435 else filename)
2436 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2437
38c6902b 2438 # Ensure filename always has a correct extension for successful merge
0202b52a 2439 full_filename = correct_ext(full_filename)
2440 temp_filename = correct_ext(temp_filename)
2441 dl_filename = existing_file(full_filename, temp_filename)
1ea24129 2442 info_dict['__real_download'] = False
0202b52a 2443 if dl_filename is None:
81cd954a 2444 for f in requested_formats:
5b5fbc08
JMF
2445 new_info = dict(info_dict)
2446 new_info.update(f)
c5c9bf0c 2447 fname = prepend_extension(
de6000d9 2448 self.prepare_filename(new_info, 'temp'),
c5c9bf0c 2449 'f%s' % f['format_id'], new_info['ext'])
e92caff5 2450 if not self._ensure_dir_exists(fname):
c5c9bf0c 2451 return
5b5fbc08 2452 downloaded.append(fname)
a9e7f546 2453 partial_success, real_download = dl(fname, new_info)
1ea24129 2454 info_dict['__real_download'] = info_dict['__real_download'] or real_download
5b5fbc08 2455 success = success and partial_success
63ad4d43 2456 if merger.available and not self.params.get('allow_unplayable_formats'):
efabc161 2457 info_dict['__postprocessors'].append(merger)
1ea24129 2458 info_dict['__files_to_merge'] = downloaded
2459 # Even if there were no downloads, it is being merged only now
2460 info_dict['__real_download'] = True
42bb0c59 2461 else:
2462 for file in downloaded:
2463 files_to_move[file] = None
4340deca
P
2464 else:
2465 # Just a single file
0202b52a 2466 dl_filename = existing_file(full_filename, temp_filename)
2467 if dl_filename is None:
2468 success, real_download = dl(temp_filename, info_dict)
2469 info_dict['__real_download'] = real_download
2470
0202b52a 2471 dl_filename = dl_filename or temp_filename
c571435f 2472 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
0202b52a 2473
4340deca 2474 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
7960b056 2475 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
4340deca
P
2476 return
2477 except (OSError, IOError) as err:
2478 raise UnavailableVideoError(err)
2479 except (ContentTooShortError, ) as err:
2480 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2481 return
8222d8de 2482
de6000d9 2483 if success and full_filename != '-':
6271f1ca 2484 # Fixup content
62cd676c
PH
2485 fixup_policy = self.params.get('fixup')
2486 if fixup_policy is None:
2487 fixup_policy = 'detect_or_warn'
2488
e4172ac9 2489 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
d1e4a464 2490
6271f1ca
PH
2491 stretched_ratio = info_dict.get('stretched_ratio')
2492 if stretched_ratio is not None and stretched_ratio != 1:
6271f1ca
PH
2493 if fixup_policy == 'warn':
2494 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2495 info_dict['id'], stretched_ratio))
2496 elif fixup_policy == 'detect_or_warn':
2497 stretched_pp = FFmpegFixupStretchedPP(self)
2498 if stretched_pp.available:
6271f1ca
PH
2499 info_dict['__postprocessors'].append(stretched_pp)
2500 else:
2501 self.report_warning(
d1e4a464
S
2502 '%s: Non-uniform pixel ratio (%s). %s'
2503 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
6271f1ca 2504 else:
62cd676c
PH
2505 assert fixup_policy in ('ignore', 'never')
2506
3089bc74 2507 if (info_dict.get('requested_formats') is None
6b591b29 2508 and info_dict.get('container') == 'm4a_dash'
2509 and info_dict.get('ext') == 'm4a'):
62cd676c 2510 if fixup_policy == 'warn':
d1e4a464
S
2511 self.report_warning(
2512 '%s: writing DASH m4a. '
2513 'Only some players support this container.'
2514 % info_dict['id'])
62cd676c
PH
2515 elif fixup_policy == 'detect_or_warn':
2516 fixup_pp = FFmpegFixupM4aPP(self)
2517 if fixup_pp.available:
62cd676c
PH
2518 info_dict['__postprocessors'].append(fixup_pp)
2519 else:
2520 self.report_warning(
d1e4a464
S
2521 '%s: writing DASH m4a. '
2522 'Only some players support this container. %s'
2523 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
62cd676c
PH
2524 else:
2525 assert fixup_policy in ('ignore', 'never')
6271f1ca 2526
0a473f2f 2527 if ('protocol' in info_dict
2528 and get_suitable_downloader(info_dict, self.params).__name__ == 'HlsFD'):
f17f8651 2529 if fixup_policy == 'warn':
a02682fd 2530 self.report_warning('%s: malformed AAC bitstream detected.' % (
f17f8651 2531 info_dict['id']))
2532 elif fixup_policy == 'detect_or_warn':
2533 fixup_pp = FFmpegFixupM3u8PP(self)
2534 if fixup_pp.available:
f17f8651 2535 info_dict['__postprocessors'].append(fixup_pp)
2536 else:
2537 self.report_warning(
a02682fd 2538 '%s: malformed AAC bitstream detected. %s'
d1e4a464 2539 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
f17f8651 2540 else:
2541 assert fixup_policy in ('ignore', 'never')
2542
8222d8de 2543 try:
23c1a667 2544 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
af819c21 2545 except PostProcessingError as err:
2546 self.report_error('Postprocessing: %s' % str(err))
8222d8de 2547 return
ab8e5e51
AM
2548 try:
2549 for ph in self._post_hooks:
23c1a667 2550 ph(info_dict['filepath'])
ab8e5e51
AM
2551 except Exception as err:
2552 self.report_error('post hooks: %s' % str(err))
2553 return
2d30509f 2554 must_record_download_archive = True
2555
2556 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2557 self.record_download_archive(info_dict)
c3e6ffba 2558 max_downloads = self.params.get('max_downloads')
2559 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2560 raise MaxDownloadsReached()
8222d8de
JMF
2561
2562 def download(self, url_list):
2563 """Download a given list of URLs."""
de6000d9 2564 outtmpl = self.outtmpl_dict['default']
3089bc74
S
2565 if (len(url_list) > 1
2566 and outtmpl != '-'
2567 and '%' not in outtmpl
2568 and self.params.get('max_downloads') != 1):
acd69589 2569 raise SameFileError(outtmpl)
8222d8de
JMF
2570
2571 for url in url_list:
2572 try:
5f6a1245 2573 # It also downloads the videos
61aa5ba3
S
2574 res = self.extract_info(
2575 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
8222d8de 2576 except UnavailableVideoError:
6febd1c1 2577 self.report_error('unable to download video')
8222d8de 2578 except MaxDownloadsReached:
8b0d7497 2579 self.to_screen('[info] Maximum number of downloaded files reached')
2580 raise
2581 except ExistingVideoReached:
d83cb531 2582 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
8b0d7497 2583 raise
2584 except RejectedVideoReached:
d83cb531 2585 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
8222d8de 2586 raise
63e0be34
PH
2587 else:
2588 if self.params.get('dump_single_json', False):
277d6ff5 2589 self.post_extract(res)
75d43ca0 2590 self.to_stdout(json.dumps(res, default=repr))
8222d8de
JMF
2591
2592 return self._download_retcode
2593
1dcc4c0c 2594 def download_with_info_file(self, info_filename):
31bd3925
JMF
2595 with contextlib.closing(fileinput.FileInput(
2596 [info_filename], mode='r',
2597 openhook=fileinput.hook_encoded('utf-8'))) as f:
2598 # FileInput doesn't have a read method, we can't call json.load
498f5606 2599 info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
d4943898
JMF
2600 try:
2601 self.process_ie_result(info, download=True)
498f5606 2602 except (DownloadError, EntryNotInPlaylist):
d4943898
JMF
2603 webpage_url = info.get('webpage_url')
2604 if webpage_url is not None:
6febd1c1 2605 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
d4943898
JMF
2606 return self.download([webpage_url])
2607 else:
2608 raise
2609 return self._download_retcode
1dcc4c0c 2610
cb202fd2 2611 @staticmethod
75d43ca0 2612 def filter_requested_info(info_dict, actually_filter=True):
2613 if not actually_filter:
394dcd44 2614 info_dict['epoch'] = int(time.time())
75d43ca0 2615 return info_dict
5226731e 2616 exceptions = {
498f5606 2617 'remove': ['requested_formats', 'requested_subtitles', 'requested_entries', 'filepath', 'entries'],
5226731e 2618 'keep': ['_type'],
2619 }
2620 keep_key = lambda k: k in exceptions['keep'] or not (k.startswith('_') or k in exceptions['remove'])
2621 filter_fn = lambda obj: (
a515a78d 2622 list(map(filter_fn, obj)) if isinstance(obj, (list, tuple))
2623 else obj if not isinstance(obj, dict)
2624 else dict((k, filter_fn(v)) for k, v in obj.items() if keep_key(k)))
5226731e 2625 return filter_fn(info_dict)
cb202fd2 2626
dcf64d43 2627 def run_pp(self, pp, infodict):
5bfa4862 2628 files_to_delete = []
dcf64d43 2629 if '__files_to_move' not in infodict:
2630 infodict['__files_to_move'] = {}
af819c21 2631 files_to_delete, infodict = pp.run(infodict)
5bfa4862 2632 if not files_to_delete:
dcf64d43 2633 return infodict
5bfa4862 2634
2635 if self.params.get('keepvideo', False):
2636 for f in files_to_delete:
dcf64d43 2637 infodict['__files_to_move'].setdefault(f, '')
5bfa4862 2638 else:
2639 for old_filename in set(files_to_delete):
2640 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2641 try:
2642 os.remove(encodeFilename(old_filename))
2643 except (IOError, OSError):
2644 self.report_warning('Unable to remove downloaded original file')
dcf64d43 2645 if old_filename in infodict['__files_to_move']:
2646 del infodict['__files_to_move'][old_filename]
2647 return infodict
5bfa4862 2648
277d6ff5 2649 @staticmethod
2650 def post_extract(info_dict):
2651 def actual_post_extract(info_dict):
2652 if info_dict.get('_type') in ('playlist', 'multi_video'):
2653 for video_dict in info_dict.get('entries', {}):
b050d210 2654 actual_post_extract(video_dict or {})
277d6ff5 2655 return
2656
2657 if '__post_extractor' not in info_dict:
2658 return
2659 post_extractor = info_dict['__post_extractor']
2660 if post_extractor:
2661 info_dict.update(post_extractor().items())
2662 del info_dict['__post_extractor']
2663 return
2664
b050d210 2665 actual_post_extract(info_dict or {})
277d6ff5 2666
56d868db 2667 def pre_process(self, ie_info, key='pre_process', files_to_move=None):
5bfa4862 2668 info = dict(ie_info)
56d868db 2669 info['__files_to_move'] = files_to_move or {}
2670 for pp in self._pps[key]:
dcf64d43 2671 info = self.run_pp(pp, info)
56d868db 2672 return info, info.pop('__files_to_move', None)
5bfa4862 2673
dcf64d43 2674 def post_process(self, filename, ie_info, files_to_move=None):
8222d8de
JMF
2675 """Run all the postprocessors on the given file."""
2676 info = dict(ie_info)
2677 info['filepath'] = filename
dcf64d43 2678 info['__files_to_move'] = files_to_move or {}
0202b52a 2679
56d868db 2680 for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
dcf64d43 2681 info = self.run_pp(pp, info)
2682 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2683 del info['__files_to_move']
56d868db 2684 for pp in self._pps['after_move']:
dcf64d43 2685 info = self.run_pp(pp, info)
23c1a667 2686 return info
c1c9a79c 2687
5db07df6 2688 def _make_archive_id(self, info_dict):
e9fef7ee
S
2689 video_id = info_dict.get('id')
2690 if not video_id:
2691 return
5db07df6
PH
2692 # Future-proof against any change in case
2693 # and backwards compatibility with prior versions
e9fef7ee 2694 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
7012b23c 2695 if extractor is None:
1211bb6d
S
2696 url = str_or_none(info_dict.get('url'))
2697 if not url:
2698 return
e9fef7ee
S
2699 # Try to find matching extractor for the URL and take its ie_key
2700 for ie in self._ies:
1211bb6d 2701 if ie.suitable(url):
e9fef7ee
S
2702 extractor = ie.ie_key()
2703 break
2704 else:
2705 return
d0757229 2706 return '%s %s' % (extractor.lower(), video_id)
5db07df6
PH
2707
2708 def in_download_archive(self, info_dict):
2709 fn = self.params.get('download_archive')
2710 if fn is None:
2711 return False
2712
2713 vid_id = self._make_archive_id(info_dict)
e9fef7ee 2714 if not vid_id:
7012b23c 2715 return False # Incomplete video information
5db07df6 2716
a45e8619 2717 return vid_id in self.archive
c1c9a79c
PH
2718
2719 def record_download_archive(self, info_dict):
2720 fn = self.params.get('download_archive')
2721 if fn is None:
2722 return
5db07df6
PH
2723 vid_id = self._make_archive_id(info_dict)
2724 assert vid_id
c1c9a79c 2725 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
6febd1c1 2726 archive_file.write(vid_id + '\n')
a45e8619 2727 self.archive.add(vid_id)
dd82ffea 2728
8c51aa65 2729 @staticmethod
8abeeb94 2730 def format_resolution(format, default='unknown'):
fb04e403
PH
2731 if format.get('vcodec') == 'none':
2732 return 'audio only'
f49d89ee
PH
2733 if format.get('resolution') is not None:
2734 return format['resolution']
35615307
DA
2735 if format.get('width') and format.get('height'):
2736 res = '%dx%d' % (format['width'], format['height'])
2737 elif format.get('height'):
2738 res = '%sp' % format['height']
2739 elif format.get('width'):
388ae76b 2740 res = '%dx?' % format['width']
8c51aa65 2741 else:
8abeeb94 2742 res = default
8c51aa65
JMF
2743 return res
2744
c57f7757
PH
2745 def _format_note(self, fdict):
2746 res = ''
2747 if fdict.get('ext') in ['f4f', 'f4m']:
2748 res += '(unsupported) '
32f90364
PH
2749 if fdict.get('language'):
2750 if res:
2751 res += ' '
9016d76f 2752 res += '[%s] ' % fdict['language']
c57f7757
PH
2753 if fdict.get('format_note') is not None:
2754 res += fdict['format_note'] + ' '
2755 if fdict.get('tbr') is not None:
2756 res += '%4dk ' % fdict['tbr']
2757 if fdict.get('container') is not None:
2758 if res:
2759 res += ', '
2760 res += '%s container' % fdict['container']
3089bc74
S
2761 if (fdict.get('vcodec') is not None
2762 and fdict.get('vcodec') != 'none'):
c57f7757
PH
2763 if res:
2764 res += ', '
2765 res += fdict['vcodec']
91c7271a 2766 if fdict.get('vbr') is not None:
c57f7757
PH
2767 res += '@'
2768 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2769 res += 'video@'
2770 if fdict.get('vbr') is not None:
2771 res += '%4dk' % fdict['vbr']
fbb21cf5 2772 if fdict.get('fps') is not None:
5d583bdf
S
2773 if res:
2774 res += ', '
2775 res += '%sfps' % fdict['fps']
c57f7757
PH
2776 if fdict.get('acodec') is not None:
2777 if res:
2778 res += ', '
2779 if fdict['acodec'] == 'none':
2780 res += 'video only'
2781 else:
2782 res += '%-5s' % fdict['acodec']
2783 elif fdict.get('abr') is not None:
2784 if res:
2785 res += ', '
2786 res += 'audio'
2787 if fdict.get('abr') is not None:
2788 res += '@%3dk' % fdict['abr']
2789 if fdict.get('asr') is not None:
2790 res += ' (%5dHz)' % fdict['asr']
2791 if fdict.get('filesize') is not None:
2792 if res:
2793 res += ', '
2794 res += format_bytes(fdict['filesize'])
9732d77e
PH
2795 elif fdict.get('filesize_approx') is not None:
2796 if res:
2797 res += ', '
2798 res += '~' + format_bytes(fdict['filesize_approx'])
c57f7757 2799 return res
91c7271a 2800
76d321f6 2801 def _format_note_table(self, f):
2802 def join_fields(*vargs):
2803 return ', '.join((val for val in vargs if val != ''))
2804
2805 return join_fields(
2806 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2807 format_field(f, 'language', '[%s]'),
2808 format_field(f, 'format_note'),
2809 format_field(f, 'container', ignore=(None, f.get('ext'))),
2810 format_field(f, 'asr', '%5dHz'))
2811
c57f7757 2812 def list_formats(self, info_dict):
94badb25 2813 formats = info_dict.get('formats', [info_dict])
76d321f6 2814 new_format = self.params.get('listformats_table', False)
2815 if new_format:
2816 table = [
2817 [
2818 format_field(f, 'format_id'),
2819 format_field(f, 'ext'),
2820 self.format_resolution(f),
2821 format_field(f, 'fps', '%d'),
2822 '|',
2823 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2824 format_field(f, 'tbr', '%4dk'),
52a8a1e1 2825 shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
76d321f6 2826 '|',
2827 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2828 format_field(f, 'vbr', '%4dk'),
2829 format_field(f, 'acodec', default='unknown').replace('none', ''),
2830 format_field(f, 'abr', '%3dk'),
2831 format_field(f, 'asr', '%5dHz'),
2832 self._format_note_table(f)]
2833 for f in formats
2834 if f.get('preference') is None or f['preference'] >= -1000]
2835 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2836 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2837 else:
2838 table = [
2839 [
2840 format_field(f, 'format_id'),
2841 format_field(f, 'ext'),
2842 self.format_resolution(f),
2843 self._format_note(f)]
2844 for f in formats
2845 if f.get('preference') is None or f['preference'] >= -1000]
2846 header_line = ['format code', 'extension', 'resolution', 'note']
57dd9a8f 2847
cfb56d1a 2848 self.to_screen(
76d321f6 2849 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2850 header_line,
2851 table,
2852 delim=new_format,
2853 extraGap=(0 if new_format else 1),
2854 hideEmpty=new_format)))
cfb56d1a
PH
2855
2856 def list_thumbnails(self, info_dict):
2857 thumbnails = info_dict.get('thumbnails')
2858 if not thumbnails:
b7b72db9 2859 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2860 return
cfb56d1a
PH
2861
2862 self.to_screen(
2863 '[info] Thumbnails for %s:' % info_dict['id'])
2864 self.to_screen(render_table(
2865 ['ID', 'width', 'height', 'URL'],
2866 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
dca08720 2867
360e1ca5 2868 def list_subtitles(self, video_id, subtitles, name='subtitles'):
a504ced0 2869 if not subtitles:
360e1ca5 2870 self.to_screen('%s has no %s' % (video_id, name))
a504ced0 2871 return
a504ced0 2872 self.to_screen(
edab9dbf
JMF
2873 'Available %s for %s:' % (name, video_id))
2874 self.to_screen(render_table(
2875 ['Language', 'formats'],
2876 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2877 for lang, formats in subtitles.items()]))
a504ced0 2878
dca08720
PH
2879 def urlopen(self, req):
2880 """ Start an HTTP download """
82d8a8b6 2881 if isinstance(req, compat_basestring):
67dda517 2882 req = sanitized_Request(req)
19a41fc6 2883 return self._opener.open(req, timeout=self._socket_timeout)
dca08720
PH
2884
2885 def print_debug_header(self):
2886 if not self.params.get('verbose'):
2887 return
62fec3b2 2888
4192b51c 2889 if type('') is not compat_str:
067aa17e 2890 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
4192b51c
PH
2891 self.report_warning(
2892 'Your Python is broken! Update to a newer and supported version')
2893
c6afed48
PH
2894 stdout_encoding = getattr(
2895 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
b0472057 2896 encoding_str = (
734f90bb
PH
2897 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2898 locale.getpreferredencoding(),
2899 sys.getfilesystemencoding(),
c6afed48 2900 stdout_encoding,
b0472057 2901 self.get_encoding()))
4192b51c 2902 write_string(encoding_str, encoding=None)
734f90bb 2903
e5813e53 2904 source = (
2905 '(exe)' if hasattr(sys, 'frozen')
2906 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2907 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2908 else '')
2909 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
e0986e31 2910 if _LAZY_LOADER:
f74980cb 2911 self._write_string('[debug] Lazy loading extractors enabled\n')
2912 if _PLUGIN_CLASSES:
2913 self._write_string(
2914 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
dca08720
PH
2915 try:
2916 sp = subprocess.Popen(
2917 ['git', 'rev-parse', '--short', 'HEAD'],
2918 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2919 cwd=os.path.dirname(os.path.abspath(__file__)))
f5b1bca9 2920 out, err = process_communicate_or_kill(sp)
dca08720
PH
2921 out = out.decode().strip()
2922 if re.match('[0-9a-f]+', out):
f74980cb 2923 self._write_string('[debug] Git HEAD: %s\n' % out)
70a1165b 2924 except Exception:
dca08720
PH
2925 try:
2926 sys.exc_clear()
70a1165b 2927 except Exception:
dca08720 2928 pass
b300cda4
S
2929
2930 def python_implementation():
2931 impl_name = platform.python_implementation()
2932 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2933 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2934 return impl_name
2935
e5813e53 2936 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2937 platform.python_version(),
2938 python_implementation(),
2939 platform.architecture()[0],
b300cda4 2940 platform_name()))
d28b5171 2941
73fac4e9 2942 exe_versions = FFmpegPostProcessor.get_versions(self)
4c83c967 2943 exe_versions['rtmpdump'] = rtmpdump_version()
feee8d32 2944 exe_versions['phantomjs'] = PhantomJSwrapper._version()
d28b5171
PH
2945 exe_str = ', '.join(
2946 '%s %s' % (exe, v)
2947 for exe, v in sorted(exe_versions.items())
2948 if v
2949 )
2950 if not exe_str:
2951 exe_str = 'none'
2952 self._write_string('[debug] exe versions: %s\n' % exe_str)
dca08720
PH
2953
2954 proxy_map = {}
2955 for handler in self._opener.handlers:
2956 if hasattr(handler, 'proxies'):
2957 proxy_map.update(handler.proxies)
734f90bb 2958 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
dca08720 2959
58b1f00d
PH
2960 if self.params.get('call_home', False):
2961 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2962 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
f5546c0b 2963 return
58b1f00d
PH
2964 latest_version = self.urlopen(
2965 'https://yt-dl.org/latest/version').read().decode('utf-8')
2966 if version_tuple(latest_version) > version_tuple(__version__):
2967 self.report_warning(
2968 'You are using an outdated version (newest version: %s)! '
2969 'See https://yt-dl.org/update if you need help updating.' %
2970 latest_version)
2971
e344693b 2972 def _setup_opener(self):
6ad14cab 2973 timeout_val = self.params.get('socket_timeout')
19a41fc6 2974 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
6ad14cab 2975
dca08720
PH
2976 opts_cookiefile = self.params.get('cookiefile')
2977 opts_proxy = self.params.get('proxy')
2978
2979 if opts_cookiefile is None:
2980 self.cookiejar = compat_cookiejar.CookieJar()
2981 else:
590bc6f6 2982 opts_cookiefile = expand_path(opts_cookiefile)
1bab3437 2983 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
dca08720 2984 if os.access(opts_cookiefile, os.R_OK):
1d88b3e6 2985 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
dca08720 2986
6a3f4c3f 2987 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
dca08720
PH
2988 if opts_proxy is not None:
2989 if opts_proxy == '':
2990 proxies = {}
2991 else:
2992 proxies = {'http': opts_proxy, 'https': opts_proxy}
2993 else:
2994 proxies = compat_urllib_request.getproxies()
067aa17e 2995 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
dca08720
PH
2996 if 'http' in proxies and 'https' not in proxies:
2997 proxies['https'] = proxies['http']
91410c9b 2998 proxy_handler = PerRequestProxyHandler(proxies)
a0ddb8a2
PH
2999
3000 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
be4a824d
PH
3001 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
3002 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
fca6dba8 3003 redirect_handler = YoutubeDLRedirectHandler()
8b172c2e 3004 data_handler = compat_urllib_request_DataHandler()
6240b0a2
JMF
3005
3006 # When passing our own FileHandler instance, build_opener won't add the
3007 # default FileHandler and allows us to disable the file protocol, which
3008 # can be used for malicious purposes (see
067aa17e 3009 # https://github.com/ytdl-org/youtube-dl/issues/8227)
6240b0a2
JMF
3010 file_handler = compat_urllib_request.FileHandler()
3011
3012 def file_open(*args, **kwargs):
7a5c1cfe 3013 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
6240b0a2
JMF
3014 file_handler.file_open = file_open
3015
3016 opener = compat_urllib_request.build_opener(
fca6dba8 3017 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2461f79d 3018
dca08720
PH
3019 # Delete the default user-agent header, which would otherwise apply in
3020 # cases where our custom HTTP handler doesn't come into play
067aa17e 3021 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
dca08720
PH
3022 opener.addheaders = []
3023 self._opener = opener
62fec3b2
PH
3024
3025 def encode(self, s):
3026 if isinstance(s, bytes):
3027 return s # Already encoded
3028
3029 try:
3030 return s.encode(self.get_encoding())
3031 except UnicodeEncodeError as err:
3032 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
3033 raise
3034
3035 def get_encoding(self):
3036 encoding = self.params.get('encoding')
3037 if encoding is None:
3038 encoding = preferredencoding()
3039 return encoding
ec82d85a 3040
de6000d9 3041 def _write_thumbnails(self, info_dict, filename): # return the extensions
6c4fd172 3042 write_all = self.params.get('write_all_thumbnails', False)
3043 thumbnails = []
3044 if write_all or self.params.get('writethumbnail', False):
0202b52a 3045 thumbnails = info_dict.get('thumbnails') or []
6c4fd172 3046 multiple = write_all and len(thumbnails) > 1
ec82d85a 3047
0202b52a 3048 ret = []
6c4fd172 3049 for t in thumbnails[::1 if write_all else -1]:
ec82d85a 3050 thumb_ext = determine_ext(t['url'], 'jpg')
6c4fd172 3051 suffix = '%s.' % t['id'] if multiple else ''
3052 thumb_display_id = '%s ' % t['id'] if multiple else ''
dcf64d43 3053 t['filepath'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
ec82d85a 3054
0c3d0f51 3055 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
de6000d9 3056 ret.append(suffix + thumb_ext)
ec82d85a
PH
3057 self.to_screen('[%s] %s: Thumbnail %sis already present' %
3058 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3059 else:
5ef7d9bd 3060 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
ec82d85a
PH
3061 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3062 try:
3063 uf = self.urlopen(t['url'])
d3d89c32 3064 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
ec82d85a 3065 shutil.copyfileobj(uf, thumbf)
de6000d9 3066 ret.append(suffix + thumb_ext)
ec82d85a
PH
3067 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3068 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
3069 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
3070 self.report_warning('Unable to download thumbnail "%s": %s' %
9b9c5355 3071 (t['url'], error_to_compat_str(err)))
6c4fd172 3072 if ret and not write_all:
3073 break
0202b52a 3074 return ret