#!/usr/bin/env python
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random

from string import ascii_letters
from zipimport import zipimporter

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    OUTTMPL_TYPES,
    determine_ext,
    determine_protocol,
    DOT_DESKTOP_LINK_TEMPLATE,
    DOT_URL_LINK_TEMPLATE,
    DOT_WEBLOC_LINK_TEMPLATE,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    EntryNotInPlaylist,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    FORMAT_RE,
    formatSeconds,
    GeoRestrictedError,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    orderedSet,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    render_table,
    replace_extension,
    RejectedVideoReached,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    to_high_limit_path,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieJar,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    process_communicate_or_kill,
)
from .cache import Cache
from .extractor import (
    gen_extractor_classes,
    get_info_extractor,
    _LAZY_LOADER,
    _PLUGIN_CLASSES
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
    get_suitable_downloader,
    shorten_protocol_name
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    # FFmpegSubtitlesConvertorPP,
    get_postprocessor,
    MoveFilesAfterDownloadPP,
)
from .version import __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader doesn't know by itself how
    to extract all the needed information (that is the task of the
    InfoExtractors), so it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

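    A minimal usage sketch (download() is defined later in this class;
    the available option keys are listed below):

        from yt_dlp import YoutubeDL

        ydl_opts = {'format': 'bestvideo+bestaudio/best'}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
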
    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    forceduration:     Force printing duration.
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files.
    format:            Video format code. See "FORMAT SELECTION" for more details.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    format_sort:       How to sort the video formats. See "Sorting Formats"
                       for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       A string is also accepted for backward compatibility
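                       For example, a sketch combining paths and outtmpl
                       (any OUTTMPL_TYPES key can be used the same way):
                           'paths': {'home': '~/Videos'},
                           'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'}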
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be Windows compatible
    ignoreerrors:      Do not stop on download errors
                       (Default True when running yt-dlp,
                       but False when directly accessing YoutubeDL class)
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video metadata to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    writecomments:     Extract video comments. This will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson, etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object, download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    cookiefile:        File name where cookies should be read from and dumped to
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic:Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                         yt_dlp/postprocessor/__init__.py for a list.
                       * _after_move: Optional. If True, run this post_processor
                         after 'MoveFilesAfterDownload'
                       as well as any further keyword arguments for the
                       postprocessor.
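                       For example, a sketch assuming the FFmpegExtractAudio
                       postprocessor and its preferredcodec argument:
                           'postprocessors': [{
                               'key': 'FFmpegExtractAudio',
                               'preferredcodec': 'mp3',
                           }]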
    post_hooks:        A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
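                       A minimal hook sketch (my_hook is illustrative),
                       passed as 'progress_hooks': [my_hook]:
                           def my_hook(d):
                               if d['status'] == 'finished':
                                   print('Downloaded %s' % d['filename'])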
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted. "merge_output_format" is
                       replaced by this extension when given
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval:Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
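                       A sketch that skips videos shorter than one minute:
                           def match_filter(info_dict):
                               duration = info_dict.get('duration')
                               if duration is not None and duration < 60:
                                   return 'Skipping: shorter than 60 seconds'
                               return None  # None means "download"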
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-1 alpha-2 country code that will be
                       used for explicit geographic restriction bypassing via
                       faking X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader
    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
                       or {'m3u8': 'ffmpeg'} instead.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
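                       For example, a sketch sending HLS downloads to ffmpeg
                       and everything else to aria2c (both assumed installed):
                           'external_downloader': {'default': 'aria2c',
                                                   'm3u8': 'ffmpeg'}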

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args, hls_use_mpegts,
    http_chunk_size.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False)
    youtube_include_dash_manifest: If True (default), DASH manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: If True (default), HLS manifests and related
                       data will be downloaded and processed by extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    params = None
    _ies = []
    _pps = {'beforedl': [], 'aftermove': [], 'normal': []}
    __prepare_filename_warned = False
    _first_webpage_request = True
    _download_retcode = None
    _num_downloads = None
    _playlist_level = 0
    _playlist_urls = set()
    _screen_file = None

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options."""
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = {'beforedl': [], 'aftermove': [], 'normal': []}
        self.__prepare_filename_warned = False
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)
        self.archive = set()

        def preload_download_archive(self):
            """Preload the archive, if any is specified"""
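            # Assumption: each archive line holds one '<extractor> <video id>'
            # record, matching what in_download_archive() looks up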
            fn = self.params.get('download_archive')
            if fn is None:
                return False
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        if self.params.get('verbose'):
            self.to_stdout('[debug] Loading archive file %r' % self.params.get('download_archive'))

        preload_download_archive(self)

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        if self.params.get('final_ext'):
            if self.params.get('merge_output_format'):
                self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
            self.params['merge_output_format'] = self.params['final_ext']

        if 'overwrites' in self.params and self.params['overwrites'] is None:
            del self.params['overwrites']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            if 'when' in pp_def:
                when = pp_def['when']
                del pp_def['when']
            else:
                when = 'normal'
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        if not isinstance(ie, type):
            self._ies_instances[ie.ie_key()] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='normal'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader)"""
        self._progress_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        return self.to_stdout(message, skip_eol, check_quiet=True)

    def _write_string(self, s, out=None):
        write_string(s, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, check_quiet=False):
        """Print message to stdout if not in quiet mode."""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not check_quiet or not self.params.get('quiet', False):
            message = self._bidi_workaround(message)
            terminator = ['\n', ''][skip_eol]
            output = message + terminator

            self._write_string(output, self._screen_file)

    def to_stderr(self, message):
        """Print message to stderr."""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            message = self._bidi_workaround(message)
            output = message + '\n'
            self._write_string(output, self._err_file)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def report_warning(self, message):
        '''
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
                _msg_header = '\033[0;33mWARNING:\033[0m'
            else:
                _msg_header = 'WARNING:'
            warning_message = '%s %s' % (_msg_header, message)
            self.to_stderr(warning_message)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
            _msg_header = '\033[0;31mERROR:\033[0m'
        else:
            _msg_header = 'ERROR:'
        error_message = '%s %s' % (_msg_header, message)
        self.trouble(error_message, tb)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        outtmpl_dict.update({
            k: v for k, v in DEFAULT_OUTTMPL.items()
            if not outtmpl_dict.get(k)})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the template and info_dict suitable for substitution (outtmpl % info_dict)"""
        template_dict = dict(info_dict)

        # duration_string
        template_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-')
            if info_dict.get('duration', None) is not None
            else None)

        # epoch
        template_dict['epoch'] = int(time.time())

        # autonumber
        autonumber_size = self.params.get('autonumber_size')
        if autonumber_size is None:
            autonumber_size = 5
        template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads

        # resolution if not defined
        if template_dict.get('resolution') is None:
            if template_dict.get('width') and template_dict.get('height'):
                template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
            elif template_dict.get('height'):
                template_dict['resolution'] = '%sp' % template_dict['height']
            elif template_dict.get('width'):
                template_dict['resolution'] = '%dx?' % template_dict['width']

        if sanitize is None:
            sanitize = lambda k, v: v
        template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                             for k, v in template_dict.items()
                             if v is not None and not isinstance(v, (list, tuple, dict)))
        na = self.params.get('outtmpl_na_placeholder', 'NA')
        template_dict = collections.defaultdict(lambda: na, template_dict)

        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
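        # e.g. with 120 playlist entries, '%(playlist_index)s' is rewritten
        # to '%(playlist_index)03d' so that indices come out zero-padded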
        field_size_compat_map = {
            'playlist_index': len(str(template_dict['n_entries'])),
            'autonumber': autonumber_size,
        }
        FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
        mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
        if mobj:
            outtmpl = re.sub(
                FIELD_SIZE_COMPAT_RE,
                r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                outtmpl)

        numeric_fields = list(self._NUMERIC_FIELDS)

        # Format date
        FORMAT_DATE_RE = FORMAT_RE.format(r'(?P<key>(?P<field>\w+)>(?P<format>.+?))')
        for mobj in re.finditer(FORMAT_DATE_RE, outtmpl):
            conv_type, field, frmt, key = mobj.group('type', 'field', 'format', 'key')
            if key in template_dict:
                continue
            value = strftime_or_none(template_dict.get(field), frmt, na)
            if conv_type in 'crs':  # string
                value = sanitize(field, value)
            else:  # number
                numeric_fields.append(key)
                value = float_or_none(value, default=None)
            if value is not None:
                template_dict[key] = value

        # Missing numeric fields used together with integer presentation types
        # in format specification will break the argument substitution since
        # string NA placeholder is returned for missing fields. We will patch
        # output template for missing fields to meet string presentation type.
        for numeric_field in numeric_fields:
            if numeric_field not in template_dict:
                outtmpl = re.sub(
                    FORMAT_RE.format(re.escape(numeric_field)),
                    r'%({0})s'.format(numeric_field), outtmpl)

        return outtmpl, template_dict

    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
            outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly that is not what we want since we need to keep
            # '%%' intact for template dict substitution step. Working around
            # with boundary-alike separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
            filename = expand_path(outtmpl).replace(sep, '') % template_dict

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if force_ext is not None:
                filename = replace_extension(filename, force_ext, template_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        filename = self._prepare_filename(info_dict, dir_type or 'default')

        if warn and not self.__prepare_filename_warned:
            if not paths:
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout')
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template')
            self.__prepare_filename_warned = True
        if filename == '-' or not filename:
            return filename

        homepath = expand_path(paths.get('home', '').strip())
        assert isinstance(homepath, compat_str)
        subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
        assert isinstance(subdir, compat_str)
        path = os.path.join(homepath, subdir, filename)

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    def _match_entry(self, info_dict, incomplete):
        """ Returns None if the file should be downloaded """

        def check_filter():
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title
            if self.in_download_archive(info_dict):
                return '%s has already been recorded in archive' % video_title

            if not incomplete:
                match_filter = self.params.get('match_filter')
                if match_filter is not None:
                    ret = match_filter(info_dict)
                    if ret is not None:
                        return ret
            return None

        reason = check_filter()
        if reason is not None:
            self.to_screen('[download] ' + reason)
            if reason.endswith('has already been recorded in archive') and self.params.get('break_on_existing', False):
                raise ExistingVideoReached()
            elif self.params.get('break_on_reject', False):
                raise RejectedVideoReached()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, info_dict=None, extra_info={},
                     process=True, force_generic_extractor=False):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        '''

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            ie_key = ie.ie_key()
            ie = self.get_info_extractor(ie_key)
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                temp_id = str_or_none(
                    ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
                    else ie._match_id(url))
            except (AssertionError, IndexError, AttributeError):
                temp_id = None
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen("[%s] %s: has already been recorded in archive" % (
                    ie_key, temp_id))
                break
            return self.__extract_info(url, ie, download, extra_info, process, info_dict)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def __handle_extraction_exceptions(func):
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process, info_dict):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if info_dict:
            if info_dict.get('id'):
                ie_result['id'] = info_dict['id']
            if info_dict.get('title'):
                ie_result['title'] = info_dict['title']
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'webpage_url': url,
            'webpage_url_basename': url_basename(url),
            'extractor_key': ie.ie_key(),
        })

    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download, info_dict=ie_result,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    def __process_playlist(self, ie_result, download):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        if 'entries' not in ie_result:
            raise EntryNotInPlaylist()
        incomplete_entries = bool(ie_result.get('requested_entries'))
        if incomplete_entries:
            def fill_missing_entries(entries, indexes):
                ret = [None] * max(indexes)  # max(indexes), not max(*indexes): indexes may hold a single element
                for i, entry in zip(indexes, entries):
                    ret[i - 1] = entry
                return ret
            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])

        playlist_results = []

        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
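            # e.g. a playlist_items value of '1-3,7' yields [1, 2, 3, 7]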
            def iter_playlistitems(format):
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']

        def make_playlistitems_entries(list_ie_entries):
            num_entries = len(list_ie_entries)
            for i in playlistitems:
                if -num_entries < i <= num_entries:
                    yield list_ie_entries[i - 1]
                elif incomplete_entries:
                    raise EntryNotInPlaylist()

        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = list(make_playlistitems_entries(ie_entries))
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            msg = 'Collected %d videos; downloading %d of them' % (n_all_entries, n_entries)
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(
                        item - 1, item
                    ))
            else:
                entries = ie_entries.getslice(
                    playliststart, playlistend)
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries
        else:  # iterable
            if playlistitems:
                entries = list(make_playlistitems_entries(list(itertools.islice(
                    ie_entries, 0, max(playlistitems)))))
            else:
                entries = list(itertools.islice(
                    ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries

        if any((entry is None for entry in entries)):
            raise EntryNotInPlaylist()
        if not playlistitems and (playliststart or playlistend):
            playlistitems = list(range(1 + playliststart, 1 + playliststart + len(entries)))
        ie_result['entries'] = entries
        ie_result['requested_entries'] = playlistitems

        if self.params.get('allow_playlist_files', True):
            ie_copy = {
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': 0
            }
            ie_copy.update(dict(ie_result))

            if self.params.get('writeinfojson', False):
                infofn = self.prepare_filename(ie_copy, 'pl_infojson')
                if not self._ensure_dir_exists(encodeFilename(infofn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
                    self.to_screen('[info] Playlist metadata is already present')
                else:
                    self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
                    try:
                        write_json_file(self.filter_requested_info(ie_result, self.params.get('clean_infojson', True)), infofn)
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist metadata to JSON file ' + infofn)

            if self.params.get('writedescription', False):
                descfn = self.prepare_filename(ie_copy, 'pl_description')
                if not self._ensure_dir_exists(encodeFilename(descfn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
                    self.to_screen('[info] Playlist description is already present')
                elif ie_result.get('description') is None:
                    self.report_warning('There\'s no playlist description to write.')
                else:
                    try:
                        self.to_screen('[info] Writing playlist description to: ' + descfn)
                        with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                            descfile.write(ie_result['description'])
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist description file ' + descfn)
                        return

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]
        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg))
        for i, entry in enumerate(entries, 1):
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            extra = {
                'n_entries': n_entries,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': playlistitems[i - 1] if playlistitems else i,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            if self._match_entry(entry, incomplete=True) is not None:
                continue

            entry_result = self.__process_iterable_entry(entry, download, extra)
            # TODO: skip failed (empty) entries?
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
        return ie_result

    @__handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

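        # Illustrative specs: 'height<=720', 'filesize>100M', 'ext=mp4';
        # a '?' after the operator (e.g. 'height<=?720') also accepts
        # formats where the field is unknown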
        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
            \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
            $
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.search(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
            }
            str_operator_rex = re.compile(r'''(?x)
                \s*(?P<key>[a-zA-Z0-9._-]+)
                \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
                \s*(?P<value>[a-zA-Z0-9._-]+)
                \s*$
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.search(filter_spec)
            if m:
                comparison_value = m.group('value')
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise ValueError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate', False)
            and download
            and (
                not can_merge()
                or info_dict.get('is_live', False)
                or self.outtmpl_dict['default'] == '-'))

        return (
            'best/bestvideo+bestaudio'
            if prefer_best
            else 'bestvideo*+bestaudio/best'
            if not self.params.get('allow_multiple_audio_streams', False)
            else 'bestvideo+bestaudio/best')

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
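        # Illustrative format_specs and the selector trees they parse to:
        #   'best'                    -> SINGLE
        #   'bestvideo+bestaudio'     -> MERGE
        #   'best[height<=720]/worst' -> PICKFIRST, with a filter on 'best'
        #   '(bestvideo/bestaudio)'   -> GROUP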
1455
1456 allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
1457 'video': self.params.get('allow_multiple_video_streams', False)}
1458
1459 def _parse_filter(tokens):
1460 filter_parts = []
1461 for type, string, start, _, _ in tokens:
1462 if type == tokenize.OP and string == ']':
1463 return ''.join(filter_parts)
1464 else:
1465 filter_parts.append(string)
1466
1467 def _remove_unused_ops(tokens):
1468 # Remove operators that we don't use and join them with the surrounding strings
1469 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1470 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1471 last_string, last_start, last_end, last_line = None, None, None, None
1472 for type, string, start, end, line in tokens:
1473 if type == tokenize.OP and string == '[':
1474 if last_string:
1475 yield tokenize.NAME, last_string, last_start, last_end, last_line
1476 last_string = None
1477 yield type, string, start, end, line
1478 # everything inside brackets will be handled by _parse_filter
1479 for type, string, start, end, line in tokens:
1480 yield type, string, start, end, line
1481 if type == tokenize.OP and string == ']':
1482 break
1483 elif type == tokenize.OP and string in ALLOWED_OPS:
1484 if last_string:
1485 yield tokenize.NAME, last_string, last_start, last_end, last_line
1486 last_string = None
1487 yield type, string, start, end, line
1488 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1489 if not last_string:
1490 last_string = string
1491 last_start = start
1492 last_end = end
1493 else:
1494 last_string += string
1495 if last_string:
1496 yield tokenize.NAME, last_string, last_start, last_end, last_line
1497
1498 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1499 selectors = []
1500 current_selector = None
1501 for type, string, start, _, _ in tokens:
1502 # ENCODING is only defined in Python 3.x
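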
1503 if type == getattr(tokenize, 'ENCODING', None):
1504 continue
1505 elif type in [tokenize.NAME, tokenize.NUMBER]:
1506 current_selector = FormatSelector(SINGLE, string, [])
1507 elif type == tokenize.OP:
1508 if string == ')':
1509 if not inside_group:
1510 # ')' will be handled by the parentheses group
1511 tokens.restore_last_token()
1512 break
1513 elif inside_merge and string in ['/', ',']:
1514 tokens.restore_last_token()
1515 break
1516 elif inside_choice and string == ',':
1517 tokens.restore_last_token()
1518 break
1519 elif string == ',':
1520 if not current_selector:
1521 raise syntax_error('"," must follow a format selector', start)
1522 selectors.append(current_selector)
1523 current_selector = None
1524 elif string == '/':
1525 if not current_selector:
1526 raise syntax_error('"/" must follow a format selector', start)
1527 first_choice = current_selector
1528 second_choice = _parse_format_selection(tokens, inside_choice=True)
1529 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
1530 elif string == '[':
1531 if not current_selector:
1532 current_selector = FormatSelector(SINGLE, 'best', [])
1533 format_filter = _parse_filter(tokens)
1534 current_selector.filters.append(format_filter)
1535 elif string == '(':
1536 if current_selector:
1537 raise syntax_error('Unexpected "("', start)
1538 group = _parse_format_selection(tokens, inside_group=True)
1539 current_selector = FormatSelector(GROUP, group, [])
1540 elif string == '+':
1541 if not current_selector:
1542 raise syntax_error('Unexpected "+"', start)
1543 selector_1 = current_selector
1544 selector_2 = _parse_format_selection(tokens, inside_merge=True)
1545 if not selector_2:
1546 raise syntax_error('Expected a selector', start)
1547 current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
1548 else:
1549 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1550 elif type == tokenize.ENDMARKER:
1551 break
1552 if current_selector:
1553 selectors.append(current_selector)
1554 return selectors
1555
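# _merge combines a (video, audio) format pair into one synthetic "format"
# dict: e.g. a video-only mp4 plus an m4a audio track yields
# requested_formats=[video, audio], format_id '<video_id>+<audio_id>' and
# ext 'mp4' (or the configured merge_output_format)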
1556 def _merge(formats_pair):
1557 format_1, format_2 = formats_pair
1558
1559 formats_info = []
1560 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1561 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1562
1563 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1564 get_no_more = {'video': False, 'audio': False}
1565 for fmt_info in list(formats_info):  # iterate over a copy; entries may be removed below
1566 for aud_vid in ['audio', 'video']:
1567 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1568 if get_no_more[aud_vid] and fmt_info in formats_info:
1569 formats_info.remove(fmt_info)
1570 get_no_more[aud_vid] = True
1571
1572 if len(formats_info) == 1:
1573 return formats_info[0]
1574
1575 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1576 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1577
1578 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1579 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1580
1581 output_ext = self.params.get('merge_output_format')
1582 if not output_ext:
1583 if the_only_video:
1584 output_ext = the_only_video['ext']
1585 elif the_only_audio and not video_fmts:
1586 output_ext = the_only_audio['ext']
1587 else:
1588 output_ext = 'mkv'
1589
1590 new_dict = {
1591 'requested_formats': formats_info,
1592 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1593 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
1594 'ext': output_ext,
1595 }
1596
1597 if the_only_video:
1598 new_dict.update({
1599 'width': the_only_video.get('width'),
1600 'height': the_only_video.get('height'),
1601 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1602 'fps': the_only_video.get('fps'),
1603 'vcodec': the_only_video.get('vcodec'),
1604 'vbr': the_only_video.get('vbr'),
1605 'stretched_ratio': the_only_video.get('stretched_ratio'),
1606 })
1607
1608 if the_only_audio:
1609 new_dict.update({
1610 'acodec': the_only_audio.get('acodec'),
1611 'abr': the_only_audio.get('abr'),
1612 })
1613
1614 return new_dict
1615
1616 def _build_selector_function(selector):
1617 if isinstance(selector, list): # ,
1618 fs = [_build_selector_function(s) for s in selector]
1619
1620 def selector_function(ctx):
1621 for f in fs:
1622 for format in f(ctx):
1623 yield format
1624 return selector_function
1625
1626 elif selector.type == GROUP: # ()
1627 selector_function = _build_selector_function(selector.selector)
1628
1629 elif selector.type == PICKFIRST: # /
1630 fs = [_build_selector_function(s) for s in selector.selector]
1631
1632 def selector_function(ctx):
1633 for f in fs:
1634 picked_formats = list(f(ctx))
1635 if picked_formats:
1636 return picked_formats
1637 return []
1638
1639 elif selector.type == SINGLE: # atom
1640 format_spec = (selector.selector if selector.selector is not None else 'best').lower()
1641
1642 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
1643 if format_spec == 'all':
1644 def selector_function(ctx):
1645 formats = list(ctx['formats'])
1646 if formats:
1647 for f in formats:
1648 yield f
1649 elif format_spec == 'mergeall':
1650 def selector_function(ctx):
1651 formats = list(ctx['formats'])
1652 merged_format = formats[0]
1653 for f in formats[1:]:
1654 merged_format = _merge((merged_format, f))
1655 yield merged_format
1656
1657 else:
1658 format_fallback = False
1659 mobj = re.match(
1660 r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
1661 format_spec)
1662 if mobj is not None:
1663 format_idx = int_or_none(mobj.group('n'), default=1)
1664 format_idx = format_idx - 1 if mobj.group('bw')[0] == 'w' else -format_idx
1665 format_type = (mobj.group('type') or [None])[0]
1666 not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
1667 format_modified = mobj.group('mod') is not None
1668
1669 format_fallback = not format_type and not format_modified # for b, w
1670 filter_f = (
1671 (lambda f: f.get('%scodec' % format_type) != 'none')
1672 if format_type and format_modified # bv*, ba*, wv*, wa*
1673 else (lambda f: f.get('%scodec' % not_format_type) == 'none')
1674 if format_type # bv, ba, wv, wa
1675 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1676 if not format_modified # b, w
1677 else None) # b*, w*
1678 else:
1679 format_idx = -1
1680 filter_f = ((lambda f: f.get('ext') == format_spec)
1681 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1682 else (lambda f: f.get('format_id') == format_spec)) # id
1683
1684 def selector_function(ctx):
1685 formats = list(ctx['formats'])
1686 if not formats:
1687 return
1688 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
1689 n = len(matches)
1690 if -n <= format_idx < n:
1691 yield matches[format_idx]
1692 elif format_fallback and ctx['incomplete_formats']:
1693 # for extractors with incomplete formats (audio-only (soundcloud)
1694 # or video-only (imgur)), best/worst will fall back to the
1695 # best/worst {video,audio}-only format
1696 n = len(formats)
1697 if -n <= format_idx < n:
1698 yield formats[format_idx]
1699
1700 elif selector.type == MERGE: # +
1701 selector_1, selector_2 = map(_build_selector_function, selector.selector)
1702
1703 def selector_function(ctx):
1704 for pair in itertools.product(
1705 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
1706 yield _merge(pair)
1707
1708 filters = [self._build_format_filter(f) for f in selector.filters]
1709
1710 def final_selector(ctx):
1711 ctx_copy = copy.deepcopy(ctx)
1712 for _filter in filters:
1713 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1714 return selector_function(ctx_copy)
1715 return final_selector
1716
1717 stream = io.BytesIO(format_spec.encode('utf-8'))
1718 try:
1719 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
1720 except tokenize.TokenError:
1721 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1722
1723 class TokenIterator(object):
1724 def __init__(self, tokens):
1725 self.tokens = tokens
1726 self.counter = 0
1727
1728 def __iter__(self):
1729 return self
1730
1731 def __next__(self):
1732 if self.counter >= len(self.tokens):
1733 raise StopIteration()
1734 value = self.tokens[self.counter]
1735 self.counter += 1
1736 return value
1737
1738 next = __next__
1739
1740 def restore_last_token(self):
1741 self.counter -= 1
1742
1743 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
1744 return _build_selector_function(parsed_selector)
1745
1746 def _calc_headers(self, info_dict):
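# Compute per-format request headers: std_headers overridden by the format's
# own http_headers, plus Cookie and X-Forwarded-For when available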
1747 res = std_headers.copy()
1748
1749 add_headers = info_dict.get('http_headers')
1750 if add_headers:
1751 res.update(add_headers)
1752
1753 cookies = self._calc_cookies(info_dict)
1754 if cookies:
1755 res['Cookie'] = cookies
1756
1757 if 'X-Forwarded-For' not in res:
1758 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1759 if x_forwarded_for_ip:
1760 res['X-Forwarded-For'] = x_forwarded_for_ip
1761
1762 return res
1763
1764 def _calc_cookies(self, info_dict):
1765 pr = sanitized_Request(info_dict['url'])
1766 self.cookiejar.add_cookie_header(pr)
1767 return pr.get_header('Cookie')
1768
1769 def process_video_result(self, info_dict, download=True):
1770 assert info_dict.get('_type', 'video') == 'video'
1771
1772 if 'id' not in info_dict:
1773 raise ExtractorError('Missing "id" field in extractor result')
1774 if 'title' not in info_dict:
1775 raise ExtractorError('Missing "title" field in extractor result')
1776
1777 def report_force_conversion(field, field_not, conversion):
1778 self.report_warning(
1779 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1780 % (field, field_not, conversion))
1781
1782 def sanitize_string_field(info, string_field):
1783 field = info.get(string_field)
1784 if field is None or isinstance(field, compat_str):
1785 return
1786 report_force_conversion(string_field, 'a string', 'string')
1787 info[string_field] = compat_str(field)
1788
1789 def sanitize_numeric_fields(info):
1790 for numeric_field in self._NUMERIC_FIELDS:
1791 field = info.get(numeric_field)
1792 if field is None or isinstance(field, compat_numeric_types):
1793 continue
1794 report_force_conversion(numeric_field, 'numeric', 'int')
1795 info[numeric_field] = int_or_none(field)
1796
1797 sanitize_string_field(info_dict, 'id')
1798 sanitize_numeric_fields(info_dict)
1799
1800 if 'playlist' not in info_dict:
1801 # It isn't part of a playlist
1802 info_dict['playlist'] = None
1803 info_dict['playlist_index'] = None
1804
1805 thumbnails = info_dict.get('thumbnails')
1806 if thumbnails is None:
1807 thumbnail = info_dict.get('thumbnail')
1808 if thumbnail:
1809 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
1810 if thumbnails:
1811 thumbnails.sort(key=lambda t: (
1812 t.get('preference') if t.get('preference') is not None else -1,
1813 t.get('width') if t.get('width') is not None else -1,
1814 t.get('height') if t.get('height') is not None else -1,
1815 t.get('id') if t.get('id') is not None else '', t.get('url')))
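# ascending sort, i.e. worst-to-best: missing preference/width/height sort
# first; the last entry later becomes the default 'thumbnail' field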
1816 for i, t in enumerate(thumbnails):
1817 t['url'] = sanitize_url(t['url'])
1818 if t.get('width') and t.get('height'):
1819 t['resolution'] = '%dx%d' % (t['width'], t['height'])
1820 if t.get('id') is None:
1821 t['id'] = '%d' % i
1822
1823 if self.params.get('list_thumbnails'):
1824 self.list_thumbnails(info_dict)
1825 return
1826
1827 thumbnail = info_dict.get('thumbnail')
1828 if thumbnail:
1829 info_dict['thumbnail'] = sanitize_url(thumbnail)
1830 elif thumbnails:
1831 info_dict['thumbnail'] = thumbnails[-1]['url']
1832
1833 if 'display_id' not in info_dict and 'id' in info_dict:
1834 info_dict['display_id'] = info_dict['id']
1835
1836 for ts_key, date_key in (
1837 ('timestamp', 'upload_date'),
1838 ('release_timestamp', 'release_date'),
1839 ):
1840 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
1841 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1842 # see http://bugs.python.org/issue1646728)
1843 try:
1844 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
1845 info_dict[date_key] = upload_date.strftime('%Y%m%d')
1846 except (ValueError, OverflowError, OSError):
1847 pass
1848
1849 # Auto generate title fields corresponding to the *_number fields when missing
1850 # in order to always have clean titles. This is very common for TV series.
1851 for field in ('chapter', 'season', 'episode'):
1852 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1853 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1854
1855 for cc_kind in ('subtitles', 'automatic_captions'):
1856 cc = info_dict.get(cc_kind)
1857 if cc:
1858 for _, subtitle in cc.items():
1859 for subtitle_format in subtitle:
1860 if subtitle_format.get('url'):
1861 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1862 if subtitle_format.get('ext') is None:
1863 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1864
1865 automatic_captions = info_dict.get('automatic_captions')
1866 subtitles = info_dict.get('subtitles')
1867
1868 if self.params.get('listsubtitles', False):
1869 if 'automatic_captions' in info_dict:
1870 self.list_subtitles(
1871 info_dict['id'], automatic_captions, 'automatic captions')
1872 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
1873 return
1874
1875 info_dict['requested_subtitles'] = self.process_subtitles(
1876 info_dict['id'], subtitles, automatic_captions)
1877
1878 # We now pick which formats have to be downloaded
1879 if info_dict.get('formats') is None:
1880 # There's only one format available
1881 formats = [info_dict]
1882 else:
1883 formats = info_dict['formats']
1884
1885 if not formats:
1886 raise ExtractorError('No video formats found!')
1887
1888 def is_wellformed(f):
1889 url = f.get('url')
1890 if not url:
1891 self.report_warning(
1892 '"url" field is missing or empty - skipping format, '
1893 'there is an error in extractor')
1894 return False
1895 if isinstance(url, bytes):
1896 sanitize_string_field(f, 'url')
1897 return True
1898
1899 # Filter out malformed formats for better extraction robustness
1900 formats = list(filter(is_wellformed, formats))
1901
1902 formats_dict = {}
1903
1904 # We check that all the formats have the format and format_id fields
1905 for i, format in enumerate(formats):
1906 sanitize_string_field(format, 'format_id')
1907 sanitize_numeric_fields(format)
1908 format['url'] = sanitize_url(format['url'])
1909 if not format.get('format_id'):
1910 format['format_id'] = compat_str(i)
1911 else:
1912 # Sanitize format_id from characters used in format selector expression
1913 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
1914 format_id = format['format_id']
1915 if format_id not in formats_dict:
1916 formats_dict[format_id] = []
1917 formats_dict[format_id].append(format)
1918
1919 # Make sure all formats have unique format_id
1920 for format_id, ambiguous_formats in formats_dict.items():
1921 if len(ambiguous_formats) > 1:
1922 for i, format in enumerate(ambiguous_formats):
1923 format['format_id'] = '%s-%d' % (format_id, i)
1924
1925 for i, format in enumerate(formats):
1926 if format.get('format') is None:
1927 format['format'] = '{id} - {res}{note}'.format(
1928 id=format['format_id'],
1929 res=self.format_resolution(format),
1930 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
1931 )
1932 # Automatically determine file extension if missing
1933 if format.get('ext') is None:
1934 format['ext'] = determine_ext(format['url']).lower()
1935 # Automatically determine protocol if missing (useful for format
1936 # selection purposes)
1937 if format.get('protocol') is None:
1938 format['protocol'] = determine_protocol(format)
1939 # Add HTTP headers, so that external programs can use them from the
1940 # json output
1941 full_format_info = info_dict.copy()
1942 full_format_info.update(format)
1943 format['http_headers'] = self._calc_headers(full_format_info)
1944 # Remove private housekeeping stuff
1945 if '__x_forwarded_for_ip' in info_dict:
1946 del info_dict['__x_forwarded_for_ip']
1947
1948 # TODO Central sorting goes here
1949
1950 if formats[0] is not info_dict:
1951 # only set the 'formats' field if the original info_dict lists them;
1952 # otherwise we would end up with a circular reference: the first (and only)
1953 # element of the 'formats' field in info_dict would be info_dict itself,
1954 # which can't be exported to JSON
1955 info_dict['formats'] = formats
1956 if self.params.get('listformats'):
1957 self.list_formats(info_dict)
1958 return
1959
1960 req_format = self.params.get('format')
1961 if req_format is None:
1962 req_format = self._default_format_spec(info_dict, download=download)
1963 if self.params.get('verbose'):
1964 self.to_screen('[debug] Default format spec: %s' % req_format)
1965
1966 format_selector = self.build_format_selector(req_format)
1967
1968 # While in format selection we may need access to the original
1969 # format set in order to calculate some metrics or do some processing.
1970 # For now we need to be able to guess whether original formats provided
1971 # by extractor are incomplete or not (i.e. whether extractor provides only
1972 # video-only or audio-only formats) for proper formats selection for
1973 # extractors with such incomplete formats (see
1974 # https://github.com/ytdl-org/youtube-dl/pull/5556).
1975 # Since formats may be filtered during format selection and may not match
1976 # the original formats the results may be incorrect. Thus original formats
1977 # or pre-calculated metrics should be passed to format selection routines
1978 # as well.
1979 # We will pass a context object containing all necessary additional data
1980 # instead of just formats.
1981 # This fixes incorrect format selection issue (see
1982 # https://github.com/ytdl-org/youtube-dl/issues/10083).
1983 incomplete_formats = (
1984 # All formats are video-only or
1985 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
1986 # all formats are audio-only
1987 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
1988
1989 ctx = {
1990 'formats': formats,
1991 'incomplete_formats': incomplete_formats,
1992 }
1993
1994 formats_to_download = list(format_selector(ctx))
1995 if not formats_to_download:
1996 raise ExtractorError('requested format not available',
1997 expected=True)
1998
1999 if download:
2000 self.to_screen('[info] Downloading format(s) %s' % ', '.join([f['format_id'] for f in formats_to_download]))
2001 if len(formats_to_download) > 1:
2002 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
2003 for format in formats_to_download:
2004 new_info = dict(info_dict)
2005 new_info.update(format)
2006 self.process_info(new_info)
2007 # We update the info dict with the best quality format (backwards compatibility)
2008 info_dict.update(formats_to_download[-1])
2009 return info_dict
2010
2011 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
2012 """Select the requested subtitles and their format"""
2013 available_subs = {}
2014 if normal_subtitles and self.params.get('writesubtitles'):
2015 available_subs.update(normal_subtitles)
2016 if automatic_captions and self.params.get('writeautomaticsub'):
2017 for lang, cap_info in automatic_captions.items():
2018 if lang not in available_subs:
2019 available_subs[lang] = cap_info
2020
2021 if ((not self.params.get('writesubtitles')
2022 and not self.params.get('writeautomaticsub'))
2023 or not available_subs):
2024 return None
2025
2026 if self.params.get('allsubtitles', False):
2027 requested_langs = available_subs.keys()
2028 else:
2029 if self.params.get('subtitleslangs', False):
2030 requested_langs = self.params.get('subtitleslangs')
2031 elif 'en' in available_subs:
2032 requested_langs = ['en']
2033 else:
2034 requested_langs = [list(available_subs.keys())[0]]
2035
2036 formats_query = self.params.get('subtitlesformat', 'best')
2037 formats_preference = formats_query.split('/') if formats_query else []
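# e.g. subtitlesformat 'srt/ass/best': try srt, then ass, then fall back
# to the best available format (the last one listed by the extractor)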
2038 subs = {}
2039 for lang in requested_langs:
2040 formats = available_subs.get(lang)
2041 if formats is None:
2042 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2043 continue
2044 for ext in formats_preference:
2045 if ext == 'best':
2046 f = formats[-1]
2047 break
2048 matches = list(filter(lambda f: f['ext'] == ext, formats))
2049 if matches:
2050 f = matches[-1]
2051 break
2052 else:
2053 f = formats[-1]
2054 self.report_warning(
2055 'No subtitle format found matching "%s" for language %s, '
2056 'using %s' % (formats_query, lang, f['ext']))
2057 subs[lang] = f
2058 return subs
2059
2060 def __forced_printings(self, info_dict, filename, incomplete):
2061 def print_mandatory(field):
2062 if (self.params.get('force%s' % field, False)
2063 and (not incomplete or info_dict.get(field) is not None)):
2064 self.to_stdout(info_dict[field])
2065
2066 def print_optional(field):
2067 if (self.params.get('force%s' % field, False)
2068 and info_dict.get(field) is not None):
2069 self.to_stdout(info_dict[field])
2070
2071 print_mandatory('title')
2072 print_mandatory('id')
2073 if self.params.get('forceurl', False) and not incomplete:
2074 if info_dict.get('requested_formats') is not None:
2075 for f in info_dict['requested_formats']:
2076 self.to_stdout(f['url'] + f.get('play_path', ''))
2077 else:
2078 # For RTMP URLs, also include the playpath
2079 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2080 print_optional('thumbnail')
2081 print_optional('description')
2082 if self.params.get('forcefilename', False) and filename is not None:
2083 self.to_stdout(filename)
2084 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2085 self.to_stdout(formatSeconds(info_dict['duration']))
2086 print_mandatory('format')
2087 if self.params.get('forcejson', False):
2088 self.post_extract(info_dict)
2089 self.to_stdout(json.dumps(info_dict, default=repr))
2090
2091 def process_info(self, info_dict):
2092 """Process a single resolved IE result."""
2093
2094 assert info_dict.get('_type', 'video') == 'video'
2095
2096 info_dict.setdefault('__postprocessors', [])
2097
2098 max_downloads = self.params.get('max_downloads')
2099 if max_downloads is not None:
2100 if self._num_downloads >= int(max_downloads):
2101 raise MaxDownloadsReached()
2102
2103 # TODO: backward compatibility, to be removed
2104 info_dict['fulltitle'] = info_dict['title']
2105
2106 if 'format' not in info_dict:
2107 info_dict['format'] = info_dict['ext']
2108
2109 if self._match_entry(info_dict, incomplete=False) is not None:
2110 return
2111
2112 self.post_extract(info_dict)
2113 self._num_downloads += 1
2114
2115 info_dict = self.pre_process(info_dict)
2116
2117 # info_dict['_filename'] needs to be set for backward compatibility
2118 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2119 temp_filename = self.prepare_filename(info_dict, 'temp')
2120 files_to_move = {}
2121 skip_dl = self.params.get('skip_download', False)
2122
2123 # Forced printings
2124 self.__forced_printings(info_dict, full_filename, incomplete=False)
2125
2126 if self.params.get('simulate', False):
2127 if self.params.get('force_write_download_archive', False):
2128 self.record_download_archive(info_dict)
2129
2130 # Do nothing else if in simulate mode
2131 return
2132
2133 if full_filename is None:
2134 return
2135
2136 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2137 return
2138 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2139 return
2140
2141 if self.params.get('writedescription', False):
2142 descfn = self.prepare_filename(info_dict, 'description')
2143 if not self._ensure_dir_exists(encodeFilename(descfn)):
2144 return
2145 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
2146 self.to_screen('[info] Video description is already present')
2147 elif info_dict.get('description') is None:
2148 self.report_warning('There\'s no description to write.')
2149 else:
2150 try:
2151 self.to_screen('[info] Writing video description to: ' + descfn)
2152 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2153 descfile.write(info_dict['description'])
2154 except (OSError, IOError):
2155 self.report_error('Cannot write description file ' + descfn)
2156 return
2157
2158 if self.params.get('writeannotations', False):
2159 annofn = self.prepare_filename(info_dict, 'annotation')
2160 if not self._ensure_dir_exists(encodeFilename(annofn)):
2161 return
2162 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2163 self.to_screen('[info] Video annotations are already present')
2164 elif not info_dict.get('annotations'):
2165 self.report_warning('There are no annotations to write.')
2166 else:
2167 try:
2168 self.to_screen('[info] Writing video annotations to: ' + annofn)
2169 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2170 annofile.write(info_dict['annotations'])
2171 except (KeyError, TypeError):
2172 self.report_warning('There are no annotations to write.')
2173 except (OSError, IOError):
2174 self.report_error('Cannot write annotations file: ' + annofn)
2175 return
2176
2177 def dl(name, info, subtitle=False):
2178 fd = get_suitable_downloader(info, self.params)(self, self.params)
2179 for ph in self._progress_hooks:
2180 fd.add_progress_hook(ph)
2181 if self.params.get('verbose'):
2182 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
2183 new_info = dict(info)
2184 if new_info.get('http_headers') is None:
2185 new_info['http_headers'] = self._calc_headers(new_info)
2186 return fd.download(name, new_info, subtitle)
2187
2188 subtitles_are_requested = any([self.params.get('writesubtitles', False),
2189 self.params.get('writeautomaticsub')])
2190
2191 if subtitles_are_requested and info_dict.get('requested_subtitles'):
2192 # Subtitle download errors are already handled as non-fatal in the relevant IE,
2193 # so extraction silently continues when the IE does not support subtitles
2194 subtitles = info_dict['requested_subtitles']
2195 # ie = self.get_info_extractor(info_dict['extractor_key'])
2196 for sub_lang, sub_info in subtitles.items():
2197 sub_format = sub_info['ext']
2198 sub_fn = self.prepare_filename(info_dict, 'subtitle')
2199 sub_filename = subtitles_filename(
2200 temp_filename if not skip_dl else sub_fn,
2201 sub_lang, sub_format, info_dict.get('ext'))
2202 sub_filename_final = subtitles_filename(sub_fn, sub_lang, sub_format, info_dict.get('ext'))
2203 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
2204 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
2205 sub_info['filepath'] = sub_filename
2206 files_to_move[sub_filename] = sub_filename_final
2207 else:
2208 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
2209 if sub_info.get('data') is not None:
2210 try:
2211 # Use newline='' to prevent conversion of newline characters
2212 # See https://github.com/ytdl-org/youtube-dl/issues/10268
2213 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2214 subfile.write(sub_info['data'])
2215 sub_info['filepath'] = sub_filename
2216 files_to_move[sub_filename] = sub_filename_final
2217 except (OSError, IOError):
2218 self.report_error('Cannot write subtitles file ' + sub_filename)
2219 return
2220 else:
2221 try:
2222 dl(sub_filename, sub_info.copy(), subtitle=True)
2223 sub_info['filepath'] = sub_filename
2224 files_to_move[sub_filename] = sub_filename_final
2225 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2226 self.report_warning('Unable to download subtitle for "%s": %s' %
2227 (sub_lang, error_to_compat_str(err)))
2228 continue
2229
2230 if skip_dl:
2231 if self.params.get('convertsubtitles', False):
2232 # subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
2233 filename_real_ext = os.path.splitext(full_filename)[1][1:]
2234 filename_wo_ext = (
2235 os.path.splitext(full_filename)[0]
2236 if filename_real_ext == info_dict['ext']
2237 else full_filename)
2238 afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
2239 # if subconv.available:
2240 # info_dict['__postprocessors'].append(subconv)
2241 if os.path.exists(encodeFilename(afilename)):
2242 self.to_screen(
2243 '[download] %s has already been downloaded and '
2244 'converted' % afilename)
2245 else:
2246 try:
2247 self.post_process(full_filename, info_dict, files_to_move)
2248 except PostProcessingError as err:
2249 self.report_error('Postprocessing: %s' % str(err))
2250 return
2251
2252 if self.params.get('writeinfojson', False):
2253 infofn = self.prepare_filename(info_dict, 'infojson')
2254 if not self._ensure_dir_exists(encodeFilename(infofn)):
2255 return
2256 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
2257 self.to_screen('[info] Video metadata is already present')
2258 else:
2259 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
2260 try:
2261 write_json_file(self.filter_requested_info(info_dict, self.params.get('clean_infojson', True)), infofn)
2262 except (OSError, IOError):
2263 self.report_error('Cannot write video metadata to JSON file ' + infofn)
2264 return
2265 info_dict['__infojson_filename'] = infofn
2266
2267 thumbfn = self.prepare_filename(info_dict, 'thumbnail')
2268 thumb_fn_temp = temp_filename if not skip_dl else thumbfn
2269 for thumb_ext in self._write_thumbnails(info_dict, thumb_fn_temp):
2270 thumb_filename_temp = replace_extension(thumb_fn_temp, thumb_ext, info_dict.get('ext'))
2271 thumb_filename = replace_extension(thumbfn, thumb_ext, info_dict.get('ext'))
2272 files_to_move[thumb_filename_temp] = thumb_filename
2273
2274 # Write internet shortcut files
2275 url_link = webloc_link = desktop_link = False
2276 if self.params.get('writelink', False):
2277 if sys.platform == "darwin": # macOS.
2278 webloc_link = True
2279 elif sys.platform.startswith("linux"):
2280 desktop_link = True
2281 else: # if sys.platform in ['win32', 'cygwin']:
2282 url_link = True
2283 if self.params.get('writeurllink', False):
2284 url_link = True
2285 if self.params.get('writewebloclink', False):
2286 webloc_link = True
2287 if self.params.get('writedesktoplink', False):
2288 desktop_link = True
2289
2290 if url_link or webloc_link or desktop_link:
2291 if 'webpage_url' not in info_dict:
2292 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2293 return
2294 ascii_url = iri_to_uri(info_dict['webpage_url'])
2295
2296 def _write_link_file(extension, template, newline, embed_filename):
2297 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
2298 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2299 self.to_screen('[info] Internet shortcut is already present')
2300 else:
2301 try:
2302 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2303 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2304 template_vars = {'url': ascii_url}
2305 if embed_filename:
2306 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2307 linkfile.write(template % template_vars)
2308 except (OSError, IOError):
2309 self.report_error('Cannot write internet shortcut ' + linkfn)
2310 return False
2311 return True
2312
2313 if url_link:
2314 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2315 return
2316 if webloc_link:
2317 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2318 return
2319 if desktop_link:
2320 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2321 return
2322
2323 # Download
2324 must_record_download_archive = False
2325 if not skip_dl:
2326 try:
2327
2328 def existing_file(*filepaths):
2329 ext = info_dict.get('ext')
2330 final_ext = self.params.get('final_ext', ext)
2331 existing_files = []
2332 for file in orderedSet(filepaths):
2333 if final_ext != ext:
2334 converted = replace_extension(file, final_ext, ext)
2335 if os.path.exists(encodeFilename(converted)):
2336 existing_files.append(converted)
2337 if os.path.exists(encodeFilename(file)):
2338 existing_files.append(file)
2339
2340 if not existing_files or self.params.get('overwrites', False):
2341 for file in orderedSet(existing_files):
2342 self.report_file_delete(file)
2343 os.remove(encodeFilename(file))
2344 return None
2345
2346 self.report_file_already_downloaded(existing_files[0])
2347 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2348 return existing_files[0]
2349
2350 success = True
2351 if info_dict.get('requested_formats') is not None:
2352 downloaded = []
2353 merger = FFmpegMergerPP(self)
2354 if self.params.get('allow_unplayable_formats'):
2355 self.report_warning(
2356 'You have requested merging of multiple formats '
2357 'while also allowing unplayable formats to be downloaded. '
2358 'The formats won\'t be merged to prevent data corruption.')
2359 elif not merger.available:
2360 self.report_warning(
2361 'You have requested merging of multiple formats but ffmpeg is not installed. '
2362 'The formats won\'t be merged.')
2363
2364 def compatible_formats(formats):
2365 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2366 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2367 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2368 if len(video_formats) > 2 or len(audio_formats) > 2:
2369 return False
2370
2371 # Check extension
2372 exts = set(format.get('ext') for format in formats)
2373 COMPATIBLE_EXTS = (
2374 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2375 set(('webm',)),
2376 )
2377 for ext_sets in COMPATIBLE_EXTS:
2378 if ext_sets.issuperset(exts):
2379 return True
2380 # TODO: Check acodec/vcodec
2381 return False
2382
2383 requested_formats = info_dict['requested_formats']
2384 old_ext = info_dict['ext']
2385 if self.params.get('merge_output_format') is None:
2386 if not compatible_formats(requested_formats):
2387 info_dict['ext'] = 'mkv'
2388 self.report_warning(
2389 'Requested formats are incompatible for merge and will be merged into mkv.')
2390 if (info_dict['ext'] == 'webm'
2391 and self.params.get('writethumbnail', False)
2392 and info_dict.get('thumbnails')):
2393 info_dict['ext'] = 'mkv'
2394 self.report_warning(
2395 'webm doesn\'t support embedding a thumbnail, mkv will be used.')
2396
2397 def correct_ext(filename):
2398 filename_real_ext = os.path.splitext(filename)[1][1:]
2399 filename_wo_ext = (
2400 os.path.splitext(filename)[0]
2401 if filename_real_ext == old_ext
2402 else filename)
2403 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2404
2405 # Ensure filename always has a correct extension for successful merge
2406 full_filename = correct_ext(full_filename)
2407 temp_filename = correct_ext(temp_filename)
2408 dl_filename = existing_file(full_filename, temp_filename)
2409 info_dict['__real_download'] = False
2410 if dl_filename is None:
2411 for f in requested_formats:
2412 new_info = dict(info_dict)
2413 new_info.update(f)
2414 fname = prepend_extension(
2415 self.prepare_filename(new_info, 'temp'),
2416 'f%s' % f['format_id'], new_info['ext'])
2417 if not self._ensure_dir_exists(fname):
2418 return
2419 downloaded.append(fname)
2420 partial_success, real_download = dl(fname, new_info)
2421 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2422 success = success and partial_success
2423 if merger.available and not self.params.get('allow_unplayable_formats'):
2424 info_dict['__postprocessors'].append(merger)
2425 info_dict['__files_to_merge'] = downloaded
2426 # Even if there were no new downloads, the merge itself only happens now
2427 info_dict['__real_download'] = True
2428 else:
2429 for file in downloaded:
2430 files_to_move[file] = None
2431 else:
2432 # Just a single file
2433 dl_filename = existing_file(full_filename, temp_filename)
2434 if dl_filename is None:
2435 success, real_download = dl(temp_filename, info_dict)
2436 info_dict['__real_download'] = real_download
2437
2438 dl_filename = dl_filename or temp_filename
2439 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2440
2441 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2442 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
2443 return
2444 except (OSError, IOError) as err:
2445 raise UnavailableVideoError(err)
2446 except (ContentTooShortError, ) as err:
2447 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2448 return
2449
2450 if success and full_filename != '-':
2451 # Fixup content
2452 fixup_policy = self.params.get('fixup')
2453 if fixup_policy is None:
2454 fixup_policy = 'detect_or_warn'
2455
2456 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
2457
2458 stretched_ratio = info_dict.get('stretched_ratio')
2459 if stretched_ratio is not None and stretched_ratio != 1:
2460 if fixup_policy == 'warn':
2461 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2462 info_dict['id'], stretched_ratio))
2463 elif fixup_policy == 'detect_or_warn':
2464 stretched_pp = FFmpegFixupStretchedPP(self)
2465 if stretched_pp.available:
2466 info_dict['__postprocessors'].append(stretched_pp)
2467 else:
2468 self.report_warning(
2469 '%s: Non-uniform pixel ratio (%s). %s'
2470 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
2471 else:
2472 assert fixup_policy in ('ignore', 'never')
2473
2474 if (info_dict.get('requested_formats') is None
2475 and info_dict.get('container') == 'm4a_dash'
2476 and info_dict.get('ext') == 'm4a'):
2477 if fixup_policy == 'warn':
2478 self.report_warning(
2479 '%s: writing DASH m4a. '
2480 'Only some players support this container.'
2481 % info_dict['id'])
2482 elif fixup_policy == 'detect_or_warn':
2483 fixup_pp = FFmpegFixupM4aPP(self)
2484 if fixup_pp.available:
2485 info_dict['__postprocessors'].append(fixup_pp)
2486 else:
2487 self.report_warning(
2488 '%s: writing DASH m4a. '
2489 'Only some players support this container. %s'
2490 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
2491 else:
2492 assert fixup_policy in ('ignore', 'never')
2493
2494 if ('protocol' in info_dict
2495 and get_suitable_downloader(info_dict, self.params).__name__ == 'HlsFD'):
2496 if fixup_policy == 'warn':
2497 self.report_warning('%s: malformed AAC bitstream detected.' % (
2498 info_dict['id']))
2499 elif fixup_policy == 'detect_or_warn':
2500 fixup_pp = FFmpegFixupM3u8PP(self)
2501 if fixup_pp.available:
2502 info_dict['__postprocessors'].append(fixup_pp)
2503 else:
2504 self.report_warning(
2505 '%s: malformed AAC bitstream detected. %s'
2506 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
2507 else:
2508 assert fixup_policy in ('ignore', 'never')
2509
2510 try:
2511 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
2512 except PostProcessingError as err:
2513 self.report_error('Postprocessing: %s' % str(err))
2514 return
2515 try:
2516 for ph in self._post_hooks:
2517 ph(info_dict['filepath'])
2518 except Exception as err:
2519 self.report_error('post hooks: %s' % str(err))
2520 return
2521 must_record_download_archive = True
2522
2523 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2524 self.record_download_archive(info_dict)
2525 max_downloads = self.params.get('max_downloads')
2526 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2527 raise MaxDownloadsReached()
2528
2529 def download(self, url_list):
2530 """Download a given list of URLs."""
2531 outtmpl = self.outtmpl_dict['default']
2532 if (len(url_list) > 1
2533 and outtmpl != '-'
2534 and '%' not in outtmpl
2535 and self.params.get('max_downloads') != 1):
2536 raise SameFileError(outtmpl)
2537
2538 for url in url_list:
2539 try:
2540 # It also downloads the videos
2541 res = self.extract_info(
2542 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
2543 except UnavailableVideoError:
2544 self.report_error('unable to download video')
2545 except MaxDownloadsReached:
2546 self.to_screen('[info] Maximum number of downloaded files reached')
2547 raise
2548 except ExistingVideoReached:
2549 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
2550 raise
2551 except RejectedVideoReached:
2552 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
2553 raise
2554 else:
2555 if self.params.get('dump_single_json', False):
2556 self.post_extract(res)
2557 self.to_stdout(json.dumps(res, default=repr))
2558
2559 return self._download_retcode
2560
2561 def download_with_info_file(self, info_filename):
2562 with contextlib.closing(fileinput.FileInput(
2563 [info_filename], mode='r',
2564 openhook=fileinput.hook_encoded('utf-8'))) as f:
2565 # FileInput doesn't have a read method, so we can't call json.load
2566 info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
2567 try:
2568 self.process_ie_result(info, download=True)
2569 except (DownloadError, EntryNotInPlaylist):
2570 webpage_url = info.get('webpage_url')
2571 if webpage_url is not None:
2572 self.report_warning('The info failed to download; trying with "%s"' % webpage_url)
2573 return self.download([webpage_url])
2574 else:
2575 raise
2576 return self._download_retcode
2577
2578 @staticmethod
2579 def filter_requested_info(info_dict, actually_filter=True):
2580 if not actually_filter:
2581 info_dict['epoch'] = int(time.time())
2582 return info_dict
2583 exceptions = {
2584 'remove': ['requested_formats', 'requested_subtitles', 'requested_entries', 'filepath', 'entries'],
2585 'keep': ['_type'],
2586 }
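# e.g. '_filename' (leading underscore) and 'requested_formats' (in 'remove')
# are stripped from the written info-json, while '_type' survives via 'keep';
# the filtering recurses into nested dicts and lists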
2587 keep_key = lambda k: k in exceptions['keep'] or not (k.startswith('_') or k in exceptions['remove'])
2588 filter_fn = lambda obj: (
2589 list(map(filter_fn, obj)) if isinstance(obj, (list, tuple))
2590 else obj if not isinstance(obj, dict)
2591 else dict((k, filter_fn(v)) for k, v in obj.items() if keep_key(k)))
2592 return filter_fn(info_dict)
2593
2594 def run_pp(self, pp, infodict):
2595 files_to_delete = []
2596 if '__files_to_move' not in infodict:
2597 infodict['__files_to_move'] = {}
2598 files_to_delete, infodict = pp.run(infodict)
2599 if not files_to_delete:
2600 return infodict
2601
2602 if self.params.get('keepvideo', False):
2603 for f in files_to_delete:
2604 infodict['__files_to_move'].setdefault(f, '')
2605 else:
2606 for old_filename in set(files_to_delete):
2607 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2608 try:
2609 os.remove(encodeFilename(old_filename))
2610 except (IOError, OSError):
2611 self.report_warning('Unable to remove downloaded original file')
2612 if old_filename in infodict['__files_to_move']:
2613 del infodict['__files_to_move'][old_filename]
2614 return infodict
2615
2616 @staticmethod
2617 def post_extract(info_dict):
2618 def actual_post_extract(info_dict):
2619 if info_dict.get('_type') in ('playlist', 'multi_video'):
2620 for video_dict in info_dict.get('entries') or []:
2621 actual_post_extract(video_dict or {})
2622 return
2623
2624 if '__post_extractor' not in info_dict:
2625 return
2626 post_extractor = info_dict['__post_extractor']
2627 if post_extractor:
2628 info_dict.update(post_extractor().items())
2629 del info_dict['__post_extractor']
2630 return
2631
2632 actual_post_extract(info_dict or {})
2633
2634 def pre_process(self, ie_info):
2635 info = dict(ie_info)
2636 for pp in self._pps['beforedl']:
2637 info = self.run_pp(pp, info)
2638 return info
2639
2640 def post_process(self, filename, ie_info, files_to_move=None):
2641 """Run all the postprocessors on the given file."""
2642 info = dict(ie_info)
2643 info['filepath'] = filename
2644 info['__files_to_move'] = files_to_move or {}
2645
2646 for pp in ie_info.get('__postprocessors', []) + self._pps['normal']:
2647 info = self.run_pp(pp, info)
2648 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2649 del info['__files_to_move']
2650 for pp in self._pps['aftermove']:
2651 info = self.run_pp(pp, info)
2652 return info
2653
2654 def _make_archive_id(self, info_dict):
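# Builds e.g. 'youtube dQw4w9WgXcQ' (lower-cased extractor key plus video id);
# returns None when no id or matching extractor can be determined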
2655 video_id = info_dict.get('id')
2656 if not video_id:
2657 return
2658 # Future-proof against any change in case
2659 # and for backwards compatibility with prior versions
2660 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
2661 if extractor is None:
2662 url = str_or_none(info_dict.get('url'))
2663 if not url:
2664 return
2665 # Try to find matching extractor for the URL and take its ie_key
2666 for ie in self._ies:
2667 if ie.suitable(url):
2668 extractor = ie.ie_key()
2669 break
2670 else:
2671 return
2672 return '%s %s' % (extractor.lower(), video_id)
2673
2674 def in_download_archive(self, info_dict):
2675 fn = self.params.get('download_archive')
2676 if fn is None:
2677 return False
2678
2679 vid_id = self._make_archive_id(info_dict)
2680 if not vid_id:
2681 return False # Incomplete video information
2682
2683 return vid_id in self.archive
2684
2685 def record_download_archive(self, info_dict):
2686 fn = self.params.get('download_archive')
2687 if fn is None:
2688 return
2689 vid_id = self._make_archive_id(info_dict)
2690 assert vid_id
2691 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
2692 archive_file.write(vid_id + '\n')
2693 self.archive.add(vid_id)
2694
2695 @staticmethod
2696 def format_resolution(format, default='unknown'):
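# e.g. {'width': 1920, 'height': 1080} -> '1920x1080', {'height': 720} -> '720p',
# audio-only formats -> 'audio only'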
2697 if format.get('vcodec') == 'none':
2698 return 'audio only'
2699 if format.get('resolution') is not None:
2700 return format['resolution']
2701 if format.get('width') and format.get('height'):
2702 res = '%dx%d' % (format['width'], format['height'])
2703 elif format.get('height'):
2704 res = '%sp' % format['height']
2705 elif format.get('width'):
2706 res = '%dx?' % format['width']
2707 else:
2708 res = default
2709 return res
2710
2711 def _format_note(self, fdict):
2712 res = ''
2713 if fdict.get('ext') in ['f4f', 'f4m']:
2714 res += '(unsupported) '
2715 if fdict.get('language'):
2716 if res:
2717 res += ' '
2718 res += '[%s] ' % fdict['language']
2719 if fdict.get('format_note') is not None:
2720 res += fdict['format_note'] + ' '
2721 if fdict.get('tbr') is not None:
2722 res += '%4dk ' % fdict['tbr']
2723 if fdict.get('container') is not None:
2724 if res:
2725 res += ', '
2726 res += '%s container' % fdict['container']
2727 if (fdict.get('vcodec') is not None
2728 and fdict.get('vcodec') != 'none'):
2729 if res:
2730 res += ', '
2731 res += fdict['vcodec']
2732 if fdict.get('vbr') is not None:
2733 res += '@'
2734 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2735 res += 'video@'
2736 if fdict.get('vbr') is not None:
2737 res += '%4dk' % fdict['vbr']
2738 if fdict.get('fps') is not None:
2739 if res:
2740 res += ', '
2741 res += '%sfps' % fdict['fps']
2742 if fdict.get('acodec') is not None:
2743 if res:
2744 res += ', '
2745 if fdict['acodec'] == 'none':
2746 res += 'video only'
2747 else:
2748 res += '%-5s' % fdict['acodec']
2749 elif fdict.get('abr') is not None:
2750 if res:
2751 res += ', '
2752 res += 'audio'
2753 if fdict.get('abr') is not None:
2754 res += '@%3dk' % fdict['abr']
2755 if fdict.get('asr') is not None:
2756 res += ' (%5dHz)' % fdict['asr']
2757 if fdict.get('filesize') is not None:
2758 if res:
2759 res += ', '
2760 res += format_bytes(fdict['filesize'])
2761 elif fdict.get('filesize_approx') is not None:
2762 if res:
2763 res += ', '
2764 res += '~' + format_bytes(fdict['filesize_approx'])
2765 return res
2766
2767 def _format_note_table(self, f):
2768 def join_fields(*vargs):
2769 return ', '.join((val for val in vargs if val != ''))
2770
2771 return join_fields(
2772 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2773 format_field(f, 'language', '[%s]'),
2774 format_field(f, 'format_note'),
2775 format_field(f, 'container', ignore=(None, f.get('ext'))),
2776 format_field(f, 'asr', '%5dHz'))
2777
2778 def list_formats(self, info_dict):
2779 formats = info_dict.get('formats', [info_dict])
2780 new_format = self.params.get('listformats_table', False)
2781 if new_format:
2782 table = [
2783 [
2784 format_field(f, 'format_id'),
2785 format_field(f, 'ext'),
2786 self.format_resolution(f),
2787 format_field(f, 'fps', '%d'),
2788 '|',
2789 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2790 format_field(f, 'tbr', '%4dk'),
2791 shorten_protocol_name(f.get('protocol', '').replace("native", "n")),
2792 '|',
2793 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2794 format_field(f, 'vbr', '%4dk'),
2795 format_field(f, 'acodec', default='unknown').replace('none', ''),
2796 format_field(f, 'abr', '%3dk'),
2797 format_field(f, 'asr', '%5dHz'),
2798 self._format_note_table(f)]
2799 for f in formats
2800 if f.get('preference') is None or f['preference'] >= -1000]
2801 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2802 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2803 else:
2804 table = [
2805 [
2806 format_field(f, 'format_id'),
2807 format_field(f, 'ext'),
2808 self.format_resolution(f),
2809 self._format_note(f)]
2810 for f in formats
2811 if f.get('preference') is None or f['preference'] >= -1000]
2812 header_line = ['format code', 'extension', 'resolution', 'note']
2813
2814 self.to_screen(
2815 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2816 header_line,
2817 table,
2818 delim=new_format,
2819 extraGap=(0 if new_format else 1),
2820 hideEmpty=new_format)))
2821
2822 def list_thumbnails(self, info_dict):
2823 thumbnails = info_dict.get('thumbnails')
2824 if not thumbnails:
2825 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2826 return
2827
2828 self.to_screen(
2829 '[info] Thumbnails for %s:' % info_dict['id'])
2830 self.to_screen(render_table(
2831 ['ID', 'width', 'height', 'URL'],
2832 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
2833
2834 def list_subtitles(self, video_id, subtitles, name='subtitles'):
2835 if not subtitles:
2836 self.to_screen('%s has no %s' % (video_id, name))
2837 return
2838 self.to_screen(
2839 'Available %s for %s:' % (name, video_id))
2840 self.to_screen(render_table(
2841 ['Language', 'formats'],
2842 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2843 for lang, formats in subtitles.items()]))
2844
2845 def urlopen(self, req):
2846 """ Start an HTTP download """
2847 if isinstance(req, compat_basestring):
2848 req = sanitized_Request(req)
2849 return self._opener.open(req, timeout=self._socket_timeout)
2850
2851 def print_debug_header(self):
2852 if not self.params.get('verbose'):
2853 return
2854
2855 if type('') is not compat_str:
2856 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
2857 self.report_warning(
2858 'Your Python is broken! Update to a newer and supported version')
2859
2860 stdout_encoding = getattr(
2861 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
2862 encoding_str = (
2863 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2864 locale.getpreferredencoding(),
2865 sys.getfilesystemencoding(),
2866 stdout_encoding,
2867 self.get_encoding()))
2868 write_string(encoding_str, encoding=None)
2869
2870 source = (
2871 '(exe)' if hasattr(sys, 'frozen')
2872 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2873 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2874 else '')
2875 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
2876 if _LAZY_LOADER:
2877 self._write_string('[debug] Lazy loading extractors enabled\n')
2878 if _PLUGIN_CLASSES:
2879 self._write_string(
2880 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
2881 try:
2882 sp = subprocess.Popen(
2883 ['git', 'rev-parse', '--short', 'HEAD'],
2884 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2885 cwd=os.path.dirname(os.path.abspath(__file__)))
2886 out, err = process_communicate_or_kill(sp)
2887 out = out.decode().strip()
2888 if re.match('[0-9a-f]+', out):
2889 self._write_string('[debug] Git HEAD: %s\n' % out)
2890 except Exception:
2891 try:
2892 sys.exc_clear()
2893 except Exception:
2894 pass
2895
2896 def python_implementation():
2897 impl_name = platform.python_implementation()
2898 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2899 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2900 return impl_name
2901
2902 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2903 platform.python_version(),
2904 python_implementation(),
2905 platform.architecture()[0],
2906 platform_name()))
2907
2908 exe_versions = FFmpegPostProcessor.get_versions(self)
2909 exe_versions['rtmpdump'] = rtmpdump_version()
2910 exe_versions['phantomjs'] = PhantomJSwrapper._version()
2911 exe_str = ', '.join(
2912 '%s %s' % (exe, v)
2913 for exe, v in sorted(exe_versions.items())
2914 if v
2915 )
2916 if not exe_str:
2917 exe_str = 'none'
2918 self._write_string('[debug] exe versions: %s\n' % exe_str)
2919
2920 proxy_map = {}
2921 for handler in self._opener.handlers:
2922 if hasattr(handler, 'proxies'):
2923 proxy_map.update(handler.proxies)
2924 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
2925
2926 if self.params.get('call_home', False):
2927 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2928 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
2929 return
2930 latest_version = self.urlopen(
2931 'https://yt-dl.org/latest/version').read().decode('utf-8')
2932 if version_tuple(latest_version) > version_tuple(__version__):
2933 self.report_warning(
2934 'You are using an outdated version (newest version: %s)! '
2935 'See https://yt-dl.org/update if you need help updating.' %
2936 latest_version)
2937
2938 def _setup_opener(self):
2939 timeout_val = self.params.get('socket_timeout')
2940 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
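# i.e. 600 seconds (10 minutes) unless 'socket_timeout' (--socket-timeout) is given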
2941
2942 opts_cookiefile = self.params.get('cookiefile')
2943 opts_proxy = self.params.get('proxy')
2944
2945 if opts_cookiefile is None:
2946 self.cookiejar = compat_cookiejar.CookieJar()
2947 else:
2948 opts_cookiefile = expand_path(opts_cookiefile)
2949 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
2950 if os.access(opts_cookiefile, os.R_OK):
2951 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
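# load() is only attempted when the file is readable, so a not-yet-existing
# cookie file path can be passed in and will simply start out empty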
2952
2953 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
2954 if opts_proxy is not None:
2955 if opts_proxy == '':
2956 proxies = {}
2957 else:
2958 proxies = {'http': opts_proxy, 'https': opts_proxy}
2959 else:
2960 proxies = compat_urllib_request.getproxies()
2961 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
2962 if 'http' in proxies and 'https' not in proxies:
2963 proxies['https'] = proxies['http']
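# e.g. exporting only http_proxy=http://127.0.0.1:3128 (hypothetical address)
# makes that same proxy handle https:// URLs as well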
2964 proxy_handler = PerRequestProxyHandler(proxies)
2965
2966 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
2967 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2968 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
2969 redirect_handler = YoutubeDLRedirectHandler()
2970 data_handler = compat_urllib_request_DataHandler()
2971
2972 # When passing our own FileHandler instance, build_opener won't add the
2973 # default FileHandler and allows us to disable the file protocol, which
2974 # can be used for malicious purposes (see
2975 # https://github.com/ytdl-org/youtube-dl/issues/8227)
2976 file_handler = compat_urllib_request.FileHandler()
2977
2978 def file_open(*args, **kwargs):
2979 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
2980 file_handler.file_open = file_open
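# Illustrative effect of the override above (hypothetical path):
#   self.urlopen('file:///etc/passwd')
# now raises URLError instead of reading the local file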
2981
2982 opener = compat_urllib_request.build_opener(
2983 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2984
2985 # Delete the default user-agent header, which would otherwise apply in
2986 # cases where our custom HTTP handler doesn't come into play
2987 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
2988 opener.addheaders = []
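# With addheaders emptied, requests no longer carry urllib's default
# 'User-Agent: Python-urllib/x.y'; the expectation (an assumption here) is
# that YoutubeDLHandler fills in std_headers where needed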
2989 self._opener = opener
2990
2991 def encode(self, s):
2992 if isinstance(s, bytes):
2993 return s # Already encoded
2994
2995 try:
2996 return s.encode(self.get_encoding())
2997 except UnicodeEncodeError as err:
2998 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2999 raise
3000
3001 def get_encoding(self):
3002 encoding = self.params.get('encoding')
3003 if encoding is None:
3004 encoding = preferredencoding()
3005 return encoding
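# e.g. without an explicit --encoding this falls back to preferredencoding(),
# which is typically 'UTF-8' on modern Linux systems (platform-dependent)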
3006
3007 def _write_thumbnails(self, info_dict, filename):  # returns the extensions of the thumbnails written (or already present)
3008 write_all = self.params.get('write_all_thumbnails', False)
3009 thumbnails = []
3010 if write_all or self.params.get('writethumbnail', False):
3011 thumbnails = info_dict.get('thumbnails') or []
3012 multiple = write_all and len(thumbnails) > 1
3013
3014 ret = []
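# thumbnails are assumed to be sorted worst-to-best; when only one thumbnail
# is wanted, the list is walked in reverse so the best one is tried first and
# the loop stops after the first success (see the break at the end)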
3015 for t in thumbnails[::1 if write_all else -1]:
3016 thumb_ext = determine_ext(t['url'], 'jpg')
3017 suffix = '%s.' % t['id'] if multiple else ''
3018 thumb_display_id = '%s ' % t['id'] if multiple else ''
3019 t['filepath'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
3020
3021 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
3022 ret.append(suffix + thumb_ext)
3023 self.to_screen('[%s] %s: Thumbnail %sis already present' %
3024 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3025 else:
3026 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
3027 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3028 try:
3029 uf = self.urlopen(t['url'])
3030 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3031 shutil.copyfileobj(uf, thumbf)
3032 ret.append(suffix + thumb_ext)
3033 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3034 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
3035 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
3036 self.report_warning('Unable to download thumbnail "%s": %s' %
3037 (t['url'], error_to_compat_str(err)))
3038 if ret and not write_all:
3039 break
3040 return ret