#!/usr/bin/env python
# coding: utf-8

from __future__ import absolute_import, unicode_literals

import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random

from string import ascii_letters
from zipimport import zipimporter

from .compat import (
    compat_basestring,
    compat_cookiejar,
    compat_get_terminal_size,
    compat_http_client,
    compat_kwargs,
    compat_numeric_types,
    compat_os_name,
    compat_str,
    compat_tokenize_tokenize,
    compat_urllib_error,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
)
from .utils import (
    age_restricted,
    args_to_str,
    ContentTooShortError,
    date_from_str,
    DateRange,
    DEFAULT_OUTTMPL,
    OUTTMPL_TYPES,
    determine_ext,
    determine_protocol,
    DOT_DESKTOP_LINK_TEMPLATE,
    DOT_URL_LINK_TEMPLATE,
    DOT_WEBLOC_LINK_TEMPLATE,
    DownloadError,
    encode_compat_str,
    encodeFilename,
    error_to_compat_str,
    EntryNotInPlaylist,
    ExistingVideoReached,
    expand_path,
    ExtractorError,
    float_or_none,
    format_bytes,
    format_field,
    FORMAT_RE,
    formatSeconds,
    GeoRestrictedError,
    int_or_none,
    iri_to_uri,
    ISO3166Utils,
    locked_file,
    make_dir,
    make_HTTPS_handler,
    MaxDownloadsReached,
    orderedSet,
    PagedList,
    parse_filesize,
    PerRequestProxyHandler,
    platform_name,
    PostProcessingError,
    preferredencoding,
    prepend_extension,
    register_socks_protocols,
    render_table,
    replace_extension,
    RejectedVideoReached,
    SameFileError,
    sanitize_filename,
    sanitize_path,
    sanitize_url,
    sanitized_Request,
    std_headers,
    str_or_none,
    strftime_or_none,
    subtitles_filename,
    to_high_limit_path,
    UnavailableVideoError,
    url_basename,
    version_tuple,
    write_json_file,
    write_string,
    YoutubeDLCookieJar,
    YoutubeDLCookieProcessor,
    YoutubeDLHandler,
    YoutubeDLRedirectHandler,
    process_communicate_or_kill,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER, _PLUGIN_CLASSES
from .extractor.openload import PhantomJSwrapper
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupM3u8PP,
    FFmpegFixupM4aPP,
    FFmpegFixupStretchedPP,
    FFmpegMergerPP,
    FFmpegPostProcessor,
    # FFmpegSubtitlesConvertorPP,
    get_postprocessor,
    MoveFilesAfterDownloadPP,
)
from .version import __version__

if compat_os_name == 'nt':
    import ctypes


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do),
    it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    forceduration:     Force printing duration.
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files.
    format:            Video format code. See "FORMAT SELECTION" for more details.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    format_sort:       How to sort the video formats. See "Sorting Formats"
                       for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py)
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       A string is also accepted for backward compatibility
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names
    trim_file_name:    Limit length of filename (extension excluded)
    windowsfilenames:  Force the filenames to be windows compatible
    ignoreerrors:      Do not stop on download errors
                       (Default True when running yt-dlp,
                       but False when directly accessing YoutubeDL class)
    force_generic_extractor: Force downloader to use the generic extractor
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    clean_infojson:    Remove private fields from the infojson
    writecomments:     Extract video comments. These will not be written to disk
                       unless writeinfojson is also given
    writeannotations:  Write the video annotations to a .annotations.xml file
    writethumbnail:    Write the thumbnail image to a file
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options
    write_all_thumbnails: Write all thumbnail formats to files
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop)
    writeurllink:      Write a Windows internet shortcut file (.url)
    writewebloclink:   Write a macOS internet shortcut file (.webloc)
    writedesktoplink:  Write a Linux internet shortcut file (.desktop)
    writesubtitles:    Write the video subtitles to a file
    writeautomaticsub: Write the automatically generated subtitles to a file
    allsubtitles:      Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub)
    listsubtitles:     Lists all available subtitles for the video
    subtitlesformat:   The format code for subtitles
    subtitleslangs:    List of languages of the subtitles to download
    keepvideo:         Keep the video file after post-processing
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Videos unsuitable for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    cookiefile:        File name where cookies should be read from and dumped to
    nocheckcertificate: Do not verify SSL certificates
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi
    debug_printtraffic: Print out sent and received HTTP traffic
    include_ads:       Download ads as well
    default_search:    Prepend this string if an input url is not valid.
                       'auto' for elaborate guessing
    encoding:          Use this encoding instead of the system-specified.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    postprocessors:    A list of dictionaries, each with an entry
                       * key: The name of the postprocessor. See
                              yt_dlp/postprocessor/__init__.py for a list.
                       * _after_move: Optional. If True, run this post_processor
                              after 'MoveFilesAfterDownload'
                       as well as any further keyword arguments for the
                       postprocessor.
    post_hooks:        A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted. "merge_output_format" is
                       replaced by this extension when given
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone, or the lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from the range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header
    geo_bypass_country:
                       Two-letter ISO 3166-1 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header
    geo_bypass_ip_block:
                       IP range in CIDR notation that will be used similarly to
                       geo_bypass_country

    The following options determine which downloader is picked:
    external_downloader: Executable of the external downloader to call.
                       None or unset for standard (built-in) downloader.
    hls_prefer_native: Use the native HLS downloader if True, ffmpeg/avconv
                       if False, or the downloader suggested by the extractor
                       if None.

    The following parameters are not used by YoutubeDL itself; they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle,
    xattr_set_filesize, external_downloader_args, hls_use_mpegts,
    http_chunk_size.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
    hls_split_discontinuity: Split HLS playlists into different formats at
                       discontinuities such as ad breaks (default: False)
    youtube_include_dash_manifest: If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
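
    Example (an illustrative sketch, not an exhaustive reference; the URL,
    options and hook below are placeholders chosen from the options
    documented above):

        from yt_dlp import YoutubeDL

        def progress_hook(d):
            if d['status'] == 'finished':
                print('Done downloading', d['filename'])

        ydl_opts = {
            'format': 'bestvideo*+bestaudio/best',
            'outtmpl': {'default': '%(title)s.%(ext)s'},
            'progress_hooks': [progress_hook],
        }
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])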
399 """

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    params = None
    _ies = []
    _pps = {'beforedl': [], 'aftermove': [], 'normal': []}
    __prepare_filename_warned = False
    _first_webpage_request = True
    _download_retcode = None
    _num_downloads = None
    _playlist_level = 0
    _playlist_urls = set()
    _screen_file = None

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options."""
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = {'beforedl': [], 'aftermove': [], 'normal': []}
        self.__prepare_filename_warned = False
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)
        self.archive = set()

446 """Preload the archive, if any is specified"""
447 def preload_download_archive(self):
448 fn = self.params.get('download_archive')
449 if fn is None:
450 return False
451 try:
452 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
453 for line in archive_file:
454 self.archive.add(line.strip())
455 except IOError as ioe:
456 if ioe.errno != errno.ENOENT:
457 raise
458 return False
459 return True

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        if self.params.get('verbose'):
            self.to_stdout('[debug] Loading archive file %r' % self.params.get('download_archive'))

        preload_download_archive(self)

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        if self.params.get('final_ext'):
            if self.params.get('merge_output_format'):
                self.report_warning('--merge-output-format will be ignored since --remux-video or --recode-video is given')
            self.params['merge_output_format'] = self.params['final_ext']

        if 'overwrites' in self.params and self.params['overwrites'] is None:
            del self.params['overwrites']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N is the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            if 'when' in pp_def:
                when = pp_def['when']
                del pp_def['when']
            else:
                when = 'normal'
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        for ph in self.params.get('post_hooks', []):
            self.add_post_hook(ph)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s\n' %
                args_to_str(correct_argv))

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        if not isinstance(ie, type):
            self._ies_instances[ie.ie_key()] = ie
            ie.set_downloader(self)

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key; it will try to get one from
        the _ies list, and if there's no instance it will create a new one and
        add it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='normal'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader)"""
        self._progress_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        return self.to_stdout(message, skip_eol, check_quiet=True)

    def _write_string(self, s, out=None):
        write_string(s, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, check_quiet=False):
        """Print message to stdout if not in quiet mode."""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not check_quiet or not self.params.get('quiet', False):
            message = self._bidi_workaround(message)
            terminator = ['\n', ''][skip_eol]
            output = message + terminator

            self._write_string(output, self._screen_file)

    def to_stderr(self, message):
        """Print message to stderr."""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            message = self._bidi_workaround(message)
            output = message + '\n'
            self._write_string(output, self._err_file)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate', False):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def report_warning(self, message):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
                _msg_header = '\033[0;33mWARNING:\033[0m'
            else:
                _msg_header = 'WARNING:'
            warning_message = '%s %s' % (_msg_header, message)
            self.to_stderr(warning_message)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefix the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
            _msg_header = '\033[0;31mERROR:\033[0m'
        else:
            _msg_header = 'ERROR:'
        error_message = '%s %s' % (_msg_header, message)
        self.trouble(error_message, tb)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        outtmpl_dict.update({
            k: v for k, v in DEFAULT_OUTTMPL.items()
            if not outtmpl_dict.get(k)})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the template and info_dict suitable for substitution (outtmpl % info_dict)"""
        template_dict = dict(info_dict)

        # duration_string
        template_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-')
            if info_dict.get('duration', None) is not None
            else None)

        # epoch
        template_dict['epoch'] = int(time.time())

        # autonumber
        autonumber_size = self.params.get('autonumber_size')
        if autonumber_size is None:
            autonumber_size = 5
        template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads

        # resolution if not defined
        if template_dict.get('resolution') is None:
            if template_dict.get('width') and template_dict.get('height'):
                template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
            elif template_dict.get('height'):
                template_dict['resolution'] = '%sp' % template_dict['height']
            elif template_dict.get('width'):
                template_dict['resolution'] = '%dx?' % template_dict['width']

        if sanitize is None:
            sanitize = lambda k, v: v
        template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                             for k, v in template_dict.items()
                             if v is not None and not isinstance(v, (list, tuple, dict)))
        na = self.params.get('outtmpl_na_placeholder', 'NA')
        template_dict = collections.defaultdict(lambda: na, template_dict)

        # For fields playlist_index and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
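        # (illustrative example: for a playlist with 100-999 entries,
        # '%(playlist_index)s' becomes '%(playlist_index)03d')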
        field_size_compat_map = {
            'playlist_index': len(str(template_dict['n_entries'])),
            'autonumber': autonumber_size,
        }
        FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
        mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
        if mobj:
            outtmpl = re.sub(
                FIELD_SIZE_COMPAT_RE,
                r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                outtmpl)

        numeric_fields = list(self._NUMERIC_FIELDS)

        # Format date
        FORMAT_DATE_RE = FORMAT_RE.format(r'(?P<key>(?P<field>\w+)>(?P<format>.+?))')
        for mobj in re.finditer(FORMAT_DATE_RE, outtmpl):
            conv_type, field, frmt, key = mobj.group('type', 'field', 'format', 'key')
            if key in template_dict:
                continue
            value = strftime_or_none(template_dict.get(field), frmt, na)
            if conv_type in 'crs':  # string
                value = sanitize(field, value)
            else:  # number
                numeric_fields.append(key)
                value = float_or_none(value, default=None)
            if value is not None:
                template_dict[key] = value

        # Missing numeric fields used together with integer presentation types
        # in format specification will break the argument substitution since
        # string NA placeholder is returned for missing fields. We will patch
        # output template for missing fields to meet string presentation type.
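        # (illustrative example: if 'height' is missing, '%(height)d' is
        # rewritten to '%(height)s' so the 'NA' placeholder can be substituted)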
        for numeric_field in numeric_fields:
            if numeric_field not in template_dict:
                outtmpl = re.sub(
                    FORMAT_RE.format(re.escape(numeric_field)),
                    r'%({0})s'.format(numeric_field), outtmpl)

        return outtmpl, template_dict

    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default'])
            outtmpl, template_dict = self.prepare_outtmpl(outtmpl, info_dict, sanitize)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly, which is not what we want here, since we need to
            # keep '%%' intact for the template dict substitution step. Work
            # around this with a boundary-like separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
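            # (illustration: '%%(title)s' becomes '%<sep>%(title)s', passes
            # through expand_path untouched, and .replace(sep, '') below
            # restores it to '%%(title)s')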

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
            filename = expand_path(outtmpl).replace(sep, '') % template_dict

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if force_ext is not None:
                filename = replace_extension(filename, force_ext, template_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = '.'.join(filter(None, [fn_groups[0][:trim_file_name], sub_ext, ext]))

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        filename = self._prepare_filename(info_dict, dir_type or 'default')

        if warn and not self.__prepare_filename_warned:
            if not paths:
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout')
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template')
            self.__prepare_filename_warned = True
        if filename == '-' or not filename:
            return filename

        homepath = expand_path(paths.get('home', '').strip())
        assert isinstance(homepath, compat_str)
        subdir = expand_path(paths.get(dir_type, '').strip()) if dir_type else ''
        assert isinstance(subdir, compat_str)
        path = os.path.join(homepath, subdir, filename)

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to work around encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    def _match_entry(self, info_dict, incomplete):
        """ Returns None if the file should be downloaded """

        def check_filter():
            video_title = info_dict.get('title', info_dict.get('id', 'video'))
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title
            if self.in_download_archive(info_dict):
                return '%s has already been recorded in archive' % video_title

            if not incomplete:
                match_filter = self.params.get('match_filter')
                if match_filter is not None:
                    ret = match_filter(info_dict)
                    if ret is not None:
                        return ret
            return None

        reason = check_filter()
        if reason is not None:
            self.to_screen('[download] ' + reason)
            if reason.endswith('has already been recorded in archive') and self.params.get('break_on_existing', False):
                raise ExistingVideoReached()
            elif self.params.get('break_on_reject', False):
                raise RejectedVideoReached()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, info_dict=None, extra_info={},
                     process=True, force_generic_extractor=False):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        '''

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            ie_key = ie.ie_key()
            ie = self.get_info_extractor(ie_key)
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                temp_id = str_or_none(
                    ie.extract_id(url) if callable(getattr(ie, 'extract_id', None))
                    else ie._match_id(url))
            except (AssertionError, IndexError, AttributeError):
                temp_id = None
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen("[%s] %s: has already been recorded in archive" % (
                    ie_key, temp_id))
                break
            return self.__extract_info(url, ie, download, extra_info, process, info_dict)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def __handle_extraction_exceptions(func):
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

    @__handle_extraction_exceptions
    def __extract_info(self, url, ie, download, extra_info, process, info_dict):
        ie_result = ie.extract(url)
        if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
            return
        if isinstance(ie_result, list):
            # Backwards compatibility: old IE result format
            ie_result = {
                '_type': 'compat_list',
                'entries': ie_result,
            }
        if info_dict:
            if info_dict.get('id'):
                ie_result['id'] = info_dict['id']
            if info_dict.get('title'):
                ie_result['title'] = info_dict['title']
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            return self.process_ie_result(ie_result, download, extra_info)
        else:
            return ie_result

    def add_default_extra_info(self, ie_result, ie, url):
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'webpage_url': url,
            'webpage_url_basename': url_basename(url),
            'extractor_key': ie.ie_key(),
        })

    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                self.__forced_printings(ie_result, self.prepare_filename(ie_result), incomplete=True)
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download, info_dict=ie_result,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather a url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    def __process_playlist(self, ie_result, download):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        if 'entries' not in ie_result:
            raise EntryNotInPlaylist()
        incomplete_entries = bool(ie_result.get('requested_entries'))
        if incomplete_entries:
            def fill_missing_entries(entries, indexes):
                ret = [None] * max(indexes)
                for i, entry in zip(indexes, entries):
                    ret[i - 1] = entry
                return ret
            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])

        playlist_results = []

        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
            def iter_playlistitems(format):
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
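            # (illustrative example: playlist_items='1-3,7' yields 1, 2, 3, 7)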
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']

        def make_playlistitems_entries(list_ie_entries):
            num_entries = len(list_ie_entries)
            for i in playlistitems:
                if -num_entries < i <= num_entries:
                    yield list_ie_entries[i - 1]
                elif incomplete_entries:
                    raise EntryNotInPlaylist()

        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = list(make_playlistitems_entries(ie_entries))
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            msg = 'Collected %d videos; downloading %d of them' % (n_all_entries, n_entries)
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(
                        item - 1, item
                    ))
            else:
                entries = ie_entries.getslice(
                    playliststart, playlistend)
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries
        else:  # iterable
            if playlistitems:
                entries = list(make_playlistitems_entries(list(itertools.islice(
                    ie_entries, 0, max(playlistitems)))))
            else:
                entries = list(itertools.islice(
                    ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            msg = 'Downloading %d videos' % n_entries

        if any((entry is None for entry in entries)):
            raise EntryNotInPlaylist()
        if not playlistitems and (playliststart or playlistend):
            playlistitems = list(range(1 + playliststart, 1 + playliststart + len(entries)))
        ie_result['entries'] = entries
        ie_result['requested_entries'] = playlistitems

        if self.params.get('allow_playlist_files', True):
            ie_copy = {
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': 0
            }
            ie_copy.update(dict(ie_result))

            if self.params.get('writeinfojson', False):
                infofn = self.prepare_filename(ie_copy, 'pl_infojson')
                if not self._ensure_dir_exists(encodeFilename(infofn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
                    self.to_screen('[info] Playlist metadata is already present')
                else:
                    self.to_screen('[info] Writing playlist metadata as JSON to: ' + infofn)
                    try:
                        write_json_file(self.filter_requested_info(ie_result, self.params.get('clean_infojson', True)), infofn)
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist metadata to JSON file ' + infofn)

            if self.params.get('writedescription', False):
                descfn = self.prepare_filename(ie_copy, 'pl_description')
                if not self._ensure_dir_exists(encodeFilename(descfn)):
                    return
                if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
                    self.to_screen('[info] Playlist description is already present')
                elif ie_result.get('description') is None:
                    self.report_warning('There\'s no playlist description to write.')
                else:
                    try:
                        self.to_screen('[info] Writing playlist description to: ' + descfn)
                        with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                            descfile.write(ie_result['description'])
                    except (OSError, IOError):
                        self.report_error('Cannot write playlist description file ' + descfn)
                        return

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]
        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg))
        for i, entry in enumerate(entries, 1):
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            extra = {
                'n_entries': n_entries,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': playlistitems[i - 1] if playlistitems else i,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            if self._match_entry(entry, incomplete=True) is not None:
                continue

            entry_result = self.__process_iterable_entry(entry, download, extra)
            # TODO: skip failed (empty) entries?
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
        return ie_result

    @__handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
            \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
            $
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.search(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
            }
            str_operator_rex = re.compile(r'''(?x)
                \s*(?P<key>[a-zA-Z0-9._-]+)
                \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
                \s*(?P<value>[a-zA-Z0-9._-]+)
                \s*$
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.search(filter_spec)
            if m:
                comparison_value = m.group('value')
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise ValueError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate', False)
            and download
            and (
                not can_merge()
                or info_dict.get('is_live', False)
                or self.outtmpl_dict['default'] == '-'))

        return (
            'best/bestvideo+bestaudio'
            if prefer_best
            else 'bestvideo*+bestaudio/best'
            if not self.params.get('allow_multiple_audio_streams', False)
            else 'bestvideo+bestaudio/best')

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
                elif type == tokenize.ENDMARKER:
1539 break
1540 if current_selector:
1541 selectors.append(current_selector)
1542 return selectors
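# Illustrative sketch (not part of the original file): for the spec
# 'bestvideo+bestaudio/best' this returns, roughly, one PICKFIRST selector
# whose alternatives are a MERGE of (SINGLE 'bestvideo', SINGLE 'bestaudio')
# and a SINGLE 'best'; any '[...]' filters land in each selector's `filters`.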
1543
1544 def _build_selector_function(selector):
1545 if isinstance(selector, list): # ,
1546 fs = [_build_selector_function(s) for s in selector]
1547
1548 def selector_function(ctx):
1549 for f in fs:
1550 for format in f(ctx):
1551 yield format
1552 return selector_function
1553
1554 elif selector.type == GROUP: # ()
1555 selector_function = _build_selector_function(selector.selector)
1556
1557 elif selector.type == PICKFIRST: # /
1558 fs = [_build_selector_function(s) for s in selector.selector]
1559
1560 def selector_function(ctx):
1561 for f in fs:
1562 picked_formats = list(f(ctx))
1563 if picked_formats:
1564 return picked_formats
1565 return []
1566
1567 elif selector.type == SINGLE: # atom
1568 format_spec = selector.selector if selector.selector is not None else 'best'
1569
1570 if format_spec == 'all':
1571 def selector_function(ctx):
1572 formats = list(ctx['formats'])
1573 if formats:
1574 for f in formats:
1575 yield f
1576
1577 else:
1578 format_fallback = False
1579 format_spec_obj = re.match(r'(best|worst|b|w)(video|audio|v|a)?(\*)?$', format_spec)
1580 if format_spec_obj is not None:
1581 format_idx = 0 if format_spec_obj.group(1)[0] == 'w' else -1
1582 format_type = format_spec_obj.group(2)[0] if format_spec_obj.group(2) else False
1583 not_format_type = 'v' if format_type == 'a' else 'a'
1584 format_modified = format_spec_obj.group(3) is not None
1585
1586 format_fallback = not format_type and not format_modified # for b, w
1587 filter_f = ((lambda f: f.get(format_type + 'codec') != 'none')
1588 if format_type and format_modified # bv*, ba*, wv*, wa*
1589 else (lambda f: f.get(not_format_type + 'codec') == 'none')
1590 if format_type # bv, ba, wv, wa
1591 else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
1592 if not format_modified # b, w
1593 else None) # b*, w*
1594 else:
1595 format_idx = -1
1596 filter_f = ((lambda f: f.get('ext') == format_spec)
1597 if format_spec in ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav'] # extension
1598 else (lambda f: f.get('format_id') == format_spec)) # id
1599
1600 def selector_function(ctx):
1601 formats = list(ctx['formats'])
1602 if not formats:
1603 return
1604 matches = list(filter(filter_f, formats)) if filter_f is not None else formats
1605 if matches:
1606 yield matches[format_idx]
1607 elif format_fallback == 'force' or (format_fallback and ctx['incomplete_formats']):
1608 # for extractors with incomplete formats (audio only (soundcloud)
1609 # or video only (imgur)) best/worst will fall back to
1610 # best/worst {video,audio}-only format
1611 yield formats[format_idx]
1612
1613 elif selector.type == MERGE: # +
1614 def _merge(formats_pair):
1615 format_1, format_2 = formats_pair
1616
1617 formats_info = []
1618 formats_info.extend(format_1.get('requested_formats', (format_1,)))
1619 formats_info.extend(format_2.get('requested_formats', (format_2,)))
1620
1621 if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
1622 get_no_more = {'video': False, 'audio': False}
1623 for fmt_info in list(formats_info):  # iterate over a copy: entries may be removed below
1624 for aud_vid in ['audio', 'video']:
1625 if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
1626 if get_no_more[aud_vid] and fmt_info in formats_info:
1627 formats_info.remove(fmt_info)
1628 get_no_more[aud_vid] = True
1629
1630 if len(formats_info) == 1:
1631 return formats_info[0]
1632
1633 video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
1634 audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
1635
1636 the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
1637 the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
1638
1639 output_ext = self.params.get('merge_output_format')
1640 if not output_ext:
1641 if the_only_video:
1642 output_ext = the_only_video['ext']
1643 elif the_only_audio and not video_fmts:
1644 output_ext = the_only_audio['ext']
1645 else:
1646 output_ext = 'mkv'
1647
1648 new_dict = {
1649 'requested_formats': formats_info,
1650 'format': '+'.join(fmt_info.get('format') for fmt_info in formats_info),
1651 'format_id': '+'.join(fmt_info.get('format_id') for fmt_info in formats_info),
1652 'ext': output_ext,
1653 }
1654
1655 if the_only_video:
1656 new_dict.update({
1657 'width': the_only_video.get('width'),
1658 'height': the_only_video.get('height'),
1659 'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
1660 'fps': the_only_video.get('fps'),
1661 'vcodec': the_only_video.get('vcodec'),
1662 'vbr': the_only_video.get('vbr'),
1663 'stretched_ratio': the_only_video.get('stretched_ratio'),
1664 })
1665
1666 if the_only_audio:
1667 new_dict.update({
1668 'acodec': the_only_audio.get('acodec'),
1669 'abr': the_only_audio.get('abr'),
1670 })
1671
1672 return new_dict
1673
1674 selector_1, selector_2 = map(_build_selector_function, selector.selector)
1675
1676 def selector_function(ctx):
1677 for pair in itertools.product(
1678 selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
1679 yield _merge(pair)
1680
1681 filters = [self._build_format_filter(f) for f in selector.filters]
1682
1683 def final_selector(ctx):
1684 ctx_copy = copy.deepcopy(ctx)
1685 for _filter in filters:
1686 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1687 return selector_function(ctx_copy)
1688 return final_selector
1689
1690 stream = io.BytesIO(format_spec.encode('utf-8'))
1691 try:
1692 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
1693 except tokenize.TokenError:
1694 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
1695
1696 class TokenIterator(object):
1697 def __init__(self, tokens):
1698 self.tokens = tokens
1699 self.counter = 0
1700
1701 def __iter__(self):
1702 return self
1703
1704 def __next__(self):
1705 if self.counter >= len(self.tokens):
1706 raise StopIteration()
1707 value = self.tokens[self.counter]
1708 self.counter += 1
1709 return value
1710
1711 next = __next__
1712
1713 def restore_last_token(self):
1714 self.counter -= 1
1715
1716 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
1717 return _build_selector_function(parsed_selector)
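# Illustrative use (a sketch, not part of the original file):
#   selector = ydl.build_format_selector('bestvideo[height<=720]+bestaudio/best')
#   for f in selector({'formats': formats, 'incomplete_formats': False}):
#       ...  # each f is a format dict, or a merged dict with 'requested_formats'
# where `formats` is the extractor-provided list, as in process_video_result below.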
1718
1719 def _calc_headers(self, info_dict):
1720 res = std_headers.copy()
1721
1722 add_headers = info_dict.get('http_headers')
1723 if add_headers:
1724 res.update(add_headers)
1725
1726 cookies = self._calc_cookies(info_dict)
1727 if cookies:
1728 res['Cookie'] = cookies
1729
1730 if 'X-Forwarded-For' not in res:
1731 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1732 if x_forwarded_for_ip:
1733 res['X-Forwarded-For'] = x_forwarded_for_ip
1734
1735 return res
1736
1737 def _calc_cookies(self, info_dict):
1738 pr = sanitized_Request(info_dict['url'])
1739 self.cookiejar.add_cookie_header(pr)
1740 return pr.get_header('Cookie')
1741
1742 def process_video_result(self, info_dict, download=True):
1743 assert info_dict.get('_type', 'video') == 'video'
1744
1745 if 'id' not in info_dict:
1746 raise ExtractorError('Missing "id" field in extractor result')
1747 if 'title' not in info_dict:
1748 raise ExtractorError('Missing "title" field in extractor result')
1749
1750 def report_force_conversion(field, field_not, conversion):
1751 self.report_warning(
1752 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1753 % (field, field_not, conversion))
1754
1755 def sanitize_string_field(info, string_field):
1756 field = info.get(string_field)
1757 if field is None or isinstance(field, compat_str):
1758 return
1759 report_force_conversion(string_field, 'a string', 'string')
1760 info[string_field] = compat_str(field)
1761
1762 def sanitize_numeric_fields(info):
1763 for numeric_field in self._NUMERIC_FIELDS:
1764 field = info.get(numeric_field)
1765 if field is None or isinstance(field, compat_numeric_types):
1766 continue
1767 report_force_conversion(numeric_field, 'numeric', 'int')
1768 info[numeric_field] = int_or_none(field)
1769
1770 sanitize_string_field(info_dict, 'id')
1771 sanitize_numeric_fields(info_dict)
1772
1773 if 'playlist' not in info_dict:
1774 # It isn't part of a playlist
1775 info_dict['playlist'] = None
1776 info_dict['playlist_index'] = None
1777
1778 thumbnails = info_dict.get('thumbnails')
1779 if thumbnails is None:
1780 thumbnail = info_dict.get('thumbnail')
1781 if thumbnail:
1782 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
1783 if thumbnails:
1784 thumbnails.sort(key=lambda t: (
1785 t.get('preference') if t.get('preference') is not None else -1,
1786 t.get('width') if t.get('width') is not None else -1,
1787 t.get('height') if t.get('height') is not None else -1,
1788 t.get('id') if t.get('id') is not None else '', t.get('url')))
1789 for i, t in enumerate(thumbnails):
1790 t['url'] = sanitize_url(t['url'])
1791 if t.get('width') and t.get('height'):
1792 t['resolution'] = '%dx%d' % (t['width'], t['height'])
1793 if t.get('id') is None:
1794 t['id'] = '%d' % i
1795
1796 if self.params.get('list_thumbnails'):
1797 self.list_thumbnails(info_dict)
1798 return
1799
1800 thumbnail = info_dict.get('thumbnail')
1801 if thumbnail:
1802 info_dict['thumbnail'] = sanitize_url(thumbnail)
1803 elif thumbnails:
1804 info_dict['thumbnail'] = thumbnails[-1]['url']
1805
1806 if 'display_id' not in info_dict and 'id' in info_dict:
1807 info_dict['display_id'] = info_dict['id']
1808
1809 for ts_key, date_key in (
1810 ('timestamp', 'upload_date'),
1811 ('release_timestamp', 'release_date'),
1812 ):
1813 if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
1814 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1815 # see http://bugs.python.org/issue1646728)
1816 try:
1817 upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
1818 info_dict[date_key] = upload_date.strftime('%Y%m%d')
1819 except (ValueError, OverflowError, OSError):
1820 pass
1821
1822 # Auto generate title fields corresponding to the *_number fields when missing
1823 # in order to always have clean titles. This is very common for TV series.
1824 for field in ('chapter', 'season', 'episode'):
1825 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1826 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
1827
1828 for cc_kind in ('subtitles', 'automatic_captions'):
1829 cc = info_dict.get(cc_kind)
1830 if cc:
1831 for _, subtitle in cc.items():
1832 for subtitle_format in subtitle:
1833 if subtitle_format.get('url'):
1834 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1835 if subtitle_format.get('ext') is None:
1836 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1837
1838 automatic_captions = info_dict.get('automatic_captions')
1839 subtitles = info_dict.get('subtitles')
1840
1841 if self.params.get('listsubtitles', False):
1842 if 'automatic_captions' in info_dict:
1843 self.list_subtitles(
1844 info_dict['id'], automatic_captions, 'automatic captions')
1845 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
1846 return
1847
1848 info_dict['requested_subtitles'] = self.process_subtitles(
1849 info_dict['id'], subtitles, automatic_captions)
1850
1851 # We now pick which formats have to be downloaded
1852 if info_dict.get('formats') is None:
1853 # There's only one format available
1854 formats = [info_dict]
1855 else:
1856 formats = info_dict['formats']
1857
1858 if not formats:
1859 raise ExtractorError('No video formats found!')
1860
1861 def is_wellformed(f):
1862 url = f.get('url')
1863 if not url:
1864 self.report_warning(
1865 '"url" field is missing or empty - skipping format, '
1866 'there is an error in the extractor')
1867 return False
1868 if isinstance(url, bytes):
1869 sanitize_string_field(f, 'url')
1870 return True
1871
1872 # Filter out malformed formats for better extraction robustness
1873 formats = list(filter(is_wellformed, formats))
1874
1875 formats_dict = {}
1876
1877 # We check that all the formats have the format and format_id fields
1878 for i, format in enumerate(formats):
1879 sanitize_string_field(format, 'format_id')
1880 sanitize_numeric_fields(format)
1881 format['url'] = sanitize_url(format['url'])
1882 if not format.get('format_id'):
1883 format['format_id'] = compat_str(i)
1884 else:
1885 # Sanitize format_id from characters used in format selector expression
1886 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
1887 format_id = format['format_id']
1888 if format_id not in formats_dict:
1889 formats_dict[format_id] = []
1890 formats_dict[format_id].append(format)
1891
1892 # Make sure all formats have unique format_id
1893 for format_id, ambiguous_formats in formats_dict.items():
1894 if len(ambiguous_formats) > 1:
1895 for i, format in enumerate(ambiguous_formats):
1896 format['format_id'] = '%s-%d' % (format_id, i)
1897
1898 for i, format in enumerate(formats):
1899 if format.get('format') is None:
1900 format['format'] = '{id} - {res}{note}'.format(
1901 id=format['format_id'],
1902 res=self.format_resolution(format),
1903 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
1904 )
1905 # Automatically determine file extension if missing
1906 if format.get('ext') is None:
1907 format['ext'] = determine_ext(format['url']).lower()
1908 # Automatically determine protocol if missing (useful for format
1909 # selection purposes)
1910 if format.get('protocol') is None:
1911 format['protocol'] = determine_protocol(format)
1912 # Add HTTP headers, so that external programs can use them from the
1913 # json output
1914 full_format_info = info_dict.copy()
1915 full_format_info.update(format)
1916 format['http_headers'] = self._calc_headers(full_format_info)
1917 # Remove private housekeeping stuff
1918 if '__x_forwarded_for_ip' in info_dict:
1919 del info_dict['__x_forwarded_for_ip']
1920
1921 # TODO Central sorting goes here
1922
1923 if formats[0] is not info_dict:
1924 # only set the 'formats' field if the original info_dict lists them;
1925 # otherwise we end up with a circular reference: the first (and only)
1926 # element in the 'formats' field in info_dict is info_dict itself,
1927 # which can't be exported to json
1928 info_dict['formats'] = formats
1929 if self.params.get('listformats'):
1930 self.list_formats(info_dict)
1931 return
1932
1933 req_format = self.params.get('format')
1934 if req_format is None:
1935 req_format = self._default_format_spec(info_dict, download=download)
1936 if self.params.get('verbose'):
1937 self.to_screen('[debug] Default format spec: %s' % req_format)
1938
1939 format_selector = self.build_format_selector(req_format)
1940
1941 # During format selection we may need access to the original
1942 # format set in order to calculate some metrics or do some processing.
1943 # For now we need to be able to guess whether the formats provided by the
1944 # extractor are incomplete (i.e. whether the extractor provides only
1945 # video-only or audio-only formats) so that format selection works properly
1946 # for extractors with such incomplete formats (see
1947 # https://github.com/ytdl-org/youtube-dl/pull/5556).
1948 # Since formats may be filtered during format selection and may no longer
1949 # match the original formats, the results may be incorrect. Thus the
1950 # original formats, or pre-calculated metrics, should be passed to the
1951 # format selection routines as well.
1952 # We will pass a context object containing all necessary additional data
1953 # instead of just formats.
1954 # This fixes incorrect format selection issue (see
1955 # https://github.com/ytdl-org/youtube-dl/issues/10083).
1956 incomplete_formats = (
1957 # All formats are video-only or
1958 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
1959 # all formats are audio-only
1960 or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
1961
1962 ctx = {
1963 'formats': formats,
1964 'incomplete_formats': incomplete_formats,
1965 }
1966
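# Illustrative example (not part of the original file): for an audio-only
# extractor such as soundcloud, every format has vcodec == 'none', so
# incomplete_formats is True and a plain 'best'/'worst' selector falls back
# to the best/worst audio-only format instead of yielding nothing.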
1967 formats_to_download = list(format_selector(ctx))
1968 if not formats_to_download:
1969 raise ExtractorError('requested format not available',
1970 expected=True)
1971
1972 if download:
1973 self.to_screen('[info] Downloading format(s) %s' % ', '.join([f['format_id'] for f in formats_to_download]))
1974 if len(formats_to_download) > 1:
1975 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
1976 for format in formats_to_download:
1977 new_info = dict(info_dict)
1978 new_info.update(format)
1979 self.process_info(new_info)
1980 # We update the info dict with the best quality format (backwards compatibility)
1981 info_dict.update(formats_to_download[-1])
1982 return info_dict
1983
1984 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
1985 """Select the requested subtitles and their format"""
1986 available_subs = {}
1987 if normal_subtitles and self.params.get('writesubtitles'):
1988 available_subs.update(normal_subtitles)
1989 if automatic_captions and self.params.get('writeautomaticsub'):
1990 for lang, cap_info in automatic_captions.items():
1991 if lang not in available_subs:
1992 available_subs[lang] = cap_info
1993
1994 if ((not self.params.get('writesubtitles')
1995 and not self.params.get('writeautomaticsub'))
1996 or not available_subs):
1997 return None
1998
1999 if self.params.get('allsubtitles', False):
2000 requested_langs = available_subs.keys()
2001 else:
2002 if self.params.get('subtitleslangs', False):
2003 requested_langs = self.params.get('subtitleslangs')
2004 elif 'en' in available_subs:
2005 requested_langs = ['en']
2006 else:
2007 requested_langs = [list(available_subs.keys())[0]]
2008
2009 formats_query = self.params.get('subtitlesformat', 'best')
2010 formats_preference = formats_query.split('/') if formats_query else []
2011 subs = {}
2012 for lang in requested_langs:
2013 formats = available_subs.get(lang)
2014 if formats is None:
2015 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
2016 continue
2017 for ext in formats_preference:
2018 if ext == 'best':
2019 f = formats[-1]
2020 break
2021 matches = list(filter(lambda f: f['ext'] == ext, formats))
2022 if matches:
2023 f = matches[-1]
2024 break
2025 else:
2026 f = formats[-1]
2027 self.report_warning(
2028 'No subtitle format found matching "%s" for language %s, '
2029 'using %s' % (formats_query, lang, f['ext']))
2030 subs[lang] = f
2031 return subs
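# Illustrative example (not part of the original file): with
#   {'writesubtitles': True, 'subtitleslangs': ['en'], 'subtitlesformat': 'vtt/best'}
# the 'en' entry is picked from the available subtitles, preferring a 'vtt'
# track and otherwise falling back to the last listed format (with a warning).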
2032
2033 def __forced_printings(self, info_dict, filename, incomplete):
2034 def print_mandatory(field):
2035 if (self.params.get('force%s' % field, False)
2036 and (not incomplete or info_dict.get(field) is not None)):
2037 self.to_stdout(info_dict[field])
2038
2039 def print_optional(field):
2040 if (self.params.get('force%s' % field, False)
2041 and info_dict.get(field) is not None):
2042 self.to_stdout(info_dict[field])
2043
2044 print_mandatory('title')
2045 print_mandatory('id')
2046 if self.params.get('forceurl', False) and not incomplete:
2047 if info_dict.get('requested_formats') is not None:
2048 for f in info_dict['requested_formats']:
2049 self.to_stdout(f['url'] + f.get('play_path', ''))
2050 else:
2051 # For RTMP URLs, also include the playpath
2052 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
2053 print_optional('thumbnail')
2054 print_optional('description')
2055 if self.params.get('forcefilename', False) and filename is not None:
2056 self.to_stdout(filename)
2057 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
2058 self.to_stdout(formatSeconds(info_dict['duration']))
2059 print_mandatory('format')
2060 if self.params.get('forcejson', False):
2061 self.post_extract(info_dict)
2062 self.to_stdout(json.dumps(info_dict, default=repr))
2063
2064 def process_info(self, info_dict):
2065 """Process a single resolved IE result."""
2066
2067 assert info_dict.get('_type', 'video') == 'video'
2068
2069 info_dict.setdefault('__postprocessors', [])
2070
2071 max_downloads = self.params.get('max_downloads')
2072 if max_downloads is not None:
2073 if self._num_downloads >= int(max_downloads):
2074 raise MaxDownloadsReached()
2075
2076 # TODO: backward compatibility, to be removed
2077 info_dict['fulltitle'] = info_dict['title']
2078
2079 if 'format' not in info_dict:
2080 info_dict['format'] = info_dict['ext']
2081
2082 if self._match_entry(info_dict, incomplete=False) is not None:
2083 return
2084
2085 self.post_extract(info_dict)
2086 self._num_downloads += 1
2087
2088 info_dict = self.pre_process(info_dict)
2089
2090 # info_dict['_filename'] needs to be set for backward compatibility
2091 info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
2092 temp_filename = self.prepare_filename(info_dict, 'temp')
2093 files_to_move = {}
2094 skip_dl = self.params.get('skip_download', False)
2095
2096 # Forced printings
2097 self.__forced_printings(info_dict, full_filename, incomplete=False)
2098
2099 if self.params.get('simulate', False):
2100 if self.params.get('force_write_download_archive', False):
2101 self.record_download_archive(info_dict)
2102
2103 # Do nothing else if in simulate mode
2104 return
2105
2106 if full_filename is None:
2107 return
2108
2109 if not self._ensure_dir_exists(encodeFilename(full_filename)):
2110 return
2111 if not self._ensure_dir_exists(encodeFilename(temp_filename)):
2112 return
2113
2114 if self.params.get('writedescription', False):
2115 descfn = self.prepare_filename(info_dict, 'description')
2116 if not self._ensure_dir_exists(encodeFilename(descfn)):
2117 return
2118 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(descfn)):
2119 self.to_screen('[info] Video description is already present')
2120 elif info_dict.get('description') is None:
2121 self.report_warning('There\'s no description to write.')
2122 else:
2123 try:
2124 self.to_screen('[info] Writing video description to: ' + descfn)
2125 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
2126 descfile.write(info_dict['description'])
2127 except (OSError, IOError):
2128 self.report_error('Cannot write description file ' + descfn)
2129 return
2130
2131 if self.params.get('writeannotations', False):
2132 annofn = self.prepare_filename(info_dict, 'annotation')
2133 if not self._ensure_dir_exists(encodeFilename(annofn)):
2134 return
2135 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
2136 self.to_screen('[info] Video annotations are already present')
2137 elif not info_dict.get('annotations'):
2138 self.report_warning('There are no annotations to write.')
2139 else:
2140 try:
2141 self.to_screen('[info] Writing video annotations to: ' + annofn)
2142 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
2143 annofile.write(info_dict['annotations'])
2144 except (KeyError, TypeError):
2145 self.report_warning('There are no annotations to write.')
2146 except (OSError, IOError):
2147 self.report_error('Cannot write annotations file: ' + annofn)
2148 return
2149
2150 def dl(name, info, subtitle=False):
2151 fd = get_suitable_downloader(info, self.params)(self, self.params)
2152 for ph in self._progress_hooks:
2153 fd.add_progress_hook(ph)
2154 if self.params.get('verbose'):
2155 self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
2156 new_info = dict(info)
2157 if new_info.get('http_headers') is None:
2158 new_info['http_headers'] = self._calc_headers(new_info)
2159 return fd.download(name, new_info, subtitle)
2160
2161 subtitles_are_requested = any([self.params.get('writesubtitles', False),
2162 self.params.get('writeautomaticsub')])
2163
2164 if subtitles_are_requested and info_dict.get('requested_subtitles'):
2165 # Subtitle download errors are already handled as non-fatal in the relevant IE,
2166 # so this silently carries on when used with an IE that doesn't support subtitles
2167 subtitles = info_dict['requested_subtitles']
2168 # ie = self.get_info_extractor(info_dict['extractor_key'])
2169 for sub_lang, sub_info in subtitles.items():
2170 sub_format = sub_info['ext']
2171 sub_fn = self.prepare_filename(info_dict, 'subtitle')
2172 sub_filename = subtitles_filename(
2173 temp_filename if not skip_dl else sub_fn,
2174 sub_lang, sub_format, info_dict.get('ext'))
2175 sub_filename_final = subtitles_filename(sub_fn, sub_lang, sub_format, info_dict.get('ext'))
2176 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(sub_filename)):
2177 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
2178 sub_info['filepath'] = sub_filename
2179 files_to_move[sub_filename] = sub_filename_final
2180 else:
2181 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
2182 if sub_info.get('data') is not None:
2183 try:
2184 # Use newline='' to prevent conversion of newline characters
2185 # See https://github.com/ytdl-org/youtube-dl/issues/10268
2186 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
2187 subfile.write(sub_info['data'])
2188 sub_info['filepath'] = sub_filename
2189 files_to_move[sub_filename] = sub_filename_final
2190 except (OSError, IOError):
2191 self.report_error('Cannot write subtitles file ' + sub_filename)
2192 return
2193 else:
2194 try:
2195 dl(sub_filename, sub_info.copy(), subtitle=True)
2196 sub_info['filepath'] = sub_filename
2197 files_to_move[sub_filename] = sub_filename_final
2198 except (ExtractorError, IOError, OSError, ValueError, compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2199 self.report_warning('Unable to download subtitle for "%s": %s' %
2200 (sub_lang, error_to_compat_str(err)))
2201 continue
2202
2203 if skip_dl:
2204 if self.params.get('convertsubtitles', False):
2205 # subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
2206 filename_real_ext = os.path.splitext(full_filename)[1][1:]
2207 filename_wo_ext = (
2208 os.path.splitext(full_filename)[0]
2209 if filename_real_ext == info_dict['ext']
2210 else full_filename)
2211 afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
2212 # if subconv.available:
2213 # info_dict['__postprocessors'].append(subconv)
2214 if os.path.exists(encodeFilename(afilename)):
2215 self.to_screen(
2216 '[download] %s has already been downloaded and '
2217 'converted' % afilename)
2218 else:
2219 try:
2220 self.post_process(full_filename, info_dict, files_to_move)
2221 except PostProcessingError as err:
2222 self.report_error('Postprocessing: %s' % str(err))
2223 return
2224
2225 if self.params.get('writeinfojson', False):
2226 infofn = self.prepare_filename(info_dict, 'infojson')
2227 if not self._ensure_dir_exists(encodeFilename(infofn)):
2228 return
2229 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(infofn)):
2230 self.to_screen('[info] Video metadata is already present')
2231 else:
2232 self.to_screen('[info] Writing video metadata as JSON to: ' + infofn)
2233 try:
2234 write_json_file(self.filter_requested_info(info_dict, self.params.get('clean_infojson', True)), infofn)
2235 except (OSError, IOError):
2236 self.report_error('Cannot write video metadata to JSON file ' + infofn)
2237 return
2238 info_dict['__infojson_filename'] = infofn
2239
2240 thumbfn = self.prepare_filename(info_dict, 'thumbnail')
2241 thumb_fn_temp = temp_filename if not skip_dl else thumbfn
2242 for thumb_ext in self._write_thumbnails(info_dict, thumb_fn_temp):
2243 thumb_filename_temp = replace_extension(thumb_fn_temp, thumb_ext, info_dict.get('ext'))
2244 thumb_filename = replace_extension(thumbfn, thumb_ext, info_dict.get('ext'))
2245 files_to_move[thumb_filename_temp] = thumb_filename
2246
2247 # Write internet shortcut files
2248 url_link = webloc_link = desktop_link = False
2249 if self.params.get('writelink', False):
2250 if sys.platform == "darwin": # macOS.
2251 webloc_link = True
2252 elif sys.platform.startswith("linux"):
2253 desktop_link = True
2254 else: # if sys.platform in ['win32', 'cygwin']:
2255 url_link = True
2256 if self.params.get('writeurllink', False):
2257 url_link = True
2258 if self.params.get('writewebloclink', False):
2259 webloc_link = True
2260 if self.params.get('writedesktoplink', False):
2261 desktop_link = True
2262
2263 if url_link or webloc_link or desktop_link:
2264 if 'webpage_url' not in info_dict:
2265 self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
2266 return
2267 ascii_url = iri_to_uri(info_dict['webpage_url'])
2268
2269 def _write_link_file(extension, template, newline, embed_filename):
2270 linkfn = replace_extension(full_filename, extension, info_dict.get('ext'))
2271 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
2272 self.to_screen('[info] Internet shortcut is already present')
2273 else:
2274 try:
2275 self.to_screen('[info] Writing internet shortcut to: ' + linkfn)
2276 with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', newline=newline) as linkfile:
2277 template_vars = {'url': ascii_url}
2278 if embed_filename:
2279 template_vars['filename'] = linkfn[:-(len(extension) + 1)]
2280 linkfile.write(template % template_vars)
2281 except (OSError, IOError):
2282 self.report_error('Cannot write internet shortcut ' + linkfn)
2283 return False
2284 return True
2285
2286 if url_link:
2287 if not _write_link_file('url', DOT_URL_LINK_TEMPLATE, '\r\n', embed_filename=False):
2288 return
2289 if webloc_link:
2290 if not _write_link_file('webloc', DOT_WEBLOC_LINK_TEMPLATE, '\n', embed_filename=False):
2291 return
2292 if desktop_link:
2293 if not _write_link_file('desktop', DOT_DESKTOP_LINK_TEMPLATE, '\n', embed_filename=True):
2294 return
2295
2296 # Download
2297 must_record_download_archive = False
2298 if not skip_dl:
2299 try:
2300
2301 def existing_file(*filepaths):
2302 ext = info_dict.get('ext')
2303 final_ext = self.params.get('final_ext', ext)
2304 existing_files = []
2305 for file in orderedSet(filepaths):
2306 if final_ext != ext:
2307 converted = replace_extension(file, final_ext, ext)
2308 if os.path.exists(encodeFilename(converted)):
2309 existing_files.append(converted)
2310 if os.path.exists(encodeFilename(file)):
2311 existing_files.append(file)
2312
2313 if not existing_files or self.params.get('overwrites', False):
2314 for file in orderedSet(existing_files):
2315 self.report_file_delete(file)
2316 os.remove(encodeFilename(file))
2317 return None
2318
2319 self.report_file_already_downloaded(existing_files[0])
2320 info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
2321 return existing_files[0]
2322
2323 success = True
2324 if info_dict.get('requested_formats') is not None:
2325 downloaded = []
2326 merger = FFmpegMergerPP(self)
2327 if self.params.get('allow_unplayable_formats'):
2328 self.report_warning(
2329 'You have requested merging of multiple formats '
2330 'while also allowing unplayable formats to be downloaded. '
2331 'The formats won\'t be merged to prevent data corruption.')
2332 elif not merger.available:
2333 self.report_warning(
2334 'You have requested merging of multiple formats but ffmpeg is not installed. '
2335 'The formats won\'t be merged.')
2336
2337 def compatible_formats(formats):
2338 # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
2339 video_formats = [format for format in formats if format.get('vcodec') != 'none']
2340 audio_formats = [format for format in formats if format.get('acodec') != 'none']
2341 if len(video_formats) > 2 or len(audio_formats) > 2:
2342 return False
2343
2344 # Check extension
2345 exts = set(format.get('ext') for format in formats)
2346 COMPATIBLE_EXTS = (
2347 set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
2348 set(('webm',)),
2349 )
2350 for ext_sets in COMPATIBLE_EXTS:
2351 if ext_sets.issuperset(exts):
2352 return True
2353 # TODO: Check acodec/vcodec
2354 return False
2355
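# e.g. an mp4 video plus an m4a audio track can be merged as-is (both
# extensions are in the first COMPATIBLE_EXTS set), while mp4 + webm
# cannot and will be remuxed into mkv below (illustrative note, not
# part of the original file)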
2356 requested_formats = info_dict['requested_formats']
2357 old_ext = info_dict['ext']
2358 if self.params.get('merge_output_format') is None:
2359 if not compatible_formats(requested_formats):
2360 info_dict['ext'] = 'mkv'
2361 self.report_warning(
2362 'Requested formats are incompatible for merge and will be merged into mkv.')
2363 if (info_dict['ext'] == 'webm'
2364 and self.params.get('writethumbnail', False)
2365 and info_dict.get('thumbnails')):
2366 info_dict['ext'] = 'mkv'
2367 self.report_warning(
2368 'webm doesn\'t support embedding a thumbnail, mkv will be used.')
2369
2370 def correct_ext(filename):
2371 filename_real_ext = os.path.splitext(filename)[1][1:]
2372 filename_wo_ext = (
2373 os.path.splitext(filename)[0]
2374 if filename_real_ext == old_ext
2375 else filename)
2376 return '%s.%s' % (filename_wo_ext, info_dict['ext'])
2377
2378 # Ensure filename always has a correct extension for successful merge
2379 full_filename = correct_ext(full_filename)
2380 temp_filename = correct_ext(temp_filename)
2381 dl_filename = existing_file(full_filename, temp_filename)
2382 info_dict['__real_download'] = False
2383 if dl_filename is None:
2384 for f in requested_formats:
2385 new_info = dict(info_dict)
2386 new_info.update(f)
2387 fname = prepend_extension(
2388 self.prepare_filename(new_info, 'temp'),
2389 'f%s' % f['format_id'], new_info['ext'])
2390 if not self._ensure_dir_exists(fname):
2391 return
2392 downloaded.append(fname)
2393 partial_success, real_download = dl(fname, new_info)
2394 info_dict['__real_download'] = info_dict['__real_download'] or real_download
2395 success = success and partial_success
2396 if merger.available and not self.params.get('allow_unplayable_formats'):
2397 info_dict['__postprocessors'].append(merger)
2398 info_dict['__files_to_merge'] = downloaded
2399 # Even if nothing was downloaded, the merge itself only happens now
2400 info_dict['__real_download'] = True
2401 else:
2402 for file in downloaded:
2403 files_to_move[file] = None
2404 else:
2405 # Just a single file
2406 dl_filename = existing_file(full_filename, temp_filename)
2407 if dl_filename is None:
2408 success, real_download = dl(temp_filename, info_dict)
2409 info_dict['__real_download'] = real_download
2410
2411 dl_filename = dl_filename or temp_filename
2412 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
2413
2414 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2415 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
2416 return
2417 except (OSError, IOError) as err:
2418 raise UnavailableVideoError(err)
2419 except (ContentTooShortError, ) as err:
2420 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
2421 return
2422
2423 if success and full_filename != '-':
2424 # Fixup content
2425 fixup_policy = self.params.get('fixup')
2426 if fixup_policy is None:
2427 fixup_policy = 'detect_or_warn'
2428
2429 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg to fix this automatically.'
2430
2431 stretched_ratio = info_dict.get('stretched_ratio')
2432 if stretched_ratio is not None and stretched_ratio != 1:
2433 if fixup_policy == 'warn':
2434 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
2435 info_dict['id'], stretched_ratio))
2436 elif fixup_policy == 'detect_or_warn':
2437 stretched_pp = FFmpegFixupStretchedPP(self)
2438 if stretched_pp.available:
2439 info_dict['__postprocessors'].append(stretched_pp)
2440 else:
2441 self.report_warning(
2442 '%s: Non-uniform pixel ratio (%s). %s'
2443 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
2444 else:
2445 assert fixup_policy in ('ignore', 'never')
2446
2447 if (info_dict.get('requested_formats') is None
2448 and info_dict.get('container') == 'm4a_dash'
2449 and info_dict.get('ext') == 'm4a'):
2450 if fixup_policy == 'warn':
2451 self.report_warning(
2452 '%s: writing DASH m4a. '
2453 'Only some players support this container.'
2454 % info_dict['id'])
2455 elif fixup_policy == 'detect_or_warn':
2456 fixup_pp = FFmpegFixupM4aPP(self)
2457 if fixup_pp.available:
2458 info_dict['__postprocessors'].append(fixup_pp)
2459 else:
2460 self.report_warning(
2461 '%s: writing DASH m4a. '
2462 'Only some players support this container. %s'
2463 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
2464 else:
2465 assert fixup_policy in ('ignore', 'never')
2466
2467 if ('protocol' in info_dict
2468 and get_suitable_downloader(info_dict, self.params).__name__ == 'HlsFD'):
2469 if fixup_policy == 'warn':
2470 self.report_warning('%s: malformed AAC bitstream detected.' % (
2471 info_dict['id']))
2472 elif fixup_policy == 'detect_or_warn':
2473 fixup_pp = FFmpegFixupM3u8PP(self)
2474 if fixup_pp.available:
2475 info_dict['__postprocessors'].append(fixup_pp)
2476 else:
2477 self.report_warning(
2478 '%s: malformed AAC bitstream detected. %s'
2479 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
2480 else:
2481 assert fixup_policy in ('ignore', 'never')
2482
2483 try:
2484 info_dict = self.post_process(dl_filename, info_dict, files_to_move)
2485 except PostProcessingError as err:
2486 self.report_error('Postprocessing: %s' % str(err))
2487 return
2488 try:
2489 for ph in self._post_hooks:
2490 ph(info_dict['filepath'])
2491 except Exception as err:
2492 self.report_error('post hooks: %s' % str(err))
2493 return
2494 must_record_download_archive = True
2495
2496 if must_record_download_archive or self.params.get('force_write_download_archive', False):
2497 self.record_download_archive(info_dict)
2498 max_downloads = self.params.get('max_downloads')
2499 if max_downloads is not None and self._num_downloads >= int(max_downloads):
2500 raise MaxDownloadsReached()
2501
2502 def download(self, url_list):
2503 """Download a given list of URLs."""
2504 outtmpl = self.outtmpl_dict['default']
2505 if (len(url_list) > 1
2506 and outtmpl != '-'
2507 and '%' not in outtmpl
2508 and self.params.get('max_downloads') != 1):
2509 raise SameFileError(outtmpl)
2510
2511 for url in url_list:
2512 try:
2513 # Despite its name, extract_info also downloads the videos
2514 res = self.extract_info(
2515 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
2516 except UnavailableVideoError:
2517 self.report_error('unable to download video')
2518 except MaxDownloadsReached:
2519 self.to_screen('[info] Maximum number of downloaded files reached')
2520 raise
2521 except ExistingVideoReached:
2522 self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
2523 raise
2524 except RejectedVideoReached:
2525 self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
2526 raise
2527 else:
2528 if self.params.get('dump_single_json', False):
2529 self.post_extract(res)
2530 self.to_stdout(json.dumps(res, default=repr))
2531
2532 return self._download_retcode
2533
2534 def download_with_info_file(self, info_filename):
2535 with contextlib.closing(fileinput.FileInput(
2536 [info_filename], mode='r',
2537 openhook=fileinput.hook_encoded('utf-8'))) as f:
2538 # FileInput doesn't have a read method, so we can't call json.load
2539 info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
2540 try:
2541 self.process_ie_result(info, download=True)
2542 except (DownloadError, EntryNotInPlaylist):
2543 webpage_url = info.get('webpage_url')
2544 if webpage_url is not None:
2545 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
2546 return self.download([webpage_url])
2547 else:
2548 raise
2549 return self._download_retcode
2550
2551 @staticmethod
2552 def filter_requested_info(info_dict, actually_filter=True):
2553 if not actually_filter:
2554 info_dict['epoch'] = int(time.time())
2555 return info_dict
2556 exceptions = {
2557 'remove': ['requested_formats', 'requested_subtitles', 'requested_entries', 'filepath', 'entries'],
2558 'keep': ['_type'],
2559 }
2560 keep_key = lambda k: k in exceptions['keep'] or not (k.startswith('_') or k in exceptions['remove'])
2561 filter_fn = lambda obj: (
2562 list(map(filter_fn, obj)) if isinstance(obj, (list, tuple))
2563 else obj if not isinstance(obj, dict)
2564 else dict((k, filter_fn(v)) for k, v in obj.items() if keep_key(k)))
2565 return filter_fn(info_dict)
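# Illustrative example (not part of the original file):
#   filter_requested_info({'_type': 'video', '_filename': 'x.mp4',
#                          'requested_formats': [...], 'title': 't'})
# keeps only {'_type': 'video', 'title': 't'}: '_type' is explicitly kept,
# while other '_'-prefixed keys and the keys in exceptions['remove'] are
# dropped, recursively for nested dicts and lists.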
2566
2567 def run_pp(self, pp, infodict):
2568 files_to_delete = []
2569 if '__files_to_move' not in infodict:
2570 infodict['__files_to_move'] = {}
2571 files_to_delete, infodict = pp.run(infodict)
2572 if not files_to_delete:
2573 return infodict
2574
2575 if self.params.get('keepvideo', False):
2576 for f in files_to_delete:
2577 infodict['__files_to_move'].setdefault(f, '')
2578 else:
2579 for old_filename in set(files_to_delete):
2580 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2581 try:
2582 os.remove(encodeFilename(old_filename))
2583 except (IOError, OSError):
2584 self.report_warning('Unable to remove downloaded original file')
2585 if old_filename in infodict['__files_to_move']:
2586 del infodict['__files_to_move'][old_filename]
2587 return infodict
2588
2589 @staticmethod
2590 def post_extract(info_dict):
2591 def actual_post_extract(info_dict):
2592 if info_dict.get('_type') in ('playlist', 'multi_video'):
2593 for video_dict in info_dict.get('entries', {}):
2594 actual_post_extract(video_dict or {})
2595 return
2596
2597 if '__post_extractor' not in info_dict:
2598 return
2599 post_extractor = info_dict['__post_extractor']
2600 if post_extractor:
2601 info_dict.update(post_extractor().items())
2602 del info_dict['__post_extractor']
2603 return
2604
2605 actual_post_extract(info_dict or {})
2606
2607 def pre_process(self, ie_info):
2608 info = dict(ie_info)
2609 for pp in self._pps['beforedl']:
2610 info = self.run_pp(pp, info)
2611 return info
2612
2613 def post_process(self, filename, ie_info, files_to_move=None):
2614 """Run all the postprocessors on the given file."""
2615 info = dict(ie_info)
2616 info['filepath'] = filename
2617 info['__files_to_move'] = files_to_move or {}
2618
2619 for pp in ie_info.get('__postprocessors', []) + self._pps['normal']:
2620 info = self.run_pp(pp, info)
2621 info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
2622 del info['__files_to_move']
2623 for pp in self._pps['aftermove']:
2624 info = self.run_pp(pp, info)
2625 return info
2626
2627 def _make_archive_id(self, info_dict):
2628 video_id = info_dict.get('id')
2629 if not video_id:
2630 return
2631 # Future-proof against any change in case
2632 # and for backwards compatibility with prior versions
2633 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
2634 if extractor is None:
2635 url = str_or_none(info_dict.get('url'))
2636 if not url:
2637 return
2638 # Try to find matching extractor for the URL and take its ie_key
2639 for ie in self._ies:
2640 if ie.suitable(url):
2641 extractor = ie.ie_key()
2642 break
2643 else:
2644 return
2645 return '%s %s' % (extractor.lower(), video_id)
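# Illustrative example (not part of the original file): a video with
# extractor_key 'Youtube' and id 'abc123' gets the archive line
# 'youtube abc123' - lower-cased extractor key, a space, then the video id.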
2646
2647 def in_download_archive(self, info_dict):
2648 fn = self.params.get('download_archive')
2649 if fn is None:
2650 return False
2651
2652 vid_id = self._make_archive_id(info_dict)
2653 if not vid_id:
2654 return False # Incomplete video information
2655
2656 return vid_id in self.archive
2657
2658 def record_download_archive(self, info_dict):
2659 fn = self.params.get('download_archive')
2660 if fn is None:
2661 return
2662 vid_id = self._make_archive_id(info_dict)
2663 assert vid_id
2664 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
2665 archive_file.write(vid_id + '\n')
2666 self.archive.add(vid_id)
2667
2668 @staticmethod
2669 def format_resolution(format, default='unknown'):
2670 if format.get('vcodec') == 'none':
2671 return 'audio only'
2672 if format.get('resolution') is not None:
2673 return format['resolution']
2674 if format.get('width') and format.get('height'):
2675 res = '%dx%d' % (format['width'], format['height'])
2676 elif format.get('height'):
2677 res = '%sp' % format['height']
2678 elif format.get('width'):
2679 res = '%dx?' % format['width']
2680 else:
2681 res = default
2682 return res
2683
2684 def _format_note(self, fdict):
2685 res = ''
2686 if fdict.get('ext') in ['f4f', 'f4m']:
2687 res += '(unsupported) '
2688 if fdict.get('language'):
2689 if res:
2690 res += ' '
2691 res += '[%s] ' % fdict['language']
2692 if fdict.get('format_note') is not None:
2693 res += fdict['format_note'] + ' '
2694 if fdict.get('tbr') is not None:
2695 res += '%4dk ' % fdict['tbr']
2696 if fdict.get('container') is not None:
2697 if res:
2698 res += ', '
2699 res += '%s container' % fdict['container']
2700 if (fdict.get('vcodec') is not None
2701 and fdict.get('vcodec') != 'none'):
2702 if res:
2703 res += ', '
2704 res += fdict['vcodec']
2705 if fdict.get('vbr') is not None:
2706 res += '@'
2707 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2708 res += 'video@'
2709 if fdict.get('vbr') is not None:
2710 res += '%4dk' % fdict['vbr']
2711 if fdict.get('fps') is not None:
2712 if res:
2713 res += ', '
2714 res += '%sfps' % fdict['fps']
2715 if fdict.get('acodec') is not None:
2716 if res:
2717 res += ', '
2718 if fdict['acodec'] == 'none':
2719 res += 'video only'
2720 else:
2721 res += '%-5s' % fdict['acodec']
2722 elif fdict.get('abr') is not None:
2723 if res:
2724 res += ', '
2725 res += 'audio'
2726 if fdict.get('abr') is not None:
2727 res += '@%3dk' % fdict['abr']
2728 if fdict.get('asr') is not None:
2729 res += ' (%5dHz)' % fdict['asr']
2730 if fdict.get('filesize') is not None:
2731 if res:
2732 res += ', '
2733 res += format_bytes(fdict['filesize'])
2734 elif fdict.get('filesize_approx') is not None:
2735 if res:
2736 res += ', '
2737 res += '~' + format_bytes(fdict['filesize_approx'])
2738 return res
2739
2740 def _format_note_table(self, f):
2741 def join_fields(*vargs):
2742 return ', '.join((val for val in vargs if val != ''))
2743
2744 return join_fields(
2745 'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
2746 format_field(f, 'language', '[%s]'),
2747 format_field(f, 'format_note'),
2748 format_field(f, 'container', ignore=(None, f.get('ext'))),
2749 format_field(f, 'asr', '%5dHz'))
2750
2751 def list_formats(self, info_dict):
2752 formats = info_dict.get('formats', [info_dict])
2753 new_format = self.params.get('listformats_table', False)
2754 if new_format:
2755 table = [
2756 [
2757 format_field(f, 'format_id'),
2758 format_field(f, 'ext'),
2759 self.format_resolution(f),
2760 format_field(f, 'fps', '%d'),
2761 '|',
2762 format_field(f, 'filesize', ' %s', func=format_bytes) + format_field(f, 'filesize_approx', '~%s', func=format_bytes),
2763 format_field(f, 'tbr', '%4dk'),
2764 f.get('protocol').replace('http_dash_segments', 'dash').replace('native', 'n').replace('niconico_', ''),
2765 '|',
2766 format_field(f, 'vcodec', default='unknown').replace('none', ''),
2767 format_field(f, 'vbr', '%4dk'),
2768 format_field(f, 'acodec', default='unknown').replace('none', ''),
2769 format_field(f, 'abr', '%3dk'),
2770 format_field(f, 'asr', '%5dHz'),
2771 self._format_note_table(f)]
2772 for f in formats
2773 if f.get('preference') is None or f['preference'] >= -1000]
2774 header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', ' TBR', 'PROTO',
2775 '|', 'VCODEC', ' VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
2776 else:
2777 table = [
2778 [
2779 format_field(f, 'format_id'),
2780 format_field(f, 'ext'),
2781 self.format_resolution(f),
2782 self._format_note(f)]
2783 for f in formats
2784 if f.get('preference') is None or f['preference'] >= -1000]
2785 header_line = ['format code', 'extension', 'resolution', 'note']
2786
2787 self.to_screen(
2788 '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
2789 header_line,
2790 table,
2791 delim=new_format,
2792 extraGap=(0 if new_format else 1),
2793 hideEmpty=new_format)))
2794
2795 def list_thumbnails(self, info_dict):
2796 thumbnails = info_dict.get('thumbnails')
2797 if not thumbnails:
2798 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2799 return
2800
2801 self.to_screen(
2802 '[info] Thumbnails for %s:' % info_dict['id'])
2803 self.to_screen(render_table(
2804 ['ID', 'width', 'height', 'URL'],
2805 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
2806
2807 def list_subtitles(self, video_id, subtitles, name='subtitles'):
2808 if not subtitles:
2809 self.to_screen('%s has no %s' % (video_id, name))
2810 return
2811 self.to_screen(
2812 'Available %s for %s:' % (name, video_id))
2813 self.to_screen(render_table(
2814 ['Language', 'formats'],
2815 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2816 for lang, formats in subtitles.items()]))
2817
2818 def urlopen(self, req):
2819 """ Start an HTTP download """
2820 if isinstance(req, compat_basestring):
2821 req = sanitized_Request(req)
2822 return self._opener.open(req, timeout=self._socket_timeout)
2823
2824 def print_debug_header(self):
2825 if not self.params.get('verbose'):
2826 return
2827
2828 if type('') is not compat_str:
2829 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
2830 self.report_warning(
2831 'Your Python is broken! Update to a newer and supported version')
2832
2833 stdout_encoding = getattr(
2834 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
2835 encoding_str = (
2836 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2837 locale.getpreferredencoding(),
2838 sys.getfilesystemencoding(),
2839 stdout_encoding,
2840 self.get_encoding()))
2841 write_string(encoding_str, encoding=None)
2842
2843 source = (
2844 '(exe)' if hasattr(sys, 'frozen')
2845 else '(zip)' if isinstance(globals().get('__loader__'), zipimporter)
2846 else '(source)' if os.path.basename(sys.argv[0]) == '__main__.py'
2847 else '')
2848 self._write_string('[debug] yt-dlp version %s %s\n' % (__version__, source))
2849 if _LAZY_LOADER:
2850 self._write_string('[debug] Lazy loading extractors enabled\n')
2851 if _PLUGIN_CLASSES:
2852 self._write_string(
2853 '[debug] Plugin Extractors: %s\n' % [ie.ie_key() for ie in _PLUGIN_CLASSES])
2854 try:
2855 sp = subprocess.Popen(
2856 ['git', 'rev-parse', '--short', 'HEAD'],
2857 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2858 cwd=os.path.dirname(os.path.abspath(__file__)))
2859 out, err = process_communicate_or_kill(sp)
2860 out = out.decode().strip()
2861 if re.match('[0-9a-f]+', out):
2862 self._write_string('[debug] Git HEAD: %s\n' % out)
2863 except Exception:
2864 try:
2865 sys.exc_clear()
2866 except Exception:
2867 pass
2868
2869 def python_implementation():
2870 impl_name = platform.python_implementation()
2871 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2872 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2873 return impl_name
2874
2875 self._write_string('[debug] Python version %s (%s %s) - %s\n' % (
2876 platform.python_version(),
2877 python_implementation(),
2878 platform.architecture()[0],
2879 platform_name()))
2880
2881 exe_versions = FFmpegPostProcessor.get_versions(self)
2882 exe_versions['rtmpdump'] = rtmpdump_version()
2883 exe_versions['phantomjs'] = PhantomJSwrapper._version()
2884 exe_str = ', '.join(
2885 '%s %s' % (exe, v)
2886 for exe, v in sorted(exe_versions.items())
2887 if v
2888 )
2889 if not exe_str:
2890 exe_str = 'none'
2891 self._write_string('[debug] exe versions: %s\n' % exe_str)
2892
2893 proxy_map = {}
2894 for handler in self._opener.handlers:
2895 if hasattr(handler, 'proxies'):
2896 proxy_map.update(handler.proxies)
2897 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
2898
2899 if self.params.get('call_home', False):
2900 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2901 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
2902 return
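# NB: the update check below is unreachable because of the return above;
# presumably it is left disabled on purpose, since yt-dl.org's latest
# version does not apply to yt-dlp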
2903 latest_version = self.urlopen(
2904 'https://yt-dl.org/latest/version').read().decode('utf-8')
2905 if version_tuple(latest_version) > version_tuple(__version__):
2906 self.report_warning(
2907 'You are using an outdated version (newest version: %s)! '
2908 'See https://yt-dl.org/update if you need help updating.' %
2909 latest_version)
2910
2911 def _setup_opener(self):
2912 timeout_val = self.params.get('socket_timeout')
2913 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
2914
2915 opts_cookiefile = self.params.get('cookiefile')
2916 opts_proxy = self.params.get('proxy')
2917
2918 if opts_cookiefile is None:
2919 self.cookiejar = compat_cookiejar.CookieJar()
2920 else:
2921 opts_cookiefile = expand_path(opts_cookiefile)
2922 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
2923 if os.access(opts_cookiefile, os.R_OK):
2924 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
2925
2926 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
2927 if opts_proxy is not None:
2928 if opts_proxy == '':
2929 proxies = {}
2930 else:
2931 proxies = {'http': opts_proxy, 'https': opts_proxy}
2932 else:
2933 proxies = compat_urllib_request.getproxies()
2934 # Use the HTTP proxy for HTTPS as well when no HTTPS proxy is given (https://github.com/ytdl-org/youtube-dl/issues/805)
2935 if 'http' in proxies and 'https' not in proxies:
2936 proxies['https'] = proxies['http']
2937 proxy_handler = PerRequestProxyHandler(proxies)
2938
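# debuglevel=1 makes the urllib handlers dump all HTTP traffic
# (enabled by --print-traffic)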
2939 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
2940 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2941 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
2942 redirect_handler = YoutubeDLRedirectHandler()
2943 data_handler = compat_urllib_request_DataHandler()
2944
2945 # Passing our own FileHandler instance prevents build_opener from adding
2946 # the default FileHandler, allowing us to disable the file:// protocol,
2947 # which could otherwise be abused for malicious purposes (see
2948 # https://github.com/ytdl-org/youtube-dl/issues/8227)
2949 file_handler = compat_urllib_request.FileHandler()
2950
2951 def file_open(*args, **kwargs):
2952 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
2953 file_handler.file_open = file_open
2954
2955 opener = compat_urllib_request.build_opener(
2956 proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
2957
2958 # Delete the default user-agent header, which would otherwise apply in
2959 # cases where our custom HTTP handler doesn't come into play
2960 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
2961 opener.addheaders = []
2962 self._opener = opener
2963
2964 def encode(self, s):
2965 if isinstance(s, bytes):
2966 return s # Already encoded
2967
2968 try:
2969 return s.encode(self.get_encoding())
2970 except UnicodeEncodeError as err:
2971 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2972 raise
2973
2974 def get_encoding(self):
2975 encoding = self.params.get('encoding')
2976 if encoding is None:
2977 encoding = preferredencoding()
2978 return encoding
2979
2980 def _write_thumbnails(self, info_dict, filename): # returns the extensions of the thumbnails written
2981 write_all = self.params.get('write_all_thumbnails', False)
2982 thumbnails = []
2983 if write_all or self.params.get('writethumbnail', False):
2984 thumbnails = info_dict.get('thumbnails') or []
2985 multiple = write_all and len(thumbnails) > 1
2986
2987 ret = []
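# When writing all thumbnails, keep the listed order; otherwise iterate
# in reverse so the last (usually highest-preference) thumbnail is tried
# first, and stop after the first successful write (see the break below)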
2988 for t in thumbnails[::1 if write_all else -1]:
2989 thumb_ext = determine_ext(t['url'], 'jpg')
2990 suffix = '%s.' % t['id'] if multiple else ''
2991 thumb_display_id = '%s ' % t['id'] if multiple else ''
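# thumb_display_id carries a trailing space so messages like
# 'Thumbnail %sis already present' render correctly in both cases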
2992 t['filepath'] = thumb_filename = replace_extension(filename, suffix + thumb_ext, info_dict.get('ext'))
2993
2994 if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(thumb_filename)):
2995 ret.append(suffix + thumb_ext)
2996 self.to_screen('[%s] %s: Thumbnail %sis already present' %
2997 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2998 else:
2999 self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
3000 (info_dict['extractor'], info_dict['id'], thumb_display_id))
3001 try:
3002 uf = self.urlopen(t['url'])
3003 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
3004 shutil.copyfileobj(uf, thumbf)
3005 ret.append(suffix + thumb_ext)
3006 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
3007 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
3008 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
3009 self.report_warning('Unable to download thumbnail "%s": %s' %
3010 (t['url'], error_to_compat_str(err)))
3011 if ret and not write_all:
3012 break
3013 return ret