# Source: jfr.im git mirror - yt-dlp.git / youtube_dl / YoutubeDL.py
# (commit: "Start moving to ytdl-org")
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import contextlib
8 import copy
9 import datetime
10 import errno
11 import fileinput
12 import io
13 import itertools
14 import json
15 import locale
16 import operator
17 import os
18 import platform
19 import re
20 import shutil
21 import subprocess
22 import socket
23 import sys
24 import time
25 import tokenize
26 import traceback
27 import random
28
29 from string import ascii_letters
30
31 from .compat import (
32 compat_basestring,
33 compat_cookiejar,
34 compat_get_terminal_size,
35 compat_http_client,
36 compat_kwargs,
37 compat_numeric_types,
38 compat_os_name,
39 compat_str,
40 compat_tokenize_tokenize,
41 compat_urllib_error,
42 compat_urllib_request,
43 compat_urllib_request_DataHandler,
44 )
45 from .utils import (
46 age_restricted,
47 args_to_str,
48 ContentTooShortError,
49 date_from_str,
50 DateRange,
51 DEFAULT_OUTTMPL,
52 determine_ext,
53 determine_protocol,
54 DownloadError,
55 encode_compat_str,
56 encodeFilename,
57 error_to_compat_str,
58 expand_path,
59 ExtractorError,
60 format_bytes,
61 formatSeconds,
62 GeoRestrictedError,
63 int_or_none,
64 ISO3166Utils,
65 locked_file,
66 make_HTTPS_handler,
67 MaxDownloadsReached,
68 orderedSet,
69 PagedList,
70 parse_filesize,
71 PerRequestProxyHandler,
72 platform_name,
73 PostProcessingError,
74 preferredencoding,
75 prepend_extension,
76 register_socks_protocols,
77 render_table,
78 replace_extension,
79 SameFileError,
80 sanitize_filename,
81 sanitize_path,
82 sanitize_url,
83 sanitized_Request,
84 std_headers,
85 str_or_none,
86 subtitles_filename,
87 UnavailableVideoError,
88 url_basename,
89 version_tuple,
90 write_json_file,
91 write_string,
92 YoutubeDLCookieJar,
93 YoutubeDLCookieProcessor,
94 YoutubeDLHandler,
95 )
96 from .cache import Cache
97 from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
98 from .extractor.openload import PhantomJSwrapper
99 from .downloader import get_suitable_downloader
100 from .downloader.rtmp import rtmpdump_version
101 from .postprocessor import (
102 FFmpegFixupM3u8PP,
103 FFmpegFixupM4aPP,
104 FFmpegFixupStretchedPP,
105 FFmpegMergerPP,
106 FFmpegPostProcessor,
107 get_postprocessor,
108 )
109 from .version import __version__
110
# ctypes is only needed on Windows: to_console_title() uses the
# kernel32 console API through it.
if compat_os_name == 'nt':
    import ctypes
113
114
115 class YoutubeDL(object):
116 """YoutubeDL class.
117
118 YoutubeDL objects are the ones responsible of downloading the
119 actual video file and writing it to disk if the user has requested
120 it, among some other tasks. In most cases there should be one per
121 program. As, given a video URL, the downloader doesn't know how to
122 extract all the needed information, task that InfoExtractors do, it
123 has to pass the URL to one of them.
124
125 For this, YoutubeDL objects have a method that allows
126 InfoExtractors to be registered in a given order. When it is passed
127 a URL, the YoutubeDL object handles it to the first InfoExtractor it
128 finds that reports being able to handle it. The InfoExtractor extracts
129 all the information about the video or videos the URL refers to, and
130 YoutubeDL process the extracted information, possibly using a File
131 Downloader to download the video.
132
133 YoutubeDL objects accept a lot of parameters. In order not to saturate
134 the object constructor with arguments, it receives a dictionary of
135 options instead. These options are available through the params
136 attribute for the InfoExtractors to use. The YoutubeDL also
137 registers itself as the downloader in charge for the InfoExtractors
138 that are added to it, so this is a "mutual registration".
139
140 Available options:
141
142 username: Username for authentication purposes.
143 password: Password for authentication purposes.
144 videopassword: Password for accessing a video.
145 ap_mso: Adobe Pass multiple-system operator identifier.
146 ap_username: Multiple-system operator account username.
147 ap_password: Multiple-system operator account password.
148 usenetrc: Use netrc for authentication instead.
149 verbose: Print additional info to stdout.
150 quiet: Do not print messages to stdout.
151 no_warnings: Do not print out anything for warnings.
152 forceurl: Force printing final URL.
153 forcetitle: Force printing title.
154 forceid: Force printing ID.
155 forcethumbnail: Force printing thumbnail URL.
156 forcedescription: Force printing description.
157 forcefilename: Force printing final filename.
158 forceduration: Force printing duration.
159 forcejson: Force printing info_dict as JSON.
160 dump_single_json: Force printing the info_dict of the whole playlist
161 (or video) as a single JSON line.
162 simulate: Do not download the video files.
163 format: Video format code. See options.py for more information.
164 outtmpl: Template for output names.
165 restrictfilenames: Do not allow "&" and spaces in file names
166 ignoreerrors: Do not stop on download errors.
167 force_generic_extractor: Force downloader to use the generic extractor
168 nooverwrites: Prevent overwriting files.
169 playliststart: Playlist item to start at.
170 playlistend: Playlist item to end at.
171 playlist_items: Specific indices of playlist to download.
172 playlistreverse: Download playlist items in reverse order.
173 playlistrandom: Download playlist items in random order.
174 matchtitle: Download only matching titles.
175 rejecttitle: Reject downloads for matching titles.
176 logger: Log messages to a logging.Logger instance.
177 logtostderr: Log messages to stderr instead of stdout.
178 writedescription: Write the video description to a .description file
179 writeinfojson: Write the video description to a .info.json file
180 writeannotations: Write the video annotations to a .annotations.xml file
181 writethumbnail: Write the thumbnail image to a file
182 write_all_thumbnails: Write all thumbnail formats to files
183 writesubtitles: Write the video subtitles to a file
184 writeautomaticsub: Write the automatically generated subtitles to a file
185 allsubtitles: Downloads all the subtitles of the video
186 (requires writesubtitles or writeautomaticsub)
187 listsubtitles: Lists all available subtitles for the video
188 subtitlesformat: The format code for subtitles
189 subtitleslangs: List of languages of the subtitles to download
190 keepvideo: Keep the video file after post-processing
191 daterange: A DateRange object, download only if the upload_date is in the range.
192 skip_download: Skip the actual download of the video file
193 cachedir: Location of the cache files in the filesystem.
194 False to disable filesystem cache.
195 noplaylist: Download single video instead of a playlist if in doubt.
196 age_limit: An integer representing the user's age in years.
197 Unsuitable videos for the given age are skipped.
198 min_views: An integer representing the minimum view count the video
199 must have in order to not be skipped.
200 Videos without view count information are always
201 downloaded. None for no limit.
202 max_views: An integer representing the maximum view count.
203 Videos that are more popular than that are not
204 downloaded.
205 Videos without view count information are always
206 downloaded. None for no limit.
207 download_archive: File name of a file where all downloads are recorded.
208 Videos already present in the file are not downloaded
209 again.
210 cookiefile: File name where cookies should be read from and dumped to.
211 nocheckcertificate:Do not verify SSL certificates
212 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
213 At the moment, this is only supported by YouTube.
214 proxy: URL of the proxy server to use
215 geo_verification_proxy: URL of the proxy to use for IP address verification
216 on geo-restricted sites.
217 socket_timeout: Time to wait for unresponsive hosts, in seconds
218 bidi_workaround: Work around buggy terminals without bidirectional text
219 support, using fridibi
220 debug_printtraffic:Print out sent and received HTTP traffic
221 include_ads: Download ads as well
222 default_search: Prepend this string if an input url is not valid.
223 'auto' for elaborate guessing
224 encoding: Use this encoding instead of the system-specified.
225 extract_flat: Do not resolve URLs, return the immediate result.
226 Pass in 'in_playlist' to only show this behavior for
227 playlist items.
228 postprocessors: A list of dictionaries, each with an entry
229 * key: The name of the postprocessor. See
230 youtube_dl/postprocessor/__init__.py for a list.
231 as well as any further keyword arguments for the
232 postprocessor.
233 progress_hooks: A list of functions that get called on download
234 progress, with a dictionary with the entries
235 * status: One of "downloading", "error", or "finished".
236 Check this first and ignore unknown values.
237
238 If status is one of "downloading", or "finished", the
239 following properties may also be present:
240 * filename: The final filename (always present)
241 * tmpfilename: The filename we're currently writing to
242 * downloaded_bytes: Bytes on disk
243 * total_bytes: Size of the whole file, None if unknown
244 * total_bytes_estimate: Guess of the eventual file size,
245 None if unavailable.
246 * elapsed: The number of seconds since download started.
247 * eta: The estimated time in seconds, None if unknown
248 * speed: The download speed in bytes/second, None if
249 unknown
250 * fragment_index: The counter of the currently
251 downloaded video fragment.
252 * fragment_count: The number of fragments (= individual
253 files that will be merged)
254
255 Progress hooks are guaranteed to be called at least once
256 (with status "finished") if the download is successful.
257 merge_output_format: Extension to use when merging formats.
258 fixup: Automatically correct known faults of the file.
259 One of:
260 - "never": do nothing
261 - "warn": only emit a warning
262 - "detect_or_warn": check whether we can do anything
263 about it, warn otherwise (default)
264 source_address: Client-side IP address to bind to.
265 call_home: Boolean, true iff we are allowed to contact the
266 youtube-dl servers for debugging.
267 sleep_interval: Number of seconds to sleep before each download when
268 used alone or a lower bound of a range for randomized
269 sleep before each download (minimum possible number
270 of seconds to sleep) when used along with
271 max_sleep_interval.
272 max_sleep_interval:Upper bound of a range for randomized sleep before each
273 download (maximum possible number of seconds to sleep).
274 Must only be used along with sleep_interval.
275 Actual sleep time will be a random float from range
276 [sleep_interval; max_sleep_interval].
277 listformats: Print an overview of available video formats and exit.
278 list_thumbnails: Print a table of all thumbnails and exit.
279 match_filter: A function that gets called with the info_dict of
280 every video.
281 If it returns a message, the video is ignored.
282 If it returns None, the video is downloaded.
283 match_filter_func in utils.py is one example for this.
284 no_color: Do not emit color codes in output.
285 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
286 HTTP header
287 geo_bypass_country:
288 Two-letter ISO 3166-2 country code that will be used for
289 explicit geographic restriction bypassing via faking
290 X-Forwarded-For HTTP header
291 geo_bypass_ip_block:
292 IP range in CIDR notation that will be used similarly to
293 geo_bypass_country
294
295 The following options determine which downloader is picked:
296 external_downloader: Executable of the external downloader to call.
297 None or unset for standard (built-in) downloader.
298 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
299 if True, otherwise use ffmpeg/avconv if False, otherwise
300 use downloader suggested by extractor if None.
301
302 The following parameters are not used by YoutubeDL itself, they are used by
303 the downloader (see youtube_dl/downloader/common.py):
304 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
305 noresizebuffer, retries, continuedl, noprogress, consoletitle,
306 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
307 http_chunk_size.
308
309 The following options are used by the post processors:
310 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
311 otherwise prefer ffmpeg.
312 postprocessor_args: A list of additional command-line arguments for the
313 postprocessor.
314
315 The following options are used by the Youtube extractor:
316 youtube_include_dash_manifest: If True (default), DASH manifests and related
317 data will be downloaded and processed by extractor.
318 You can reduce network I/O by disabling it if you don't
319 care about DASH.
320 """
321
    # Info-dict fields that hold numeric values. prepare_filename() uses
    # this set to rewrite output-template specifiers for fields that are
    # missing (where the 'NA' placeholder would break %d-style formatting).
    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    # Class-level placeholders; real per-instance values are assigned in
    # __init__.
    params = None  # options dict (see class docstring)
    _ies = []  # registered InfoExtractors (classes or instances)
    _pps = []  # registered PostProcessors
    _download_retcode = None  # exit code accumulated across downloads
    _num_downloads = None  # number of files downloaded so far
    _screen_file = None  # stream that ordinary output is written to
339
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        params -- dict of options (see the class docstring for the
            available keys); None means all defaults.
        auto_init -- when True, print the debug header and register the
            default info extractors immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # 'logtostderr' redirects ordinary output to stderr (bool indexes
        # the two-element list).
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        def check_deprecated(param, option, suggestion):
            # Warn once if a deprecated option is set; True iff it was set.
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        # Map the deprecated cn_verification_proxy onto its replacement
        # unless the user already provided geo_verification_proxy.
        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                # Spawn bidiv (or fribidi) on a pty and pipe output
                # through it; _bidi_workaround() feeds it line by line.
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    # bidiv not found -- fall back to fribidi.
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32' and
                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
                not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        if isinstance(params.get('outtmpl'), bytes):
            self.report_warning(
                'Parameter outtmpl is bytes, but should be a unicode string. '
                'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate configured postprocessors; the dict entry 'key'
        # names the class and the remaining items become kwargs.
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()
433
434 def warn_if_short_id(self, argv):
435 # short YouTube ID starting with dash?
436 idxs = [
437 i for i, a in enumerate(argv)
438 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
439 if idxs:
440 correct_argv = (
441 ['youtube-dl'] +
442 [a for i, a in enumerate(argv) if i not in idxs] +
443 ['--'] + [argv[i] for i in idxs]
444 )
445 self.report_warning(
446 'Long argument string detected. '
447 'Use -- to separate parameters and URLs, like this:\n%s\n' %
448 args_to_str(correct_argv))
449
450 def add_info_extractor(self, ie):
451 """Add an InfoExtractor object to the end of the list."""
452 self._ies.append(ie)
453 if not isinstance(ie, type):
454 self._ies_instances[ie.ie_key()] = ie
455 ie.set_downloader(self)
456
457 def get_info_extractor(self, ie_key):
458 """
459 Get an instance of an IE with name ie_key, it will try to get one from
460 the _ies list, if there's no instance it will create a new one and add
461 it to the extractor list.
462 """
463 ie = self._ies_instances.get(ie_key)
464 if ie is None:
465 ie = get_info_extractor(ie_key)()
466 self.add_info_extractor(ie)
467 return ie
468
469 def add_default_info_extractors(self):
470 """
471 Add the InfoExtractors returned by gen_extractors to the end of the list
472 """
473 for ie in gen_extractor_classes():
474 self.add_info_extractor(ie)
475
476 def add_post_processor(self, pp):
477 """Add a PostProcessor object to the end of the chain."""
478 self._pps.append(pp)
479 pp.set_downloader(self)
480
    def add_progress_hook(self, ph):
        """Add the progress hook (currently only for the file downloader).

        ph is called with a status dictionary; see the progress_hooks
        entry in the class docstring for the keys it receives.
        """
        self._progress_hooks.append(ph)
484
485 def _bidi_workaround(self, message):
486 if not hasattr(self, '_output_channel'):
487 return message
488
489 assert hasattr(self, '_output_process')
490 assert isinstance(message, compat_str)
491 line_count = message.count('\n') + 1
492 self._output_process.stdin.write((message + '\n').encode('utf-8'))
493 self._output_process.stdin.flush()
494 res = ''.join(self._output_channel.readline().decode('utf-8')
495 for _ in range(line_count))
496 return res[:-len('\n')]
497
    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode.

        Thin wrapper over to_stdout() that enables the quiet-mode check.
        """
        return self.to_stdout(message, skip_eol, check_quiet=True)
501
    def _write_string(self, s, out=None):
        # Low-level output helper: writes s to the given stream while
        # honouring the user-selected 'encoding' option.
        write_string(s, out=out, encoding=self.params.get('encoding'))
504
505 def to_stdout(self, message, skip_eol=False, check_quiet=False):
506 """Print message to stdout if not in quiet mode."""
507 if self.params.get('logger'):
508 self.params['logger'].debug(message)
509 elif not check_quiet or not self.params.get('quiet', False):
510 message = self._bidi_workaround(message)
511 terminator = ['\n', ''][skip_eol]
512 output = message + terminator
513
514 self._write_string(output, self._screen_file)
515
516 def to_stderr(self, message):
517 """Print message to stderr."""
518 assert isinstance(message, compat_str)
519 if self.params.get('logger'):
520 self.params['logger'].error(message)
521 else:
522 message = self._bidi_workaround(message)
523 output = message + '\n'
524 self._write_string(output, self._err_file)
525
526 def to_console_title(self, message):
527 if not self.params.get('consoletitle', False):
528 return
529 if compat_os_name == 'nt':
530 if ctypes.windll.kernel32.GetConsoleWindow():
531 # c_wchar_p() might not be necessary if `message` is
532 # already of type unicode()
533 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
534 elif 'TERM' in os.environ:
535 self._write_string('\033]0;%s\007' % message, self._screen_file)
536
537 def save_console_title(self):
538 if not self.params.get('consoletitle', False):
539 return
540 if self.params.get('simulate', False):
541 return
542 if compat_os_name != 'nt' and 'TERM' in os.environ:
543 # Save the title on stack
544 self._write_string('\033[22;0t', self._screen_file)
545
546 def restore_console_title(self):
547 if not self.params.get('consoletitle', False):
548 return
549 if self.params.get('simulate', False):
550 return
551 if compat_os_name != 'nt' and 'TERM' in os.environ:
552 # Restore the title from stack
553 self._write_string('\033[23;0t', self._screen_file)
554
    def __enter__(self):
        # Context-manager entry: remember the terminal title so it can
        # be restored in __exit__.
        self.save_console_title()
        return self
558
    def __exit__(self, *args):
        # Context-manager exit: restore the terminal title saved in
        # __enter__, then persist cookies if a cookie file is in use.
        self.restore_console_title()

        # Save session cookies too (ignore_discard/ignore_expires) so a
        # later run can reuse them.
        # NOTE(review): assumes self.cookiejar was created by
        # _setup_opener() when 'cookiefile' is set -- confirm.
        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)
564
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    # Exceptions (e.g. DownloadError) may carry the
                    # original exception info in an exc_info attribute;
                    # include that traceback first.
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    # Not inside an except block: show the current stack.
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            # Re-raise as DownloadError, preferring the wrapped original
            # exc_info when the active exception carries one.
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        # Errors are being ignored: record failure in the exit code.
        self._download_retcode = 1
594
595 def report_warning(self, message):
596 '''
597 Print the message to stderr, it will be prefixed with 'WARNING:'
598 If stderr is a tty file the 'WARNING:' will be colored
599 '''
600 if self.params.get('logger') is not None:
601 self.params['logger'].warning(message)
602 else:
603 if self.params.get('no_warnings'):
604 return
605 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
606 _msg_header = '\033[0;33mWARNING:\033[0m'
607 else:
608 _msg_header = 'WARNING:'
609 warning_message = '%s %s' % (_msg_header, message)
610 self.to_stderr(warning_message)
611
612 def report_error(self, message, tb=None):
613 '''
614 Do the same as trouble, but prefixes the message with 'ERROR:', colored
615 in red if stderr is a tty file.
616 '''
617 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
618 _msg_header = '\033[0;31mERROR:\033[0m'
619 else:
620 _msg_header = 'ERROR:'
621 error_message = '%s %s' % (_msg_header, message)
622 self.trouble(error_message, tb)
623
    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # file_name may not be representable in the console encoding;
            # fall back to a generic message without it.
            self.to_screen('[download] The file has already been downloaded')
630
    def prepare_filename(self, info_dict):
        """Generate the output filename from the 'outtmpl' option and
        the given info dict.

        Returns the sanitized path, or None when the template is invalid
        (a ValueError during substitution).
        """
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
            # Synthesize a 'resolution' field from width/height when the
            # extractor did not provide one.
            if template_dict.get('resolution') is None:
                if template_dict.get('width') and template_dict.get('height'):
                    template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
                elif template_dict.get('height'):
                    template_dict['resolution'] = '%sp' % template_dict['height']
                elif template_dict.get('width'):
                    template_dict['resolution'] = '%dx?' % template_dict['width']

            # Sanitize every non-numeric scalar value for filesystem use;
            # id-like fields get the stricter is_id treatment.
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                                 for k, v in template_dict.items()
                                 if v is not None and not isinstance(v, (list, tuple, dict)))
            # Missing fields substitute as the literal string 'NA'.
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

            # For fields playlist_index and autonumber convert all occurrences
            # of %(field)s to %(field)0Nd for backward compatibility
            field_size_compat_map = {
                'playlist_index': len(str(template_dict['n_entries'])),
                'autonumber': autonumber_size,
            }
            FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
            mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
            if mobj:
                outtmpl = re.sub(
                    FIELD_SIZE_COMPAT_RE,
                    r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                    outtmpl)

            # Missing numeric fields used together with integer presentation types
            # in format specification will break the argument substitution since
            # string 'NA' is returned for missing fields. We will patch output
            # template for missing fields to meet string presentation type.
            for numeric_field in self._NUMERIC_FIELDS:
                if numeric_field not in template_dict:
                    # As of [1] format syntax is:
                    # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
                    # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
                    FORMAT_RE = r'''(?x)
                        (?<!%)
                        %
                        \({0}\)  # mapping key
                        (?:[#0\-+ ]+)?  # conversion flags (optional)
                        (?:\d+)?  # minimum field width (optional)
                        (?:\.\d+)?  # precision (optional)
                        [hlL]?  # length modifier (optional)
                        [diouxXeEfFgGcrs%]  # conversion type
                    '''
                    outtmpl = re.sub(
                        FORMAT_RE.format(numeric_field),
                        r'%({0})s'.format(numeric_field), outtmpl)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly that is not what we want since we need to keep
            # '%%' intact for template dict substitution step. Working around
            # with boundary-alike separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
            filename = expand_path(outtmpl).replace(sep, '') % template_dict

            # Temporary fix for #4787
            # 'Treat' all problem characters by passing filename through preferredencoding
            # to workaround encoding issues with subprocess on python2 @ Windows
            if sys.version_info < (3, 0) and sys.platform == 'win32':
                filename = encodeFilename(filename, True).decode(preferredencoding())
            return sanitize_path(filename)
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
719
    def _match_entry(self, info_dict, incomplete):
        """ Returns None iff the file should be downloaded.

        Otherwise returns a human-readable string explaining why the
        entry was skipped (title filters, date range, view-count limits,
        age restriction, download archive, or a custom match_filter).
        incomplete signals that info_dict may lack fields (e.g. a flat
        playlist entry); the custom match_filter is skipped in that case.
        """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if matchtitle:
                if not re.search(matchtitle, title, re.IGNORECASE):
                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if rejecttitle:
                if re.search(rejecttitle, title, re.IGNORECASE):
                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date')
        if date is not None:
            # Default DateRange() accepts any date.
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        view_count = info_dict.get('view_count')
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
            return 'Skipping "%s" because it is age restricted' % video_title
        if self.in_download_archive(info_dict):
            return '%s has already been recorded in archive' % video_title

        if not incomplete:
            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                ret = match_filter(info_dict)
                if ret is not None:
                    return ret

        return None
761
762 @staticmethod
763 def add_extra_info(info_dict, extra_info):
764 '''Set the keys from extra_info in info dict if they are missing'''
765 for key, value in extra_info.items():
766 info_dict.setdefault(key, value)
767
    def extract_info(self, url, download=True, ie_key=None, extra_info={},
                     process=True, force_generic_extractor=False):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result.

        ie_key restricts extraction to a single named extractor;
        force_generic_extractor is a shortcut for ie_key='Generic'.
        When process is False the raw extractor result is returned
        without resolving playlists/URL references.
        '''

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = [self.get_info_extractor(ie_key)]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            # ie may be a class (lazy extractor); make sure we have an
            # instance registered with this downloader.
            ie = self.get_info_extractor(ie.ie_key())
            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            try:
                ie_result = ie.extract(url)
                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
                    ie_result = {
                        '_type': 'compat_list',
                        'entries': ie_result,
                    }
                self.add_default_extra_info(ie_result, ie, url)
                if process:
                    return self.process_ie_result(ie_result, download, extra_info)
                else:
                    return ie_result
            except GeoRestrictedError as e:
                # Geo-blocked: tell the user where the video is available
                # and suggest a workaround, then give up on this URL.
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
                break
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
                break
            except MaxDownloadsReached:
                # Propagate: this is flow control, not an error.
                raise
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                    break
                else:
                    raise
        else:
            # No extractor claimed the URL.
            self.report_error('no suitable InfoExtractor for URL %s' % url)
829
830 def add_default_extra_info(self, ie_result, ie, url):
831 self.add_extra_info(ie_result, {
832 'extractor': ie.IE_NAME,
833 'webpage_url': url,
834 'webpage_url_basename': url_basename(url),
835 'extractor_key': ie.ie_key(),
836 })
837
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie(may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        # NOTE(review): extra_info uses a mutable default ({}); it is only
        # read in this method, never mutated, so the shared default is
        # harmless here -- but don't start mutating it.
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            extract_flat = self.params.get('extract_flat', False)
            # With --flat-playlist, entries inside a playlist are returned
            # unresolved instead of being extracted recursively.
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
                    extract_flat is True):
                if self.params.get('forcejson', False):
                    self.to_stdout(json.dumps(ie_result))
                return ie_result

        if result_type == 'video':
            self.add_extra_info(ie_result, extra_info)
            return self.process_video_result(ie_result, download=download)
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            # Non-None values from the embedding page override the target's,
            # except for the identity fields deleted below, which must stay
            # the target's own.
            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # We process each entry in the playlist
            playlist = ie_result.get('title') or ie_result.get('id')
            self.to_screen('[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            # playliststart is converted to a 0-based slice start here.
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend')
            # For backwards compatibility, interpret -1 as whole list
            if playlistend == -1:
                playlistend = None

            playlistitems_str = self.params.get('playlist_items')
            playlistitems = None
            if playlistitems_str is not None:
                def iter_playlistitems(format):
                    # Expand '1-3,7'-style specs into individual 1-based indices.
                    for string_segment in format.split(','):
                        if '-' in string_segment:
                            start, end = string_segment.split('-')
                            for item in range(int(start), int(end) + 1):
                                yield int(item)
                        else:
                            yield int(string_segment)
                playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

            ie_entries = ie_result['entries']

            def make_playlistitems_entries(list_ie_entries):
                # Keep only the explicitly requested (1-based) indices,
                # silently dropping out-of-range ones.
                num_entries = len(list_ie_entries)
                return [
                    list_ie_entries[i - 1] for i in playlistitems
                    if -num_entries <= i - 1 < num_entries]

            def report_download(num_entries):
                self.to_screen(
                    '[%s] playlist %s: Downloading %d videos' %
                    (ie_result['extractor'], playlist, num_entries))

            # Three container shapes: plain list, lazily-paged PagedList,
            # or an arbitrary iterable (e.g. a generator).
            if isinstance(ie_entries, list):
                n_all_entries = len(ie_entries)
                if playlistitems:
                    entries = make_playlistitems_entries(ie_entries)
                else:
                    entries = ie_entries[playliststart:playlistend]
                n_entries = len(entries)
                self.to_screen(
                    '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
            elif isinstance(ie_entries, PagedList):
                # Only request the page slices actually needed.
                if playlistitems:
                    entries = []
                    for item in playlistitems:
                        entries.extend(ie_entries.getslice(
                            item - 1, item
                        ))
                else:
                    entries = ie_entries.getslice(
                        playliststart, playlistend)
                n_entries = len(entries)
                report_download(n_entries)
            else:  # iterable
                # Consume the iterable only up to the largest index needed.
                if playlistitems:
                    entries = make_playlistitems_entries(list(itertools.islice(
                        ie_entries, 0, max(playlistitems))))
                else:
                    entries = list(itertools.islice(
                        ie_entries, playliststart, playlistend))
                n_entries = len(entries)
                report_download(n_entries)

            if self.params.get('playlistreverse', False):
                entries = entries[::-1]

            if self.params.get('playlistrandom', False):
                random.shuffle(entries)

            x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

            for i, entry in enumerate(entries, 1):
                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
                # This __x_forwarded_for_ip thing is a bit ugly but requires
                # minimal changes
                if x_forwarded_for:
                    entry['__x_forwarded_for_ip'] = x_forwarded_for
                # Playlist metadata propagated into every entry's info dict.
                extra = {
                    'n_entries': n_entries,
                    'playlist': playlist,
                    'playlist_id': ie_result.get('id'),
                    'playlist_title': ie_result.get('title'),
                    'playlist_uploader': ie_result.get('uploader'),
                    'playlist_uploader_id': ie_result.get('uploader_id'),
                    'playlist_index': i + playliststart,
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }

                reason = self._match_entry(entry, incomplete=True)
                if reason is not None:
                    self.to_screen('[download] ' + reason)
                    continue

                # Recurse: each entry may itself be a url/playlist result.
                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            self.to_screen('[download] Finished downloading playlist: %s' % playlist)
            return ie_result
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                # Stamp each entry with the wrapping result's metadata.
                self.add_extra_info(
                    r,
                    {
                        'extractor': ie_result['extractor'],
                        'webpage_url': ie_result['webpage_url'],
                        'webpage_url_basename': url_basename(ie_result['webpage_url']),
                        'extractor_key': ie_result['extractor_key'],
                    }
                )
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
1032
1033 def _build_format_filter(self, filter_spec):
1034 " Returns a function to filter the formats according to the filter_spec "
1035
1036 OPERATORS = {
1037 '<': operator.lt,
1038 '<=': operator.le,
1039 '>': operator.gt,
1040 '>=': operator.ge,
1041 '=': operator.eq,
1042 '!=': operator.ne,
1043 }
1044 operator_rex = re.compile(r'''(?x)\s*
1045 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
1046 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1047 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
1048 $
1049 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1050 m = operator_rex.search(filter_spec)
1051 if m:
1052 try:
1053 comparison_value = int(m.group('value'))
1054 except ValueError:
1055 comparison_value = parse_filesize(m.group('value'))
1056 if comparison_value is None:
1057 comparison_value = parse_filesize(m.group('value') + 'B')
1058 if comparison_value is None:
1059 raise ValueError(
1060 'Invalid value %r in format specification %r' % (
1061 m.group('value'), filter_spec))
1062 op = OPERATORS[m.group('op')]
1063
1064 if not m:
1065 STR_OPERATORS = {
1066 '=': operator.eq,
1067 '^=': lambda attr, value: attr.startswith(value),
1068 '$=': lambda attr, value: attr.endswith(value),
1069 '*=': lambda attr, value: value in attr,
1070 }
1071 str_operator_rex = re.compile(r'''(?x)
1072 \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
1073 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
1074 \s*(?P<value>[a-zA-Z0-9._-]+)
1075 \s*$
1076 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1077 m = str_operator_rex.search(filter_spec)
1078 if m:
1079 comparison_value = m.group('value')
1080 str_op = STR_OPERATORS[m.group('op')]
1081 if m.group('negation'):
1082 op = lambda attr, value: not str_op(attr, value)
1083 else:
1084 op = str_op
1085
1086 if not m:
1087 raise ValueError('Invalid filter specification %r' % filter_spec)
1088
1089 def _filter(f):
1090 actual_value = f.get(m.group('key'))
1091 if actual_value is None:
1092 return m.group('none_inclusive')
1093 return op(actual_value, comparison_value)
1094 return _filter
1095
1096 def _default_format_spec(self, info_dict, download=True):
1097
1098 def can_merge():
1099 merger = FFmpegMergerPP(self)
1100 return merger.available and merger.can_merge()
1101
1102 def prefer_best():
1103 if self.params.get('simulate', False):
1104 return False
1105 if not download:
1106 return False
1107 if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
1108 return True
1109 if info_dict.get('is_live'):
1110 return True
1111 if not can_merge():
1112 return True
1113 return False
1114
1115 req_format_list = ['bestvideo+bestaudio', 'best']
1116 if prefer_best():
1117 req_format_list.reverse()
1118 return '/'.join(req_format_list)
1119
    def build_format_selector(self, format_spec):
        """Compile a format spec (e.g. 'bestvideo[height<=720]+bestaudio/best')
        into a selector function mapping a context dict ({'formats': ...,
        'incomplete_formats': ...}) to the formats to download.

        The spec is tokenized with Python's tokenizer, parsed by a small
        recursive-descent parser into FormatSelector trees, and compiled
        into nested generator/closure functions."""
        def syntax_error(note, start):
            # start is a tokenizer (row, col) pair; the caret points at col.
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        # Node types of the parsed selector tree.
        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        def _parse_filter(tokens):
            # Collect everything up to the closing ']' as one filter string.
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    # Glue adjacent name/number/other-op tokens into one NAME.
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            # Recursive-descent parse; the inside_* flags tell this level
            # which delimiters belong to the caller (pushed back via
            # tokens.restore_last_token()).
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        # A bare filter like '[height<=720]' implies 'best'.
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        video_selector = current_selector
                        audio_selector = _parse_format_selection(tokens, inside_merge=True)
                        if not video_selector or not audio_selector:
                            raise syntax_error('"+" must be between two format selectors', start)
                        current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
                    else:
                        raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _build_selector_function(selector):
            # Turn a FormatSelector tree (or a list of alternatives) into a
            # function ctx -> iterable of format dicts.
            if isinstance(selector, list):
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        for format in f(ctx):
                            yield format
                return selector_function
            elif selector.type == GROUP:
                selector_function = _build_selector_function(selector.selector)
            elif selector.type == PICKFIRST:
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    # First alternative that yields any formats wins.
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []
            elif selector.type == SINGLE:
                format_spec = selector.selector

                def selector_function(ctx):
                    # Formats are assumed sorted worst-to-best, so [-1] is
                    # 'best' and [0] is 'worst'.
                    formats = list(ctx['formats'])
                    if not formats:
                        return
                    if format_spec == 'all':
                        for f in formats:
                            yield f
                    elif format_spec in ['best', 'worst', None]:
                        format_idx = 0 if format_spec == 'worst' else -1
                        audiovideo_formats = [
                            f for f in formats
                            if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                        if audiovideo_formats:
                            yield audiovideo_formats[format_idx]
                        # for extractors with incomplete formats (audio only (soundcloud)
                        # or video only (imgur)) we will fallback to best/worst
                        # {video,audio}-only format
                        elif ctx['incomplete_formats']:
                            yield formats[format_idx]
                    elif format_spec == 'bestaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[-1]
                    elif format_spec == 'worstaudio':
                        audio_formats = [
                            f for f in formats
                            if f.get('vcodec') == 'none']
                        if audio_formats:
                            yield audio_formats[0]
                    elif format_spec == 'bestvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[-1]
                    elif format_spec == 'worstvideo':
                        video_formats = [
                            f for f in formats
                            if f.get('acodec') == 'none']
                        if video_formats:
                            yield video_formats[0]
                    else:
                        # Otherwise interpret the spec as an extension or a
                        # literal format_id.
                        extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
                        if format_spec in extensions:
                            filter_f = lambda f: f['ext'] == format_spec
                        else:
                            filter_f = lambda f: f['format_id'] == format_spec
                        matches = list(filter(filter_f, formats))
                        if matches:
                            yield matches[-1]
            elif selector.type == MERGE:
                def _merge(formats_info):
                    format_1, format_2 = [f['format_id'] for f in formats_info]
                    # The first format must contain the video and the
                    # second the audio
                    if formats_info[0].get('vcodec') == 'none':
                        self.report_error('The first format must '
                                          'contain the video, try using '
                                          '"-f %s+%s"' % (format_2, format_1))
                        return
                    # Formats must be opposite (video+audio)
                    if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
                        self.report_error(
                            'Both formats %s and %s are video-only, you must specify "-f video+audio"'
                            % (format_1, format_2))
                        return
                    output_ext = (
                        formats_info[0]['ext']
                        if self.params.get('merge_output_format') is None
                        else self.params['merge_output_format'])
                    # Synthesize a combined format dict: video attributes from
                    # the first format, audio attributes from the second.
                    return {
                        'requested_formats': formats_info,
                        'format': '%s+%s' % (formats_info[0].get('format'),
                                             formats_info[1].get('format')),
                        'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                                formats_info[1].get('format_id')),
                        'width': formats_info[0].get('width'),
                        'height': formats_info[0].get('height'),
                        'resolution': formats_info[0].get('resolution'),
                        'fps': formats_info[0].get('fps'),
                        'vcodec': formats_info[0].get('vcodec'),
                        'vbr': formats_info[0].get('vbr'),
                        'stretched_ratio': formats_info[0].get('stretched_ratio'),
                        'acodec': formats_info[1].get('acodec'),
                        'abr': formats_info[1].get('abr'),
                        'ext': output_ext,
                    }
                video_selector, audio_selector = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    # Deep-copy the context so the two sub-selectors cannot
                    # interfere with each other's format lists.
                    for pair in itertools.product(
                            video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
                        yield _merge(pair)

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                # Apply the selector's [...] filters on a copy of the context.
                ctx_copy = copy.deepcopy(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode('utf-8'))
        try:
            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator(object):
            # Token iterator with single-token pushback, used by the parser
            # to hand delimiters back to the enclosing parse level.
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
1384
1385 def _calc_headers(self, info_dict):
1386 res = std_headers.copy()
1387
1388 add_headers = info_dict.get('http_headers')
1389 if add_headers:
1390 res.update(add_headers)
1391
1392 cookies = self._calc_cookies(info_dict)
1393 if cookies:
1394 res['Cookie'] = cookies
1395
1396 if 'X-Forwarded-For' not in res:
1397 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1398 if x_forwarded_for_ip:
1399 res['X-Forwarded-For'] = x_forwarded_for_ip
1400
1401 return res
1402
1403 def _calc_cookies(self, info_dict):
1404 pr = sanitized_Request(info_dict['url'])
1405 self.cookiejar.add_cookie_header(pr)
1406 return pr.get_header('Cookie')
1407
    def process_video_result(self, info_dict, download=True):
        """Sanitize a single extracted video result, fill in defaults
        (thumbnails, upload_date, subtitles, format fields), select the
        requested formats and, if 'download', download them.

        Returns info_dict updated with the chosen format, or None early when
        only listing (thumbnails/subtitles/formats) was requested."""
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            # Coerce a non-string value to str, warning about the extractor bug.
            field = info.get(string_field)
            if field is None or isinstance(field, compat_str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = compat_str(field)

        def sanitize_numeric_fields(info):
            # Coerce all known numeric fields to int (or None) in place.
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, compat_numeric_types):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            # Promote a lone 'thumbnail' entry to the 'thumbnails' list.
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
        if thumbnails:
            # Sort worst-to-best: preference, then dimensions, then id/url.
            thumbnails.sort(key=lambda t: (
                t.get('preference') if t.get('preference') is not None else -1,
                t.get('width') if t.get('width') is not None else -1,
                t.get('height') if t.get('height') is not None else -1,
                t.get('id') if t.get('id') is not None else '', t.get('url')))
            for i, t in enumerate(thumbnails):
                t['url'] = sanitize_url(t['url'])
                if t.get('width') and t.get('height'):
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                if t.get('id') is None:
                    t['id'] = '%d' % i

        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
            return

        thumbnail = info_dict.get('thumbnail')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            # Best thumbnail is last after the worst-to-best sort above.
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
            # see http://bugs.python.org/issue1646728)
            try:
                upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
                info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
            except (ValueError, OverflowError, OSError):
                pass

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

        # Sanitize subtitle/caption URLs and fill in missing extensions.
        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        if self.params.get('listsubtitles', False):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
            return

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        # We now pick which formats have to be downloaded
        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats))

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                # Fall back to the format's index as its id.
                format['format_id'] = compat_str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        for format_id, ambiguous_formats in formats_dict.items():
            if len(ambiguous_formats) > 1:
                for i, format in enumerate(ambiguous_formats):
                    format['format_id'] = '%s-%d' % (format_id, i)

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
                )
            # Automatically determine file extension if missing
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            # Automatically determine protocol if missing (useful for format
            # selection purposes)
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)
        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        # TODO Central sorting goes here

        if formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats
        if self.params.get('listformats'):
            self.list_formats(info_dict)
            return

        req_format = self.params.get('format')
        if req_format is None:
            req_format = self._default_format_spec(info_dict, download=download)
            if self.params.get('verbose'):
                self.to_stdout('[debug] Default format spec: %s' % req_format)

        format_selector = self.build_format_selector(req_format)

        # While in format selection we may need to have an access to the original
        # format set in order to calculate some metrics or do some processing.
        # For now we need to be able to guess whether original formats provided
        # by extractor are incomplete or not (i.e. whether extractor provides only
        # video-only or audio-only formats) for proper formats selection for
        # extractors with such incomplete formats (see
        # https://github.com/ytdl-org/youtube-dl/pull/5556).
        # Since formats may be filtered during format selection and may not match
        # the original formats the results may be incorrect. Thus original formats
        # or pre-calculated metrics should be passed to format selection routines
        # as well.
        # We will pass a context object containing all necessary additional data
        # instead of just formats.
        # This fixes incorrect format selection issue (see
        # https://github.com/ytdl-org/youtube-dl/issues/10083).
        incomplete_formats = (
            # All formats are video-only or
            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
            # all formats are audio-only
            all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

        ctx = {
            'formats': formats,
            'incomplete_formats': incomplete_formats,
        }

        formats_to_download = list(format_selector(ctx))
        if not formats_to_download:
            raise ExtractorError('requested format not available',
                                 expected=True)

        if download:
            if len(formats_to_download) > 1:
                self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
            for format in formats_to_download:
                new_info = dict(info_dict)
                new_info.update(format)
                self.process_info(new_info)
        # We update the info dict with the best quality format (backwards compatibility)
        info_dict.update(formats_to_download[-1])
        return info_dict
1644
1645 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
1646 """Select the requested subtitles and their format"""
1647 available_subs = {}
1648 if normal_subtitles and self.params.get('writesubtitles'):
1649 available_subs.update(normal_subtitles)
1650 if automatic_captions and self.params.get('writeautomaticsub'):
1651 for lang, cap_info in automatic_captions.items():
1652 if lang not in available_subs:
1653 available_subs[lang] = cap_info
1654
1655 if (not self.params.get('writesubtitles') and not
1656 self.params.get('writeautomaticsub') or not
1657 available_subs):
1658 return None
1659
1660 if self.params.get('allsubtitles', False):
1661 requested_langs = available_subs.keys()
1662 else:
1663 if self.params.get('subtitleslangs', False):
1664 requested_langs = self.params.get('subtitleslangs')
1665 elif 'en' in available_subs:
1666 requested_langs = ['en']
1667 else:
1668 requested_langs = [list(available_subs.keys())[0]]
1669
1670 formats_query = self.params.get('subtitlesformat', 'best')
1671 formats_preference = formats_query.split('/') if formats_query else []
1672 subs = {}
1673 for lang in requested_langs:
1674 formats = available_subs.get(lang)
1675 if formats is None:
1676 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
1677 continue
1678 for ext in formats_preference:
1679 if ext == 'best':
1680 f = formats[-1]
1681 break
1682 matches = list(filter(lambda f: f['ext'] == ext, formats))
1683 if matches:
1684 f = matches[-1]
1685 break
1686 else:
1687 f = formats[-1]
1688 self.report_warning(
1689 'No subtitle format found matching "%s" for language %s, '
1690 'using %s' % (formats_query, lang, f['ext']))
1691 subs[lang] = f
1692 return subs
1693
1694 def process_info(self, info_dict):
1695 """Process a single resolved IE result."""
1696
1697 assert info_dict.get('_type', 'video') == 'video'
1698
1699 max_downloads = self.params.get('max_downloads')
1700 if max_downloads is not None:
1701 if self._num_downloads >= int(max_downloads):
1702 raise MaxDownloadsReached()
1703
1704 info_dict['fulltitle'] = info_dict['title']
1705 if len(info_dict['title']) > 200:
1706 info_dict['title'] = info_dict['title'][:197] + '...'
1707
1708 if 'format' not in info_dict:
1709 info_dict['format'] = info_dict['ext']
1710
1711 reason = self._match_entry(info_dict, incomplete=False)
1712 if reason is not None:
1713 self.to_screen('[download] ' + reason)
1714 return
1715
1716 self._num_downloads += 1
1717
1718 info_dict['_filename'] = filename = self.prepare_filename(info_dict)
1719
1720 # Forced printings
1721 if self.params.get('forcetitle', False):
1722 self.to_stdout(info_dict['fulltitle'])
1723 if self.params.get('forceid', False):
1724 self.to_stdout(info_dict['id'])
1725 if self.params.get('forceurl', False):
1726 if info_dict.get('requested_formats') is not None:
1727 for f in info_dict['requested_formats']:
1728 self.to_stdout(f['url'] + f.get('play_path', ''))
1729 else:
1730 # For RTMP URLs, also include the playpath
1731 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
1732 if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
1733 self.to_stdout(info_dict['thumbnail'])
1734 if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
1735 self.to_stdout(info_dict['description'])
1736 if self.params.get('forcefilename', False) and filename is not None:
1737 self.to_stdout(filename)
1738 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
1739 self.to_stdout(formatSeconds(info_dict['duration']))
1740 if self.params.get('forceformat', False):
1741 self.to_stdout(info_dict['format'])
1742 if self.params.get('forcejson', False):
1743 self.to_stdout(json.dumps(info_dict))
1744
1745 # Do nothing else if in simulate mode
1746 if self.params.get('simulate', False):
1747 return
1748
1749 if filename is None:
1750 return
1751
1752 def ensure_dir_exists(path):
1753 try:
1754 dn = os.path.dirname(path)
1755 if dn and not os.path.exists(dn):
1756 os.makedirs(dn)
1757 return True
1758 except (OSError, IOError) as err:
1759 self.report_error('unable to create directory ' + error_to_compat_str(err))
1760 return False
1761
1762 if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
1763 return
1764
1765 if self.params.get('writedescription', False):
1766 descfn = replace_extension(filename, 'description', info_dict.get('ext'))
1767 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
1768 self.to_screen('[info] Video description is already present')
1769 elif info_dict.get('description') is None:
1770 self.report_warning('There\'s no description to write.')
1771 else:
1772 try:
1773 self.to_screen('[info] Writing video description to: ' + descfn)
1774 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1775 descfile.write(info_dict['description'])
1776 except (OSError, IOError):
1777 self.report_error('Cannot write description file ' + descfn)
1778 return
1779
1780 if self.params.get('writeannotations', False):
1781 annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
1782 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
1783 self.to_screen('[info] Video annotations are already present')
1784 else:
1785 try:
1786 self.to_screen('[info] Writing video annotations to: ' + annofn)
1787 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
1788 annofile.write(info_dict['annotations'])
1789 except (KeyError, TypeError):
1790 self.report_warning('There are no annotations to write.')
1791 except (OSError, IOError):
1792 self.report_error('Cannot write annotations file: ' + annofn)
1793 return
1794
1795 subtitles_are_requested = any([self.params.get('writesubtitles', False),
1796 self.params.get('writeautomaticsub')])
1797
1798 if subtitles_are_requested and info_dict.get('requested_subtitles'):
1799 # subtitles download errors are already managed as troubles in relevant IE
1800 # that way it will silently go on when used with unsupporting IE
1801 subtitles = info_dict['requested_subtitles']
1802 ie = self.get_info_extractor(info_dict['extractor_key'])
1803 for sub_lang, sub_info in subtitles.items():
1804 sub_format = sub_info['ext']
1805 sub_filename = subtitles_filename(filename, sub_lang, sub_format)
1806 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
1807 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
1808 else:
1809 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
1810 if sub_info.get('data') is not None:
1811 try:
1812 # Use newline='' to prevent conversion of newline characters
1813 # See https://github.com/ytdl-org/youtube-dl/issues/10268
1814 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
1815 subfile.write(sub_info['data'])
1816 except (OSError, IOError):
1817 self.report_error('Cannot write subtitles file ' + sub_filename)
1818 return
1819 else:
1820 try:
1821 sub_data = ie._request_webpage(
1822 sub_info['url'], info_dict['id'], note=False).read()
1823 with io.open(encodeFilename(sub_filename), 'wb') as subfile:
1824 subfile.write(sub_data)
1825 except (ExtractorError, IOError, OSError, ValueError) as err:
1826 self.report_warning('Unable to download subtitle for "%s": %s' %
1827 (sub_lang, error_to_compat_str(err)))
1828 continue
1829
1830 if self.params.get('writeinfojson', False):
1831 infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
1832 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
1833 self.to_screen('[info] Video description metadata is already present')
1834 else:
1835 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
1836 try:
1837 write_json_file(self.filter_requested_info(info_dict), infofn)
1838 except (OSError, IOError):
1839 self.report_error('Cannot write metadata to JSON file ' + infofn)
1840 return
1841
1842 self._write_thumbnails(info_dict, filename)
1843
1844 if not self.params.get('skip_download', False):
1845 try:
1846 def dl(name, info):
1847 fd = get_suitable_downloader(info, self.params)(self, self.params)
1848 for ph in self._progress_hooks:
1849 fd.add_progress_hook(ph)
1850 if self.params.get('verbose'):
1851 self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
1852 return fd.download(name, info)
1853
1854 if info_dict.get('requested_formats') is not None:
1855 downloaded = []
1856 success = True
1857 merger = FFmpegMergerPP(self)
1858 if not merger.available:
1859 postprocessors = []
1860 self.report_warning('You have requested multiple '
1861 'formats but ffmpeg or avconv are not installed.'
1862 ' The formats won\'t be merged.')
1863 else:
1864 postprocessors = [merger]
1865
1866 def compatible_formats(formats):
1867 video, audio = formats
1868 # Check extension
1869 video_ext, audio_ext = video.get('ext'), audio.get('ext')
1870 if video_ext and audio_ext:
1871 COMPATIBLE_EXTS = (
1872 ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
1873 ('webm')
1874 )
1875 for exts in COMPATIBLE_EXTS:
1876 if video_ext in exts and audio_ext in exts:
1877 return True
1878 # TODO: Check acodec/vcodec
1879 return False
1880
1881 filename_real_ext = os.path.splitext(filename)[1][1:]
1882 filename_wo_ext = (
1883 os.path.splitext(filename)[0]
1884 if filename_real_ext == info_dict['ext']
1885 else filename)
1886 requested_formats = info_dict['requested_formats']
1887 if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
1888 info_dict['ext'] = 'mkv'
1889 self.report_warning(
1890 'Requested formats are incompatible for merge and will be merged into mkv.')
1891 # Ensure filename always has a correct extension for successful merge
1892 filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
1893 if os.path.exists(encodeFilename(filename)):
1894 self.to_screen(
1895 '[download] %s has already been downloaded and '
1896 'merged' % filename)
1897 else:
1898 for f in requested_formats:
1899 new_info = dict(info_dict)
1900 new_info.update(f)
1901 fname = prepend_extension(
1902 self.prepare_filename(new_info),
1903 'f%s' % f['format_id'], new_info['ext'])
1904 if not ensure_dir_exists(fname):
1905 return
1906 downloaded.append(fname)
1907 partial_success = dl(fname, new_info)
1908 success = success and partial_success
1909 info_dict['__postprocessors'] = postprocessors
1910 info_dict['__files_to_merge'] = downloaded
1911 else:
1912 # Just a single file
1913 success = dl(filename, info_dict)
1914 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1915 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
1916 return
1917 except (OSError, IOError) as err:
1918 raise UnavailableVideoError(err)
1919 except (ContentTooShortError, ) as err:
1920 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
1921 return
1922
1923 if success and filename != '-':
1924 # Fixup content
1925 fixup_policy = self.params.get('fixup')
1926 if fixup_policy is None:
1927 fixup_policy = 'detect_or_warn'
1928
1929 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
1930
1931 stretched_ratio = info_dict.get('stretched_ratio')
1932 if stretched_ratio is not None and stretched_ratio != 1:
1933 if fixup_policy == 'warn':
1934 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
1935 info_dict['id'], stretched_ratio))
1936 elif fixup_policy == 'detect_or_warn':
1937 stretched_pp = FFmpegFixupStretchedPP(self)
1938 if stretched_pp.available:
1939 info_dict.setdefault('__postprocessors', [])
1940 info_dict['__postprocessors'].append(stretched_pp)
1941 else:
1942 self.report_warning(
1943 '%s: Non-uniform pixel ratio (%s). %s'
1944 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
1945 else:
1946 assert fixup_policy in ('ignore', 'never')
1947
1948 if (info_dict.get('requested_formats') is None and
1949 info_dict.get('container') == 'm4a_dash'):
1950 if fixup_policy == 'warn':
1951 self.report_warning(
1952 '%s: writing DASH m4a. '
1953 'Only some players support this container.'
1954 % info_dict['id'])
1955 elif fixup_policy == 'detect_or_warn':
1956 fixup_pp = FFmpegFixupM4aPP(self)
1957 if fixup_pp.available:
1958 info_dict.setdefault('__postprocessors', [])
1959 info_dict['__postprocessors'].append(fixup_pp)
1960 else:
1961 self.report_warning(
1962 '%s: writing DASH m4a. '
1963 'Only some players support this container. %s'
1964 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
1965 else:
1966 assert fixup_policy in ('ignore', 'never')
1967
1968 if (info_dict.get('protocol') == 'm3u8_native' or
1969 info_dict.get('protocol') == 'm3u8' and
1970 self.params.get('hls_prefer_native')):
1971 if fixup_policy == 'warn':
1972 self.report_warning('%s: malformed AAC bitstream detected.' % (
1973 info_dict['id']))
1974 elif fixup_policy == 'detect_or_warn':
1975 fixup_pp = FFmpegFixupM3u8PP(self)
1976 if fixup_pp.available:
1977 info_dict.setdefault('__postprocessors', [])
1978 info_dict['__postprocessors'].append(fixup_pp)
1979 else:
1980 self.report_warning(
1981 '%s: malformed AAC bitstream detected. %s'
1982 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
1983 else:
1984 assert fixup_policy in ('ignore', 'never')
1985
1986 try:
1987 self.post_process(filename, info_dict)
1988 except (PostProcessingError) as err:
1989 self.report_error('postprocessing: %s' % str(err))
1990 return
1991 self.record_download_archive(info_dict)
1992
1993 def download(self, url_list):
1994 """Download a given list of URLs."""
1995 outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
1996 if (len(url_list) > 1 and
1997 outtmpl != '-' and
1998 '%' not in outtmpl and
1999 self.params.get('max_downloads') != 1):
2000 raise SameFileError(outtmpl)
2001
2002 for url in url_list:
2003 try:
2004 # It also downloads the videos
2005 res = self.extract_info(
2006 url, force_generic_extractor=self.params.get('force_generic_extractor', False))
2007 except UnavailableVideoError:
2008 self.report_error('unable to download video')
2009 except MaxDownloadsReached:
2010 self.to_screen('[info] Maximum number of downloaded files reached.')
2011 raise
2012 else:
2013 if self.params.get('dump_single_json', False):
2014 self.to_stdout(json.dumps(res))
2015
2016 return self._download_retcode
2017
2018 def download_with_info_file(self, info_filename):
2019 with contextlib.closing(fileinput.FileInput(
2020 [info_filename], mode='r',
2021 openhook=fileinput.hook_encoded('utf-8'))) as f:
2022 # FileInput doesn't have a read method, we can't call json.load
2023 info = self.filter_requested_info(json.loads('\n'.join(f)))
2024 try:
2025 self.process_ie_result(info, download=True)
2026 except DownloadError:
2027 webpage_url = info.get('webpage_url')
2028 if webpage_url is not None:
2029 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
2030 return self.download([webpage_url])
2031 else:
2032 raise
2033 return self._download_retcode
2034
2035 @staticmethod
2036 def filter_requested_info(info_dict):
2037 return dict(
2038 (k, v) for k, v in info_dict.items()
2039 if k not in ['requested_formats', 'requested_subtitles'])
2040
2041 def post_process(self, filename, ie_info):
2042 """Run all the postprocessors on the given file."""
2043 info = dict(ie_info)
2044 info['filepath'] = filename
2045 pps_chain = []
2046 if ie_info.get('__postprocessors') is not None:
2047 pps_chain.extend(ie_info['__postprocessors'])
2048 pps_chain.extend(self._pps)
2049 for pp in pps_chain:
2050 files_to_delete = []
2051 try:
2052 files_to_delete, info = pp.run(info)
2053 except PostProcessingError as e:
2054 self.report_error(e.msg)
2055 if files_to_delete and not self.params.get('keepvideo', False):
2056 for old_filename in files_to_delete:
2057 self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
2058 try:
2059 os.remove(encodeFilename(old_filename))
2060 except (IOError, OSError):
2061 self.report_warning('Unable to remove downloaded original file')
2062
2063 def _make_archive_id(self, info_dict):
2064 video_id = info_dict.get('id')
2065 if not video_id:
2066 return
2067 # Future-proof against any change in case
2068 # and backwards compatibility with prior versions
2069 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
2070 if extractor is None:
2071 url = str_or_none(info_dict.get('url'))
2072 if not url:
2073 return
2074 # Try to find matching extractor for the URL and take its ie_key
2075 for ie in self._ies:
2076 if ie.suitable(url):
2077 extractor = ie.ie_key()
2078 break
2079 else:
2080 return
2081 return extractor.lower() + ' ' + video_id
2082
2083 def in_download_archive(self, info_dict):
2084 fn = self.params.get('download_archive')
2085 if fn is None:
2086 return False
2087
2088 vid_id = self._make_archive_id(info_dict)
2089 if not vid_id:
2090 return False # Incomplete video information
2091
2092 try:
2093 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
2094 for line in archive_file:
2095 if line.strip() == vid_id:
2096 return True
2097 except IOError as ioe:
2098 if ioe.errno != errno.ENOENT:
2099 raise
2100 return False
2101
2102 def record_download_archive(self, info_dict):
2103 fn = self.params.get('download_archive')
2104 if fn is None:
2105 return
2106 vid_id = self._make_archive_id(info_dict)
2107 assert vid_id
2108 with locked_file(fn, 'a', encoding='utf-8') as archive_file:
2109 archive_file.write(vid_id + '\n')
2110
2111 @staticmethod
2112 def format_resolution(format, default='unknown'):
2113 if format.get('vcodec') == 'none':
2114 return 'audio only'
2115 if format.get('resolution') is not None:
2116 return format['resolution']
2117 if format.get('height') is not None:
2118 if format.get('width') is not None:
2119 res = '%sx%s' % (format['width'], format['height'])
2120 else:
2121 res = '%sp' % format['height']
2122 elif format.get('width') is not None:
2123 res = '%dx?' % format['width']
2124 else:
2125 res = default
2126 return res
2127
2128 def _format_note(self, fdict):
2129 res = ''
2130 if fdict.get('ext') in ['f4f', 'f4m']:
2131 res += '(unsupported) '
2132 if fdict.get('language'):
2133 if res:
2134 res += ' '
2135 res += '[%s] ' % fdict['language']
2136 if fdict.get('format_note') is not None:
2137 res += fdict['format_note'] + ' '
2138 if fdict.get('tbr') is not None:
2139 res += '%4dk ' % fdict['tbr']
2140 if fdict.get('container') is not None:
2141 if res:
2142 res += ', '
2143 res += '%s container' % fdict['container']
2144 if (fdict.get('vcodec') is not None and
2145 fdict.get('vcodec') != 'none'):
2146 if res:
2147 res += ', '
2148 res += fdict['vcodec']
2149 if fdict.get('vbr') is not None:
2150 res += '@'
2151 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2152 res += 'video@'
2153 if fdict.get('vbr') is not None:
2154 res += '%4dk' % fdict['vbr']
2155 if fdict.get('fps') is not None:
2156 if res:
2157 res += ', '
2158 res += '%sfps' % fdict['fps']
2159 if fdict.get('acodec') is not None:
2160 if res:
2161 res += ', '
2162 if fdict['acodec'] == 'none':
2163 res += 'video only'
2164 else:
2165 res += '%-5s' % fdict['acodec']
2166 elif fdict.get('abr') is not None:
2167 if res:
2168 res += ', '
2169 res += 'audio'
2170 if fdict.get('abr') is not None:
2171 res += '@%3dk' % fdict['abr']
2172 if fdict.get('asr') is not None:
2173 res += ' (%5dHz)' % fdict['asr']
2174 if fdict.get('filesize') is not None:
2175 if res:
2176 res += ', '
2177 res += format_bytes(fdict['filesize'])
2178 elif fdict.get('filesize_approx') is not None:
2179 if res:
2180 res += ', '
2181 res += '~' + format_bytes(fdict['filesize_approx'])
2182 return res
2183
2184 def list_formats(self, info_dict):
2185 formats = info_dict.get('formats', [info_dict])
2186 table = [
2187 [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
2188 for f in formats
2189 if f.get('preference') is None or f['preference'] >= -1000]
2190 if len(formats) > 1:
2191 table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
2192
2193 header_line = ['format code', 'extension', 'resolution', 'note']
2194 self.to_screen(
2195 '[info] Available formats for %s:\n%s' %
2196 (info_dict['id'], render_table(header_line, table)))
2197
2198 def list_thumbnails(self, info_dict):
2199 thumbnails = info_dict.get('thumbnails')
2200 if not thumbnails:
2201 self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
2202 return
2203
2204 self.to_screen(
2205 '[info] Thumbnails for %s:' % info_dict['id'])
2206 self.to_screen(render_table(
2207 ['ID', 'width', 'height', 'URL'],
2208 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
2209
2210 def list_subtitles(self, video_id, subtitles, name='subtitles'):
2211 if not subtitles:
2212 self.to_screen('%s has no %s' % (video_id, name))
2213 return
2214 self.to_screen(
2215 'Available %s for %s:' % (name, video_id))
2216 self.to_screen(render_table(
2217 ['Language', 'formats'],
2218 [[lang, ', '.join(f['ext'] for f in reversed(formats))]
2219 for lang, formats in subtitles.items()]))
2220
2221 def urlopen(self, req):
2222 """ Start an HTTP download """
2223 if isinstance(req, compat_basestring):
2224 req = sanitized_Request(req)
2225 return self._opener.open(req, timeout=self._socket_timeout)
2226
2227 def print_debug_header(self):
2228 if not self.params.get('verbose'):
2229 return
2230
2231 if type('') is not compat_str:
2232 # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
2233 self.report_warning(
2234 'Your Python is broken! Update to a newer and supported version')
2235
2236 stdout_encoding = getattr(
2237 sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
2238 encoding_str = (
2239 '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
2240 locale.getpreferredencoding(),
2241 sys.getfilesystemencoding(),
2242 stdout_encoding,
2243 self.get_encoding()))
2244 write_string(encoding_str, encoding=None)
2245
2246 self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
2247 if _LAZY_LOADER:
2248 self._write_string('[debug] Lazy loading extractors enabled' + '\n')
2249 try:
2250 sp = subprocess.Popen(
2251 ['git', 'rev-parse', '--short', 'HEAD'],
2252 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
2253 cwd=os.path.dirname(os.path.abspath(__file__)))
2254 out, err = sp.communicate()
2255 out = out.decode().strip()
2256 if re.match('[0-9a-f]+', out):
2257 self._write_string('[debug] Git HEAD: ' + out + '\n')
2258 except Exception:
2259 try:
2260 sys.exc_clear()
2261 except Exception:
2262 pass
2263
2264 def python_implementation():
2265 impl_name = platform.python_implementation()
2266 if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
2267 return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
2268 return impl_name
2269
2270 self._write_string('[debug] Python version %s (%s) - %s\n' % (
2271 platform.python_version(), python_implementation(),
2272 platform_name()))
2273
2274 exe_versions = FFmpegPostProcessor.get_versions(self)
2275 exe_versions['rtmpdump'] = rtmpdump_version()
2276 exe_versions['phantomjs'] = PhantomJSwrapper._version()
2277 exe_str = ', '.join(
2278 '%s %s' % (exe, v)
2279 for exe, v in sorted(exe_versions.items())
2280 if v
2281 )
2282 if not exe_str:
2283 exe_str = 'none'
2284 self._write_string('[debug] exe versions: %s\n' % exe_str)
2285
2286 proxy_map = {}
2287 for handler in self._opener.handlers:
2288 if hasattr(handler, 'proxies'):
2289 proxy_map.update(handler.proxies)
2290 self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
2291
2292 if self.params.get('call_home', False):
2293 ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
2294 self._write_string('[debug] Public IP address: %s\n' % ipaddr)
2295 latest_version = self.urlopen(
2296 'https://yt-dl.org/latest/version').read().decode('utf-8')
2297 if version_tuple(latest_version) > version_tuple(__version__):
2298 self.report_warning(
2299 'You are using an outdated version (newest version: %s)! '
2300 'See https://yt-dl.org/update if you need help updating.' %
2301 latest_version)
2302
2303 def _setup_opener(self):
2304 timeout_val = self.params.get('socket_timeout')
2305 self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
2306
2307 opts_cookiefile = self.params.get('cookiefile')
2308 opts_proxy = self.params.get('proxy')
2309
2310 if opts_cookiefile is None:
2311 self.cookiejar = compat_cookiejar.CookieJar()
2312 else:
2313 opts_cookiefile = expand_path(opts_cookiefile)
2314 self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
2315 if os.access(opts_cookiefile, os.R_OK):
2316 self.cookiejar.load(ignore_discard=True, ignore_expires=True)
2317
2318 cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
2319 if opts_proxy is not None:
2320 if opts_proxy == '':
2321 proxies = {}
2322 else:
2323 proxies = {'http': opts_proxy, 'https': opts_proxy}
2324 else:
2325 proxies = compat_urllib_request.getproxies()
2326 # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
2327 if 'http' in proxies and 'https' not in proxies:
2328 proxies['https'] = proxies['http']
2329 proxy_handler = PerRequestProxyHandler(proxies)
2330
2331 debuglevel = 1 if self.params.get('debug_printtraffic') else 0
2332 https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
2333 ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
2334 data_handler = compat_urllib_request_DataHandler()
2335
2336 # When passing our own FileHandler instance, build_opener won't add the
2337 # default FileHandler and allows us to disable the file protocol, which
2338 # can be used for malicious purposes (see
2339 # https://github.com/ytdl-org/youtube-dl/issues/8227)
2340 file_handler = compat_urllib_request.FileHandler()
2341
2342 def file_open(*args, **kwargs):
2343 raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
2344 file_handler.file_open = file_open
2345
2346 opener = compat_urllib_request.build_opener(
2347 proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)
2348
2349 # Delete the default user-agent header, which would otherwise apply in
2350 # cases where our custom HTTP handler doesn't come into play
2351 # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
2352 opener.addheaders = []
2353 self._opener = opener
2354
2355 def encode(self, s):
2356 if isinstance(s, bytes):
2357 return s # Already encoded
2358
2359 try:
2360 return s.encode(self.get_encoding())
2361 except UnicodeEncodeError as err:
2362 err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
2363 raise
2364
2365 def get_encoding(self):
2366 encoding = self.params.get('encoding')
2367 if encoding is None:
2368 encoding = preferredencoding()
2369 return encoding
2370
2371 def _write_thumbnails(self, info_dict, filename):
2372 if self.params.get('writethumbnail', False):
2373 thumbnails = info_dict.get('thumbnails')
2374 if thumbnails:
2375 thumbnails = [thumbnails[-1]]
2376 elif self.params.get('write_all_thumbnails', False):
2377 thumbnails = info_dict.get('thumbnails')
2378 else:
2379 return
2380
2381 if not thumbnails:
2382 # No thumbnails present, so return immediately
2383 return
2384
2385 for t in thumbnails:
2386 thumb_ext = determine_ext(t['url'], 'jpg')
2387 suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
2388 thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
2389 t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
2390
2391 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
2392 self.to_screen('[%s] %s: Thumbnail %sis already present' %
2393 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2394 else:
2395 self.to_screen('[%s] %s: Downloading thumbnail %s...' %
2396 (info_dict['extractor'], info_dict['id'], thumb_display_id))
2397 try:
2398 uf = self.urlopen(t['url'])
2399 with open(encodeFilename(thumb_filename), 'wb') as thumbf:
2400 shutil.copyfileobj(uf, thumbf)
2401 self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
2402 (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
2403 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
2404 self.report_warning('Unable to download thumbnail "%s": %s' %
2405 (t['url'], error_to_compat_str(err)))