]> jfr.im git - yt-dlp.git/blob - youtube_dlc/YoutubeDL.py
Merge pull request #94 from blackjack4494/conv_subs_when_skipped
[yt-dlp.git] / youtube_dlc / YoutubeDL.py
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import absolute_import, unicode_literals
5
6 import collections
7 import contextlib
8 import copy
9 import datetime
10 import errno
11 import fileinput
12 import io
13 import itertools
14 import json
15 import locale
16 import operator
17 import os
18 import platform
19 import re
20 import shutil
21 import subprocess
22 import socket
23 import sys
24 import time
25 import tokenize
26 import traceback
27 import random
28
29 from string import ascii_letters
30
31 from .compat import (
32 compat_basestring,
33 compat_cookiejar,
34 compat_get_terminal_size,
35 compat_http_client,
36 compat_kwargs,
37 compat_numeric_types,
38 compat_os_name,
39 compat_str,
40 compat_tokenize_tokenize,
41 compat_urllib_error,
42 compat_urllib_request,
43 compat_urllib_request_DataHandler,
44 )
45 from .utils import (
46 age_restricted,
47 args_to_str,
48 ContentTooShortError,
49 date_from_str,
50 DateRange,
51 DEFAULT_OUTTMPL,
52 determine_ext,
53 determine_protocol,
54 DownloadError,
55 encode_compat_str,
56 encodeFilename,
57 error_to_compat_str,
58 expand_path,
59 ExtractorError,
60 format_bytes,
61 formatSeconds,
62 GeoRestrictedError,
63 int_or_none,
64 ISO3166Utils,
65 locked_file,
66 make_HTTPS_handler,
67 MaxDownloadsReached,
68 orderedSet,
69 PagedList,
70 parse_filesize,
71 PerRequestProxyHandler,
72 platform_name,
73 PostProcessingError,
74 preferredencoding,
75 prepend_extension,
76 register_socks_protocols,
77 render_table,
78 replace_extension,
79 SameFileError,
80 sanitize_filename,
81 sanitize_path,
82 sanitize_url,
83 sanitized_Request,
84 std_headers,
85 str_or_none,
86 subtitles_filename,
87 UnavailableVideoError,
88 url_basename,
89 version_tuple,
90 write_json_file,
91 write_string,
92 YoutubeDLCookieJar,
93 YoutubeDLCookieProcessor,
94 YoutubeDLHandler,
95 YoutubeDLRedirectHandler,
96 )
97 from .cache import Cache
98 from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
99 from .extractor.openload import PhantomJSwrapper
100 from .downloader import get_suitable_downloader
101 from .downloader.rtmp import rtmpdump_version
102 from .postprocessor import (
103 FFmpegFixupM3u8PP,
104 FFmpegFixupM4aPP,
105 FFmpegFixupStretchedPP,
106 FFmpegMergerPP,
107 FFmpegPostProcessor,
108 FFmpegSubtitlesConvertorPP,
109 get_postprocessor,
110 )
111 from .version import __version__
112
113 if compat_os_name == 'nt':
114 import ctypes
115
116
117 class YoutubeDL(object):
118 """YoutubeDL class.
119
120 YoutubeDL objects are the ones responsible for downloading the
121 actual video file and writing it to disk if the user has requested
122 it, among some other tasks. In most cases there should be one per
123 program. As, given a video URL, the downloader doesn't know how to
124 extract all the needed information, task that InfoExtractors do, it
125 has to pass the URL to one of them.
126
127 For this, YoutubeDL objects have a method that allows
128 InfoExtractors to be registered in a given order. When it is passed
129 a URL, the YoutubeDL object handles it to the first InfoExtractor it
130 finds that reports being able to handle it. The InfoExtractor extracts
131 all the information about the video or videos the URL refers to, and
132 YoutubeDL process the extracted information, possibly using a File
133 Downloader to download the video.
134
135 YoutubeDL objects accept a lot of parameters. In order not to saturate
136 the object constructor with arguments, it receives a dictionary of
137 options instead. These options are available through the params
138 attribute for the InfoExtractors to use. The YoutubeDL also
139 registers itself as the downloader in charge for the InfoExtractors
140 that are added to it, so this is a "mutual registration".
141
142 Available options:
143
144 username: Username for authentication purposes.
145 password: Password for authentication purposes.
146 videopassword: Password for accessing a video.
147 ap_mso: Adobe Pass multiple-system operator identifier.
148 ap_username: Multiple-system operator account username.
149 ap_password: Multiple-system operator account password.
150 usenetrc: Use netrc for authentication instead.
151 verbose: Print additional info to stdout.
152 quiet: Do not print messages to stdout.
153 no_warnings: Do not print out anything for warnings.
154 forceurl: Force printing final URL.
155 forcetitle: Force printing title.
156 forceid: Force printing ID.
157 forcethumbnail: Force printing thumbnail URL.
158 forcedescription: Force printing description.
159 forcefilename: Force printing final filename.
160 forceduration: Force printing duration.
161 forcejson: Force printing info_dict as JSON.
162 dump_single_json: Force printing the info_dict of the whole playlist
163 (or video) as a single JSON line.
164 simulate: Do not download the video files.
165 format: Video format code. See options.py for more information.
166 outtmpl: Template for output names.
167 restrictfilenames: Do not allow "&" and spaces in file names
168 ignoreerrors: Do not stop on download errors.
169 force_generic_extractor: Force downloader to use the generic extractor
170 nooverwrites: Prevent overwriting files.
171 playliststart: Playlist item to start at.
172 playlistend: Playlist item to end at.
173 playlist_items: Specific indices of playlist to download.
174 playlistreverse: Download playlist items in reverse order.
175 playlistrandom: Download playlist items in random order.
176 matchtitle: Download only matching titles.
177 rejecttitle: Reject downloads for matching titles.
178 logger: Log messages to a logging.Logger instance.
179 logtostderr: Log messages to stderr instead of stdout.
180 writedescription: Write the video description to a .description file
181 writeinfojson: Write the video description to a .info.json file
182 writeannotations: Write the video annotations to a .annotations.xml file
183 writethumbnail: Write the thumbnail image to a file
184 write_all_thumbnails: Write all thumbnail formats to files
185 writesubtitles: Write the video subtitles to a file
186 writeautomaticsub: Write the automatically generated subtitles to a file
187 allsubtitles: Downloads all the subtitles of the video
188 (requires writesubtitles or writeautomaticsub)
189 listsubtitles: Lists all available subtitles for the video
190 subtitlesformat: The format code for subtitles
191 subtitleslangs: List of languages of the subtitles to download
192 keepvideo: Keep the video file after post-processing
193 daterange: A DateRange object, download only if the upload_date is in the range.
194 skip_download: Skip the actual download of the video file
195 cachedir: Location of the cache files in the filesystem.
196 False to disable filesystem cache.
197 noplaylist: Download single video instead of a playlist if in doubt.
198 age_limit: An integer representing the user's age in years.
199 Unsuitable videos for the given age are skipped.
200 min_views: An integer representing the minimum view count the video
201 must have in order to not be skipped.
202 Videos without view count information are always
203 downloaded. None for no limit.
204 max_views: An integer representing the maximum view count.
205 Videos that are more popular than that are not
206 downloaded.
207 Videos without view count information are always
208 downloaded. None for no limit.
209 download_archive: File name of a file where all downloads are recorded.
210 Videos already present in the file are not downloaded
211 again.
212 cookiefile: File name where cookies should be read from and dumped to.
213 nocheckcertificate:Do not verify SSL certificates
214 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
215 At the moment, this is only supported by YouTube.
216 proxy: URL of the proxy server to use
217 geo_verification_proxy: URL of the proxy to use for IP address verification
218 on geo-restricted sites.
219 socket_timeout: Time to wait for unresponsive hosts, in seconds
220 bidi_workaround: Work around buggy terminals without bidirectional text
221 support, using fribidi
222 debug_printtraffic:Print out sent and received HTTP traffic
223 include_ads: Download ads as well
224 default_search: Prepend this string if an input url is not valid.
225 'auto' for elaborate guessing
226 encoding: Use this encoding instead of the system-specified.
227 extract_flat: Do not resolve URLs, return the immediate result.
228 Pass in 'in_playlist' to only show this behavior for
229 playlist items.
230 postprocessors: A list of dictionaries, each with an entry
231 * key: The name of the postprocessor. See
232 youtube_dlc/postprocessor/__init__.py for a list.
233 as well as any further keyword arguments for the
234 postprocessor.
235 progress_hooks: A list of functions that get called on download
236 progress, with a dictionary with the entries
237 * status: One of "downloading", "error", or "finished".
238 Check this first and ignore unknown values.
239
240 If status is one of "downloading", or "finished", the
241 following properties may also be present:
242 * filename: The final filename (always present)
243 * tmpfilename: The filename we're currently writing to
244 * downloaded_bytes: Bytes on disk
245 * total_bytes: Size of the whole file, None if unknown
246 * total_bytes_estimate: Guess of the eventual file size,
247 None if unavailable.
248 * elapsed: The number of seconds since download started.
249 * eta: The estimated time in seconds, None if unknown
250 * speed: The download speed in bytes/second, None if
251 unknown
252 * fragment_index: The counter of the currently
253 downloaded video fragment.
254 * fragment_count: The number of fragments (= individual
255 files that will be merged)
256
257 Progress hooks are guaranteed to be called at least once
258 (with status "finished") if the download is successful.
259 merge_output_format: Extension to use when merging formats.
260 fixup: Automatically correct known faults of the file.
261 One of:
262 - "never": do nothing
263 - "warn": only emit a warning
264 - "detect_or_warn": check whether we can do anything
265 about it, warn otherwise (default)
266 source_address: Client-side IP address to bind to.
267 call_home: Boolean, true iff we are allowed to contact the
268 youtube-dlc servers for debugging.
269 sleep_interval: Number of seconds to sleep before each download when
270 used alone or a lower bound of a range for randomized
271 sleep before each download (minimum possible number
272 of seconds to sleep) when used along with
273 max_sleep_interval.
274 max_sleep_interval:Upper bound of a range for randomized sleep before each
275 download (maximum possible number of seconds to sleep).
276 Must only be used along with sleep_interval.
277 Actual sleep time will be a random float from range
278 [sleep_interval; max_sleep_interval].
279 listformats: Print an overview of available video formats and exit.
280 list_thumbnails: Print a table of all thumbnails and exit.
281 match_filter: A function that gets called with the info_dict of
282 every video.
283 If it returns a message, the video is ignored.
284 If it returns None, the video is downloaded.
285 match_filter_func in utils.py is one example for this.
286 no_color: Do not emit color codes in output.
287 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
288 HTTP header
289 geo_bypass_country:
290 Two-letter ISO 3166-2 country code that will be used for
291 explicit geographic restriction bypassing via faking
292 X-Forwarded-For HTTP header
293 geo_bypass_ip_block:
294 IP range in CIDR notation that will be used similarly to
295 geo_bypass_country
296
297 The following options determine which downloader is picked:
298 external_downloader: Executable of the external downloader to call.
299 None or unset for standard (built-in) downloader.
300 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
301 if True, otherwise use ffmpeg/avconv if False, otherwise
302 use downloader suggested by extractor if None.
303
304 The following parameters are not used by YoutubeDL itself, they are used by
305 the downloader (see youtube_dlc/downloader/common.py):
306 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
307 noresizebuffer, retries, continuedl, noprogress, consoletitle,
308 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
309 http_chunk_size.
310
311 The following options are used by the post processors:
312 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
313 otherwise prefer ffmpeg.
314 ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
315 to the binary or its containing directory.
316 postprocessor_args: A list of additional command-line arguments for the
317 postprocessor.
318
319 The following options are used by the Youtube extractor:
320 youtube_include_dash_manifest: If True (default), DASH manifests and related
321 data will be downloaded and processed by extractor.
322 You can reduce network I/O by disabling it if you don't
323 care about DASH.
324 """
325
    # Info-dict fields whose values are numeric.  prepare_filename() uses this
    # set to rewrite %(field)d-style specifiers to %(field)s for fields that
    # are missing, since the 'NA' placeholder would break integer formatting.
    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'upload_year', 'upload_month', 'upload_day',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
        'playlist_index',
    ))

    # Class-level placeholders; the real per-instance values are assigned
    # in __init__.
    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None
    _screen_file = None
343
    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.

        params -- dict of options (see the class docstring for the full
                  list); None is treated as an empty dict.
        auto_init -- when True, print the debug header and register all
                     default info extractors immediately.
        """
        if params is None:
            params = {}
        self._ies = []
        self._ies_instances = {}
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        # Screen output goes to stderr instead of stdout when 'logtostderr'
        # is set (the boolean indexes into the two-element list).
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = {
            # Default parameters
            'nocheckcertificate': False,
        }
        self.params.update(params)
        self.cache = Cache(self)

        def check_deprecated(param, option, suggestion):
            # Warn when a deprecated option was supplied; returns True iff
            # the deprecated option is actually set.
            if self.params.get(param) is not None:
                self.report_warning(
                    '%s is deprecated. Use %s instead.' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            # Keep old behavior: the deprecated option feeds the new one
            # unless the new one is explicitly set.
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')

        if params.get('bidi_workaround', False):
            try:
                import pty
                # Route our output through bidiv/fribidi via a pty so that
                # bidirectional text renders correctly on buggy terminals.
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = subprocess.Popen(
                        ['bidiv'] + width_args, **sp_kwargs
                    )
                except OSError:
                    # bidiv not available; fall back to fribidi.
                    self._output_process = subprocess.Popen(
                        ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        if isinstance(params.get('outtmpl'), bytes):
            self.report_warning(
                'Parameter outtmpl is bytes, but should be a unicode string. '
                'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')

        self._setup_opener()

        if auto_init:
            self.print_debug_header()
            self.add_default_info_extractors()

        # Instantiate and attach the user-configured postprocessors, in order.
        for pp_def_raw in self.params.get('postprocessors', []):
            pp_class = get_postprocessor(pp_def_raw['key'])
            pp_def = dict(pp_def_raw)
            del pp_def['key']
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp)

        for ph in self.params.get('progress_hooks', []):
            self.add_progress_hook(ph)

        register_socks_protocols()
437
438 def warn_if_short_id(self, argv):
439 # short YouTube ID starting with dash?
440 idxs = [
441 i for i, a in enumerate(argv)
442 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
443 if idxs:
444 correct_argv = (
445 ['youtube-dlc']
446 + [a for i, a in enumerate(argv) if i not in idxs]
447 + ['--'] + [argv[i] for i in idxs]
448 )
449 self.report_warning(
450 'Long argument string detected. '
451 'Use -- to separate parameters and URLs, like this:\n%s\n' %
452 args_to_str(correct_argv))
453
454 def add_info_extractor(self, ie):
455 """Add an InfoExtractor object to the end of the list."""
456 self._ies.append(ie)
457 if not isinstance(ie, type):
458 self._ies_instances[ie.ie_key()] = ie
459 ie.set_downloader(self)
460
461 def get_info_extractor(self, ie_key):
462 """
463 Get an instance of an IE with name ie_key, it will try to get one from
464 the _ies list, if there's no instance it will create a new one and add
465 it to the extractor list.
466 """
467 ie = self._ies_instances.get(ie_key)
468 if ie is None:
469 ie = get_info_extractor(ie_key)()
470 self.add_info_extractor(ie)
471 return ie
472
473 def add_default_info_extractors(self):
474 """
475 Add the InfoExtractors returned by gen_extractors to the end of the list
476 """
477 for ie in gen_extractor_classes():
478 self.add_info_extractor(ie)
479
480 def add_post_processor(self, pp):
481 """Add a PostProcessor object to the end of the chain."""
482 self._pps.append(pp)
483 pp.set_downloader(self)
484
485 def add_progress_hook(self, ph):
486 """Add the progress hook (currently only for the file downloader)"""
487 self._progress_hooks.append(ph)
488
489 def _bidi_workaround(self, message):
490 if not hasattr(self, '_output_channel'):
491 return message
492
493 assert hasattr(self, '_output_process')
494 assert isinstance(message, compat_str)
495 line_count = message.count('\n') + 1
496 self._output_process.stdin.write((message + '\n').encode('utf-8'))
497 self._output_process.stdin.flush()
498 res = ''.join(self._output_channel.readline().decode('utf-8')
499 for _ in range(line_count))
500 return res[:-len('\n')]
501
502 def to_screen(self, message, skip_eol=False):
503 """Print message to stdout if not in quiet mode."""
504 return self.to_stdout(message, skip_eol, check_quiet=True)
505
506 def _write_string(self, s, out=None):
507 write_string(s, out=out, encoding=self.params.get('encoding'))
508
509 def to_stdout(self, message, skip_eol=False, check_quiet=False):
510 """Print message to stdout if not in quiet mode."""
511 if self.params.get('logger'):
512 self.params['logger'].debug(message)
513 elif not check_quiet or not self.params.get('quiet', False):
514 message = self._bidi_workaround(message)
515 terminator = ['\n', ''][skip_eol]
516 output = message + terminator
517
518 self._write_string(output, self._screen_file)
519
520 def to_stderr(self, message):
521 """Print message to stderr."""
522 assert isinstance(message, compat_str)
523 if self.params.get('logger'):
524 self.params['logger'].error(message)
525 else:
526 message = self._bidi_workaround(message)
527 output = message + '\n'
528 self._write_string(output, self._err_file)
529
530 def to_console_title(self, message):
531 if not self.params.get('consoletitle', False):
532 return
533 if compat_os_name == 'nt':
534 if ctypes.windll.kernel32.GetConsoleWindow():
535 # c_wchar_p() might not be necessary if `message` is
536 # already of type unicode()
537 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
538 elif 'TERM' in os.environ:
539 self._write_string('\033]0;%s\007' % message, self._screen_file)
540
541 def save_console_title(self):
542 if not self.params.get('consoletitle', False):
543 return
544 if self.params.get('simulate', False):
545 return
546 if compat_os_name != 'nt' and 'TERM' in os.environ:
547 # Save the title on stack
548 self._write_string('\033[22;0t', self._screen_file)
549
550 def restore_console_title(self):
551 if not self.params.get('consoletitle', False):
552 return
553 if self.params.get('simulate', False):
554 return
555 if compat_os_name != 'nt' and 'TERM' in os.environ:
556 # Restore the title from stack
557 self._write_string('\033[23;0t', self._screen_file)
558
    def __enter__(self):
        # Context-manager entry: remember the terminal title so that
        # __exit__ can restore it afterwards.
        self.save_console_title()
        return self
562
    def __exit__(self, *args):
        # Context-manager exit: restore the terminal title and persist
        # cookies to disk.
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            # Save even session-only (discard-flagged) and expired cookies
            # so that login state survives across runs.
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)
568
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    # Some wrapped exceptions (e.g. DownloadError) carry the
                    # original exc_info of the underlying failure; include it.
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    # Not inside an except block: show the current call stack.
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            # Re-raise as DownloadError, preferring the wrapped exception's
            # original exc_info when available.
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1
598
599 def report_warning(self, message):
600 '''
601 Print the message to stderr, it will be prefixed with 'WARNING:'
602 If stderr is a tty file the 'WARNING:' will be colored
603 '''
604 if self.params.get('logger') is not None:
605 self.params['logger'].warning(message)
606 else:
607 if self.params.get('no_warnings'):
608 return
609 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
610 _msg_header = '\033[0;33mWARNING:\033[0m'
611 else:
612 _msg_header = 'WARNING:'
613 warning_message = '%s %s' % (_msg_header, message)
614 self.to_stderr(warning_message)
615
616 def report_error(self, message, tb=None):
617 '''
618 Do the same as trouble, but prefixes the message with 'ERROR:', colored
619 in red if stderr is a tty file.
620 '''
621 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
622 _msg_header = '\033[0;31mERROR:\033[0m'
623 else:
624 _msg_header = 'ERROR:'
625 error_message = '%s %s' % (_msg_header, message)
626 self.trouble(error_message, tb)
627
628 def report_file_already_downloaded(self, file_name):
629 """Report file has already been fully downloaded."""
630 try:
631 self.to_screen('[download] %s has already been downloaded' % file_name)
632 except UnicodeEncodeError:
633 self.to_screen('[download] The file has already been downloaded')
634
    def prepare_filename(self, info_dict):
        """Generate the output filename from the outtmpl template and
        info_dict.  Returns None (after reporting) on a template error."""
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            # autonumber counts from autonumber_start across this session.
            template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
            if template_dict.get('resolution') is None:
                # Derive a human-readable resolution from width/height.
                if template_dict.get('width') and template_dict.get('height'):
                    template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
                elif template_dict.get('height'):
                    template_dict['resolution'] = '%sp' % template_dict['height']
                elif template_dict.get('width'):
                    template_dict['resolution'] = '%dx?' % template_dict['width']

            # Sanitize every non-numeric value for filesystem use; ids get
            # the laxer is_id treatment.  Lists/tuples/dicts are dropped.
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
                                 for k, v in template_dict.items()
                                 if v is not None and not isinstance(v, (list, tuple, dict)))
            # Any missing field substitutes as the literal string 'NA'.
            template_dict = collections.defaultdict(lambda: 'NA', template_dict)

            outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

            # For fields playlist_index and autonumber convert all occurrences
            # of %(field)s to %(field)0Nd for backward compatibility
            field_size_compat_map = {
                'playlist_index': len(str(template_dict['n_entries'])),
                'autonumber': autonumber_size,
            }
            FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
            mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
            if mobj:
                outtmpl = re.sub(
                    FIELD_SIZE_COMPAT_RE,
                    r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
                    outtmpl)

            # Missing numeric fields used together with integer presentation types
            # in format specification will break the argument substitution since
            # string 'NA' is returned for missing fields. We will patch output
            # template for missing fields to meet string presentation type.
            for numeric_field in self._NUMERIC_FIELDS:
                if numeric_field not in template_dict:
                    # As of [1] format syntax is:
                    #  %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
                    # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
                    FORMAT_RE = r'''(?x)
                        (?<!%)
                        %
                        \({0}\)  # mapping key
                        (?:[#0\-+ ]+)?  # conversion flags (optional)
                        (?:\d+)?  # minimum field width (optional)
                        (?:\.\d+)?  # precision (optional)
                        [hlL]?  # length modifier (optional)
                        [diouxXeEfFgGcrs%]  # conversion type
                    '''
                    outtmpl = re.sub(
                        FORMAT_RE.format(numeric_field),
                        r'%({0})s'.format(numeric_field), outtmpl)

            # expand_path translates '%%' into '%' and '$$' into '$'
            # correspondingly that is not what we want since we need to keep
            # '%%' intact for template dict substitution step. Working around
            # with boundary-alike separator hack.
            sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
            outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

            # outtmpl should be expand_path'ed before template dict substitution
            # because meta fields may contain env variables we don't want to
            # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
            # title "Hello $PATH", we don't want `$PATH` to be expanded.
            filename = expand_path(outtmpl).replace(sep, '') % template_dict

            # Temporary fix for #4787
            # 'Treat' all problem characters by passing filename through preferredencoding
            # to workaround encoding issues with subprocess on python2 @ Windows
            if sys.version_info < (3, 0) and sys.platform == 'win32':
                filename = encodeFilename(filename, True).decode(preferredencoding())
            return sanitize_path(filename)
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None
723
    def _match_entry(self, info_dict, incomplete):
        """ Returns None iff the file should be downloaded """
        # Each check below returns a human-readable skip-reason string when
        # the video must be skipped; falling through means "download it".

        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if matchtitle:
                if not re.search(matchtitle, title, re.IGNORECASE):
                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if rejecttitle:
                if re.search(rejecttitle, title, re.IGNORECASE):
                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date')
        if date is not None:
            # DateRange() with no bounds accepts every date.
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        view_count = info_dict.get('view_count')
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
            return 'Skipping "%s" because it is age restricted' % video_title
        if self.in_download_archive(info_dict):
            return '%s has already been recorded in archive' % video_title

        if not incomplete:
            # The user match_filter only runs on complete metadata, since it
            # may inspect arbitrary fields of info_dict.
            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                ret = match_filter(info_dict)
                if ret is not None:
                    return ret

        return None
765
766 @staticmethod
767 def add_extra_info(info_dict, extra_info):
768 '''Set the keys from extra_info in info dict if they are missing'''
769 for key, value in extra_info.items():
770 info_dict.setdefault(key, value)
771
def extract_info(self, url, download=True, ie_key=None, extra_info={},
                 process=True, force_generic_extractor=False):
    '''
    Returns a list with a dictionary for each video we find.
    If 'download', also downloads the videos.
    extra_info is a dict containing the extra values to add to each result.

    ie_key: if given, only the extractor with that key is tried instead of
        probing every registered extractor with suitable().
    process: if False, return the raw extractor result without resolving
        playlists/url references through process_ie_result().
    force_generic_extractor: shortcut for ie_key='Generic'.

    NOTE(review): extra_info uses a mutable default argument; it is only
    passed through here, never mutated, but callers should not rely on it.
    '''

    if not ie_key and force_generic_extractor:
        ie_key = 'Generic'

    if ie_key:
        ies = [self.get_info_extractor(ie_key)]
    else:
        ies = self._ies

    for ie in ies:
        if not ie.suitable(url):
            continue

        # Re-fetch through get_info_extractor so we get the instance
        # bound to this YoutubeDL (self._ies may hold suitable()-only refs).
        ie = self.get_info_extractor(ie.ie_key())
        if not ie.working():
            self.report_warning('The program functionality for this site has been marked as broken, '
                                'and will probably not work.')

        try:
            ie_result = ie.extract(url)
            if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                break
            if isinstance(ie_result, list):
                # Backwards compatibility: old IE result format
                ie_result = {
                    '_type': 'compat_list',
                    'entries': ie_result,
                }
            self.add_default_extra_info(ie_result, ie, url)
            if process:
                return self.process_ie_result(ie_result, download, extra_info)
            else:
                return ie_result
        except GeoRestrictedError as e:
            msg = e.msg
            if e.countries:
                msg += '\nThis video is available in %s.' % ', '.join(
                    map(ISO3166Utils.short2full, e.countries))
            msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
            self.report_error(msg)
            break
        except ExtractorError as e:  # An error we somewhat expected
            self.report_error(compat_str(e), e.format_traceback())
            break
        except MaxDownloadsReached:
            # Propagate: this is the normal way to stop after --max-downloads.
            raise
        except Exception as e:
            if self.params.get('ignoreerrors', False):
                self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                break
            else:
                raise
    else:
        # for/else: no extractor's suitable() matched the URL at all.
        self.report_error('no suitable InfoExtractor for URL %s' % url)
833
def add_default_extra_info(self, ie_result, ie, url):
    """Attach the standard extractor/webpage bookkeeping fields to an
    extractor result, without overwriting keys already present."""
    defaults = {
        'extractor': ie.IE_NAME,
        'webpage_url': url,
        'webpage_url_basename': url_basename(url),
        'extractor_key': ie.ie_key(),
    }
    self.add_extra_info(ie_result, defaults)
841
def process_ie_result(self, ie_result, download=True, extra_info={}):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).

    It will also download the videos if 'download'.
    Returns the resolved ie_result.

    Dispatches on ie_result['_type']: 'video', 'url', 'url_transparent',
    'playlist'/'multi_video', and the legacy 'compat_list'.
    """
    result_type = ie_result.get('_type', 'video')

    if result_type in ('url', 'url_transparent'):
        ie_result['url'] = sanitize_url(ie_result['url'])
        extract_flat = self.params.get('extract_flat', False)
        # --flat-playlist: do not resolve entries further; print the
        # forced fields and hand back the unresolved result as-is.
        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                or extract_flat is True):
            self.__forced_printings(
                ie_result, self.prepare_filename(ie_result),
                incomplete=True)
            return ie_result

    if result_type == 'video':
        self.add_extra_info(ie_result, extra_info)
        return self.process_video_result(ie_result, download=download)
    elif result_type == 'url':
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(ie_result['url'],
                                 download,
                                 ie_key=ie_result.get('ie_key'),
                                 extra_info=extra_info)
    elif result_type == 'url_transparent':
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result['url'], ie_key=ie_result.get('ie_key'),
            extra_info=extra_info, download=False, process=False)

        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info

        # Non-None fields of the embedding result override the inner one,
        # except identity fields, which must remain the inner result's.
        force_properties = dict(
            (k, v) for k, v in ie_result.items() if v is not None)
        for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)

        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
        if new_result.get('_type') == 'url':
            new_result['_type'] = 'url_transparent'

        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info)
    elif result_type in ('playlist', 'multi_video'):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        playlist_results = []

        # playliststart is converted to a 0-based slice start here.
        playliststart = self.params.get('playliststart', 1) - 1
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
            def iter_playlistitems(format):
                # Expand "1-3,7"-style specs into individual 1-based indices.
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']

        def make_playlistitems_entries(list_ie_entries):
            # Pick the requested 1-based indices, silently dropping any
            # that fall outside the available range.
            num_entries = len(list_ie_entries)
            return [
                list_ie_entries[i - 1] for i in playlistitems
                if -num_entries <= i - 1 < num_entries]

        def report_download(num_entries):
            self.to_screen(
                '[%s] playlist %s: Downloading %d videos' %
                (ie_result['extractor'], playlist, num_entries))

        # Entries may be a plain list, a lazy PagedList, or any other
        # iterable/generator; each kind is sliced differently.
        if isinstance(ie_entries, list):
            n_all_entries = len(ie_entries)
            if playlistitems:
                entries = make_playlistitems_entries(ie_entries)
            else:
                entries = ie_entries[playliststart:playlistend]
            n_entries = len(entries)
            self.to_screen(
                '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
                (ie_result['extractor'], playlist, n_all_entries, n_entries))
        elif isinstance(ie_entries, PagedList):
            if playlistitems:
                entries = []
                for item in playlistitems:
                    entries.extend(ie_entries.getslice(
                        item - 1, item
                    ))
            else:
                entries = ie_entries.getslice(
                    playliststart, playlistend)
            n_entries = len(entries)
            report_download(n_entries)
        else:  # iterable
            if playlistitems:
                # Materialize only as far as the highest requested index.
                entries = make_playlistitems_entries(list(itertools.islice(
                    ie_entries, 0, max(playlistitems))))
            else:
                entries = list(itertools.islice(
                    ie_entries, playliststart, playlistend))
            n_entries = len(entries)
            report_download(n_entries)

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]

        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        for i, entry in enumerate(entries, 1):
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            # Playlist context propagated into every entry's info dict.
            extra = {
                'n_entries': n_entries,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            # incomplete=True: the entry is not fully extracted yet, so
            # only filters that tolerate missing fields are applied.
            reason = self._match_entry(entry, incomplete=True)
            if reason is not None:
                self.to_screen('[download] ' + reason)
                continue

            entry_result = self.process_ie_result(entry,
                                                  download=download,
                                                  extra_info=extra)
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results
        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
        return ie_result
    elif result_type == 'compat_list':
        self.report_warning(
            'Extractor %s returned a compat_list result. '
            'It needs to be updated.' % ie_result.get('extractor'))

        def _fixup(r):
            # Stamp the playlist-level bookkeeping fields onto each entry.
            self.add_extra_info(
                r,
                {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                }
            )
            return r
        ie_result['entries'] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result['entries']
        ]
        return ie_result
    else:
        raise Exception('Invalid result type: %s' % result_type)
1037
1038 def _build_format_filter(self, filter_spec):
1039 " Returns a function to filter the formats according to the filter_spec "
1040
1041 OPERATORS = {
1042 '<': operator.lt,
1043 '<=': operator.le,
1044 '>': operator.gt,
1045 '>=': operator.ge,
1046 '=': operator.eq,
1047 '!=': operator.ne,
1048 }
1049 operator_rex = re.compile(r'''(?x)\s*
1050 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
1051 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1052 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
1053 $
1054 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1055 m = operator_rex.search(filter_spec)
1056 if m:
1057 try:
1058 comparison_value = int(m.group('value'))
1059 except ValueError:
1060 comparison_value = parse_filesize(m.group('value'))
1061 if comparison_value is None:
1062 comparison_value = parse_filesize(m.group('value') + 'B')
1063 if comparison_value is None:
1064 raise ValueError(
1065 'Invalid value %r in format specification %r' % (
1066 m.group('value'), filter_spec))
1067 op = OPERATORS[m.group('op')]
1068
1069 if not m:
1070 STR_OPERATORS = {
1071 '=': operator.eq,
1072 '^=': lambda attr, value: attr.startswith(value),
1073 '$=': lambda attr, value: attr.endswith(value),
1074 '*=': lambda attr, value: value in attr,
1075 }
1076 str_operator_rex = re.compile(r'''(?x)
1077 \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
1078 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
1079 \s*(?P<value>[a-zA-Z0-9._-]+)
1080 \s*$
1081 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1082 m = str_operator_rex.search(filter_spec)
1083 if m:
1084 comparison_value = m.group('value')
1085 str_op = STR_OPERATORS[m.group('op')]
1086 if m.group('negation'):
1087 op = lambda attr, value: not str_op(attr, value)
1088 else:
1089 op = str_op
1090
1091 if not m:
1092 raise ValueError('Invalid filter specification %r' % filter_spec)
1093
1094 def _filter(f):
1095 actual_value = f.get(m.group('key'))
1096 if actual_value is None:
1097 return m.group('none_inclusive')
1098 return op(actual_value, comparison_value)
1099 return _filter
1100
def _default_format_spec(self, info_dict, download=True):
    """Choose the default --format spec: prefer a merged
    'bestvideo+bestaudio' unless merging is impossible or pointless, in
    which case plain 'best' is tried first."""

    def _merging_possible():
        pp = FFmpegMergerPP(self)
        return pp.available and pp.can_merge()

    def _single_format_preferred():
        # While simulating, or when not downloading at all, the order
        # does not matter — keep the merged default.
        if self.params.get('simulate', False) or not download:
            return False
        # Streaming to stdout and live streams cannot be merged after
        # the fact; likewise when no working merger is available.
        return (self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-'
                or bool(info_dict.get('is_live'))
                or not _merging_possible())

    if _single_format_preferred():
        return '/'.join(['best', 'bestvideo+bestaudio'])
    return '/'.join(['bestvideo+bestaudio', 'best'])
1124
def build_format_selector(self, format_spec):
    """Compile a --format specification string into a selector function.

    The returned callable maps a context dict
    {'formats': [...], 'incomplete_formats': bool} to an iterator of the
    chosen format dicts. The spec is tokenized with Python's tokenizer
    and parsed into a small AST of FormatSelector nodes, which is then
    compiled into nested closures.
    """
    def syntax_error(note, start):
        message = (
            'Invalid format specification: '
            '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
        return SyntaxError(message)

    # AST node kinds.
    PICKFIRST = 'PICKFIRST'   # a/b: first alternative with any match
    MERGE = 'MERGE'           # a+b: merge video and audio formats
    SINGLE = 'SINGLE'         # a single named format (e.g. 'best', 'mp4')
    GROUP = 'GROUP'           # (...) parenthesized sub-expression
    FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

    def _parse_filter(tokens):
        # Consume tokens up to the closing ']' and return the raw filter
        # expression text (parsed later by _build_format_filter).
        filter_parts = []
        for type, string, start, _, _ in tokens:
            if type == tokenize.OP and string == ']':
                return ''.join(filter_parts)
            else:
                filter_parts.append(string)

    def _remove_unused_ops(tokens):
        # Remove operators that we don't use and join them with the surrounding strings
        # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
        ALLOWED_OPS = ('/', '+', ',', '(', ')')
        last_string, last_start, last_end, last_line = None, None, None, None
        for type, string, start, end, line in tokens:
            if type == tokenize.OP and string == '[':
                if last_string:
                    yield tokenize.NAME, last_string, last_start, last_end, last_line
                    last_string = None
                yield type, string, start, end, line
                # everything inside brackets will be handled by _parse_filter
                for type, string, start, end, line in tokens:
                    yield type, string, start, end, line
                    if type == tokenize.OP and string == ']':
                        break
            elif type == tokenize.OP and string in ALLOWED_OPS:
                if last_string:
                    yield tokenize.NAME, last_string, last_start, last_end, last_line
                    last_string = None
                yield type, string, start, end, line
            elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                if not last_string:
                    last_string = string
                    last_start = start
                    last_end = end
                else:
                    last_string += string
        if last_string:
            yield tokenize.NAME, last_string, last_start, last_end, last_line

    def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
        # Recursive-descent parse of a comma-separated selector list.
        # The inside_* flags tell us which delimiters end the current
        # sub-expression and must be pushed back for the caller.
        selectors = []
        current_selector = None
        for type, string, start, _, _ in tokens:
            # ENCODING is only defined in python 3.x
            if type == getattr(tokenize, 'ENCODING', None):
                continue
            elif type in [tokenize.NAME, tokenize.NUMBER]:
                current_selector = FormatSelector(SINGLE, string, [])
            elif type == tokenize.OP:
                if string == ')':
                    if not inside_group:
                        # ')' will be handled by the parentheses group
                        tokens.restore_last_token()
                    break
                elif inside_merge and string in ['/', ',']:
                    tokens.restore_last_token()
                    break
                elif inside_choice and string == ',':
                    tokens.restore_last_token()
                    break
                elif string == ',':
                    if not current_selector:
                        raise syntax_error('"," must follow a format selector', start)
                    selectors.append(current_selector)
                    current_selector = None
                elif string == '/':
                    if not current_selector:
                        raise syntax_error('"/" must follow a format selector', start)
                    first_choice = current_selector
                    second_choice = _parse_format_selection(tokens, inside_choice=True)
                    current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                elif string == '[':
                    # A bare filter like '[height<480]' implies 'best'.
                    if not current_selector:
                        current_selector = FormatSelector(SINGLE, 'best', [])
                    format_filter = _parse_filter(tokens)
                    current_selector.filters.append(format_filter)
                elif string == '(':
                    if current_selector:
                        raise syntax_error('Unexpected "("', start)
                    group = _parse_format_selection(tokens, inside_group=True)
                    current_selector = FormatSelector(GROUP, group, [])
                elif string == '+':
                    video_selector = current_selector
                    audio_selector = _parse_format_selection(tokens, inside_merge=True)
                    if not video_selector or not audio_selector:
                        raise syntax_error('"+" must be between two format selectors', start)
                    current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
                else:
                    raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
            elif type == tokenize.ENDMARKER:
                break
        if current_selector:
            selectors.append(current_selector)
        return selectors

    def _build_selector_function(selector):
        # Compile an AST node (or list of nodes) into a generator-producing
        # closure over the selection context.
        if isinstance(selector, list):
            fs = [_build_selector_function(s) for s in selector]

            def selector_function(ctx):
                for f in fs:
                    for format in f(ctx):
                        yield format
            return selector_function
        elif selector.type == GROUP:
            selector_function = _build_selector_function(selector.selector)
        elif selector.type == PICKFIRST:
            fs = [_build_selector_function(s) for s in selector.selector]

            def selector_function(ctx):
                for f in fs:
                    picked_formats = list(f(ctx))
                    if picked_formats:
                        return picked_formats
                return []
        elif selector.type == SINGLE:
            format_spec = selector.selector

            def selector_function(ctx):
                formats = list(ctx['formats'])
                if not formats:
                    return
                if format_spec == 'all':
                    for f in formats:
                        yield f
                elif format_spec in ['best', 'worst', None]:
                    # Formats are assumed sorted worst-to-best, so 'best'
                    # is the last combined (audio+video) entry.
                    format_idx = 0 if format_spec == 'worst' else -1
                    audiovideo_formats = [
                        f for f in formats
                        if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
                    if audiovideo_formats:
                        yield audiovideo_formats[format_idx]
                    # for extractors with incomplete formats (audio only (soundcloud)
                    # or video only (imgur)) we will fallback to best/worst
                    # {video,audio}-only format
                    elif ctx['incomplete_formats']:
                        yield formats[format_idx]
                elif format_spec == 'bestaudio':
                    audio_formats = [
                        f for f in formats
                        if f.get('vcodec') == 'none']
                    if audio_formats:
                        yield audio_formats[-1]
                elif format_spec == 'worstaudio':
                    audio_formats = [
                        f for f in formats
                        if f.get('vcodec') == 'none']
                    if audio_formats:
                        yield audio_formats[0]
                elif format_spec == 'bestvideo':
                    video_formats = [
                        f for f in formats
                        if f.get('acodec') == 'none']
                    if video_formats:
                        yield video_formats[-1]
                elif format_spec == 'worstvideo':
                    video_formats = [
                        f for f in formats
                        if f.get('acodec') == 'none']
                    if video_formats:
                        yield video_formats[0]
                else:
                    # A known extension selects the best format with that
                    # ext; anything else is treated as a format_id.
                    extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
                    if format_spec in extensions:
                        filter_f = lambda f: f['ext'] == format_spec
                    else:
                        filter_f = lambda f: f['format_id'] == format_spec
                    matches = list(filter(filter_f, formats))
                    if matches:
                        yield matches[-1]
        elif selector.type == MERGE:
            def _merge(formats_info):
                format_1, format_2 = [f['format_id'] for f in formats_info]
                # The first format must contain the video and the
                # second the audio
                if formats_info[0].get('vcodec') == 'none':
                    self.report_error('The first format must '
                                      'contain the video, try using '
                                      '"-f %s+%s"' % (format_2, format_1))
                    return
                # Formats must be opposite (video+audio)
                if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
                    self.report_error(
                        'Both formats %s and %s are video-only, you must specify "-f video+audio"'
                        % (format_1, format_2))
                    return
                output_ext = (
                    formats_info[0]['ext']
                    if self.params.get('merge_output_format') is None
                    else self.params['merge_output_format'])
                # Synthesized merged-format dict: video attributes from the
                # first format, audio attributes from the second.
                return {
                    'requested_formats': formats_info,
                    'format': '%s+%s' % (formats_info[0].get('format'),
                                         formats_info[1].get('format')),
                    'format_id': '%s+%s' % (formats_info[0].get('format_id'),
                                            formats_info[1].get('format_id')),
                    'width': formats_info[0].get('width'),
                    'height': formats_info[0].get('height'),
                    'resolution': formats_info[0].get('resolution'),
                    'fps': formats_info[0].get('fps'),
                    'vcodec': formats_info[0].get('vcodec'),
                    'vbr': formats_info[0].get('vbr'),
                    'stretched_ratio': formats_info[0].get('stretched_ratio'),
                    'acodec': formats_info[1].get('acodec'),
                    'abr': formats_info[1].get('abr'),
                    'ext': output_ext,
                }
            video_selector, audio_selector = map(_build_selector_function, selector.selector)

            def selector_function(ctx):
                for pair in itertools.product(
                        video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
                    yield _merge(pair)

        # Apply the node's attached [..] filters on a deep copy of the
        # context so sibling selectors see the unfiltered format list.
        filters = [self._build_format_filter(f) for f in selector.filters]

        def final_selector(ctx):
            ctx_copy = copy.deepcopy(ctx)
            for _filter in filters:
                ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
            return selector_function(ctx_copy)
        return final_selector

    stream = io.BytesIO(format_spec.encode('utf-8'))
    try:
        tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
    except tokenize.TokenError:
        raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

    class TokenIterator(object):
        # Token stream with one-token push-back (restore_last_token),
        # needed by the recursive parser above.
        def __init__(self, tokens):
            self.tokens = tokens
            self.counter = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.counter >= len(self.tokens):
                raise StopIteration()
            value = self.tokens[self.counter]
            self.counter += 1
            return value

        next = __next__

        def restore_last_token(self):
            self.counter -= 1

    parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
    return _build_selector_function(parsed_selector)
1389
def _calc_headers(self, info_dict):
    """Compute the effective HTTP headers for this info_dict: global
    defaults overridden by per-format 'http_headers', plus the matching
    cookies and the faked X-Forwarded-For IP when one is set."""
    headers = dict(std_headers)

    extra_headers = info_dict.get('http_headers')
    if extra_headers:
        headers.update(extra_headers)

    cookie_header = self._calc_cookies(info_dict)
    if cookie_header:
        headers['Cookie'] = cookie_header

    forged_ip = info_dict.get('__x_forwarded_for_ip')
    if forged_ip and 'X-Forwarded-For' not in headers:
        headers['X-Forwarded-For'] = forged_ip

    return headers
1407
def _calc_cookies(self, info_dict):
    # Build a throwaway request for the format URL so the cookiejar can
    # decide which cookies apply, then read back the generated header.
    pr = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(pr)
    return pr.get_header('Cookie')  # None when no cookies match the URL
1412
def process_video_result(self, info_dict, download=True):
    """Sanitize a single video result, select the requested subtitles and
    formats, and (if download) hand each chosen format to process_info().

    Mutates and returns info_dict. Returns None early for the pure
    listing modes (--list-thumbnails, --list-subs, --list-formats).
    Raises ExtractorError on missing mandatory fields or when no format
    matches the requested spec.
    """
    assert info_dict.get('_type', 'video') == 'video'

    if 'id' not in info_dict:
        raise ExtractorError('Missing "id" field in extractor result')
    if 'title' not in info_dict:
        raise ExtractorError('Missing "title" field in extractor result')

    def report_force_conversion(field, field_not, conversion):
        self.report_warning(
            '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
            % (field, field_not, conversion))

    def sanitize_string_field(info, string_field):
        # Coerce a non-string field to str, warning about the extractor bug.
        field = info.get(string_field)
        if field is None or isinstance(field, compat_str):
            return
        report_force_conversion(string_field, 'a string', 'string')
        info[string_field] = compat_str(field)

    def sanitize_numeric_fields(info):
        # Coerce known-numeric fields to int, warning about the extractor bug.
        for numeric_field in self._NUMERIC_FIELDS:
            field = info.get(numeric_field)
            if field is None or isinstance(field, compat_numeric_types):
                continue
            report_force_conversion(numeric_field, 'numeric', 'int')
            info[numeric_field] = int_or_none(field)

    sanitize_string_field(info_dict, 'id')
    sanitize_numeric_fields(info_dict)

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    # --- thumbnails: normalize, sort worst-to-best, assign ids ---
    thumbnails = info_dict.get('thumbnails')
    if thumbnails is None:
        thumbnail = info_dict.get('thumbnail')
        if thumbnail:
            info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
    if thumbnails:
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '', t.get('url')))
        for i, t in enumerate(thumbnails):
            t['url'] = sanitize_url(t['url'])
            if t.get('width') and t.get('height'):
                t['resolution'] = '%dx%d' % (t['width'], t['height'])
            if t.get('id') is None:
                t['id'] = '%d' % i

    if self.params.get('list_thumbnails'):
        self.list_thumbnails(info_dict)
        return

    thumbnail = info_dict.get('thumbnail')
    if thumbnail:
        info_dict['thumbnail'] = sanitize_url(thumbnail)
    elif thumbnails:
        # Last thumbnail is the best after the sort above.
        info_dict['thumbnail'] = thumbnails[-1]['url']

    if 'display_id' not in info_dict and 'id' in info_dict:
        info_dict['display_id'] = info_dict['id']

    if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
        # Working around out-of-range timestamp values (e.g. negative ones on Windows,
        # see http://bugs.python.org/issue1646728)
        try:
            upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
        except (ValueError, OverflowError, OSError):
            pass

    # Auto generate title fields corresponding to the *_number fields when missing
    # in order to always have clean titles. This is very common for TV series.
    for field in ('chapter', 'season', 'episode'):
        if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
            info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    # --- subtitles: sanitize URLs and fill in missing extensions ---
    for cc_kind in ('subtitles', 'automatic_captions'):
        cc = info_dict.get(cc_kind)
        if cc:
            for _, subtitle in cc.items():
                for subtitle_format in subtitle:
                    if subtitle_format.get('url'):
                        subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                    if subtitle_format.get('ext') is None:
                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

    automatic_captions = info_dict.get('automatic_captions')
    subtitles = info_dict.get('subtitles')

    if self.params.get('listsubtitles', False):
        if 'automatic_captions' in info_dict:
            self.list_subtitles(
                info_dict['id'], automatic_captions, 'automatic captions')
        self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        return

    info_dict['requested_subtitles'] = self.process_subtitles(
        info_dict['id'], subtitles, automatic_captions)

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    if not formats:
        raise ExtractorError('No video formats found!')

    def is_wellformed(f):
        url = f.get('url')
        if not url:
            self.report_warning(
                '"url" field is missing or empty - skipping format, '
                'there is an error in extractor')
            return False
        if isinstance(url, bytes):
            sanitize_string_field(f, 'url')
        return True

    # Filter out malformed formats for better extraction robustness
    formats = list(filter(is_wellformed, formats))

    formats_dict = {}

    # We check that all the formats have the format and format_id fields
    for i, format in enumerate(formats):
        sanitize_string_field(format, 'format_id')
        sanitize_numeric_fields(format)
        format['url'] = sanitize_url(format['url'])
        if not format.get('format_id'):
            format['format_id'] = compat_str(i)
        else:
            # Sanitize format_id from characters used in format selector expression
            format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
        format_id = format['format_id']
        if format_id not in formats_dict:
            formats_dict[format_id] = []
        formats_dict[format_id].append(format)

    # Make sure all formats have unique format_id
    for format_id, ambiguous_formats in formats_dict.items():
        if len(ambiguous_formats) > 1:
            for i, format in enumerate(ambiguous_formats):
                format['format_id'] = '%s-%d' % (format_id, i)

    for i, format in enumerate(formats):
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
            )
        # Automatically determine file extension if missing
        if format.get('ext') is None:
            format['ext'] = determine_ext(format['url']).lower()
        # Automatically determine protocol if missing (useful for format
        # selection purposes)
        if format.get('protocol') is None:
            format['protocol'] = determine_protocol(format)
        # Add HTTP headers, so that external programs can use them from the
        # json output
        full_format_info = info_dict.copy()
        full_format_info.update(format)
        format['http_headers'] = self._calc_headers(full_format_info)
    # Remove private housekeeping stuff
    if '__x_forwarded_for_ip' in info_dict:
        del info_dict['__x_forwarded_for_ip']

    # TODO Central sorting goes here

    if formats[0] is not info_dict:
        # only set the 'formats' fields if the original info_dict list them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats
    if self.params.get('listformats'):
        self.list_formats(info_dict)
        return

    req_format = self.params.get('format')
    if req_format is None:
        req_format = self._default_format_spec(info_dict, download=download)
        if self.params.get('verbose'):
            self.to_stdout('[debug] Default format spec: %s' % req_format)

    format_selector = self.build_format_selector(req_format)

    # While in format selection we may need to have an access to the original
    # format set in order to calculate some metrics or do some processing.
    # For now we need to be able to guess whether original formats provided
    # by extractor are incomplete or not (i.e. whether extractor provides only
    # video-only or audio-only formats) for proper formats selection for
    # extractors with such incomplete formats (see
    # https://github.com/ytdl-org/youtube-dl/pull/5556).
    # Since formats may be filtered during format selection and may not match
    # the original formats the results may be incorrect. Thus original formats
    # or pre-calculated metrics should be passed to format selection routines
    # as well.
    # We will pass a context object containing all necessary additional data
    # instead of just formats.
    # This fixes incorrect format selection issue (see
    # https://github.com/ytdl-org/youtube-dl/issues/10083).
    incomplete_formats = (
        # All formats are video-only or
        all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
        # all formats are audio-only
        or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

    ctx = {
        'formats': formats,
        'incomplete_formats': incomplete_formats,
    }

    formats_to_download = list(format_selector(ctx))
    if not formats_to_download:
        raise ExtractorError('requested format not available',
                             expected=True)

    if download:
        if len(formats_to_download) > 1:
            self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
        for format in formats_to_download:
            new_info = dict(info_dict)
            new_info.update(format)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    info_dict.update(formats_to_download[-1])
    return info_dict
1649
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
    """Select the requested subtitles and their format"""
    available_subs = {}
    if normal_subtitles and self.params.get('writesubtitles'):
        available_subs.update(normal_subtitles)
    if automatic_captions and self.params.get('writeautomaticsub'):
        # Automatic captions never override real subtitles for a language.
        for lang, cap_info in automatic_captions.items():
            available_subs.setdefault(lang, cap_info)

    # Nothing to do when subtitles were not requested, or none exist.
    if not available_subs or not (self.params.get('writesubtitles')
                                  or self.params.get('writeautomaticsub')):
        return None

    if self.params.get('allsubtitles', False):
        requested_langs = list(available_subs)
    elif self.params.get('subtitleslangs', False):
        requested_langs = self.params.get('subtitleslangs')
    elif 'en' in available_subs:
        requested_langs = ['en']
    else:
        requested_langs = [next(iter(available_subs))]

    formats_query = self.params.get('subtitlesformat', 'best')
    formats_preference = formats_query.split('/') if formats_query else []
    subs = {}
    for lang in requested_langs:
        formats = available_subs.get(lang)
        if formats is None:
            self.report_warning('%s subtitles not available for %s' % (lang, video_id))
            continue
        chosen = None
        for ext in formats_preference:
            if ext == 'best':
                chosen = formats[-1]
                break
            matches = [f for f in formats if f['ext'] == ext]
            if matches:
                chosen = matches[-1]
                break
        if chosen is None:
            # No preferred extension matched: fall back to the best one.
            chosen = formats[-1]
            self.report_warning(
                'No subtitle format found matching "%s" for language %s, '
                'using %s' % (formats_query, lang, chosen['ext']))
        subs[lang] = chosen
    return subs
1698
def __forced_printings(self, info_dict, filename, incomplete):
    """Write the fields selected by the --force-* options to stdout."""
    params = self.params

    def emit_required(field):
        # Required fields are printed even when the info is incomplete,
        # unless the value is actually missing in that case.
        if params.get('force%s' % field, False):
            if not incomplete or info_dict.get(field) is not None:
                self.to_stdout(info_dict[field])

    def emit_if_present(field):
        # Optional fields are only printed when a value is available.
        if params.get('force%s' % field, False) and info_dict.get(field) is not None:
            self.to_stdout(info_dict[field])

    emit_required('title')
    emit_required('id')
    if params.get('forceurl', False) and not incomplete:
        requested = info_dict.get('requested_formats')
        if requested is not None:
            for f in requested:
                self.to_stdout(f['url'] + f.get('play_path', ''))
        else:
            # For RTMP URLs, also include the playpath
            self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
    emit_if_present('thumbnail')
    emit_if_present('description')
    if params.get('forcefilename', False) and filename is not None:
        self.to_stdout(filename)
    if params.get('forceduration', False) and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    emit_required('format')
    if params.get('forcejson', False):
        self.to_stdout(json.dumps(info_dict))
1728
def process_info(self, info_dict):
    """Process a single resolved IE result: apply filters, write side files
    (description, annotations, subtitles, info.json, thumbnails), download
    the media and queue/run postprocessors."""

    assert info_dict.get('_type', 'video') == 'video'

    # Enforce --max-downloads before doing any work on this entry.
    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None:
        if self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()

    # TODO: backward compatibility, to be removed
    info_dict['fulltitle'] = info_dict['title']

    if 'format' not in info_dict:
        info_dict['format'] = info_dict['ext']

    # A non-None reason means the video is filtered out (match filters,
    # date range, archive, ...) and must be skipped.
    reason = self._match_entry(info_dict, incomplete=False)
    if reason is not None:
        self.to_screen('[download] ' + reason)
        return

    self._num_downloads += 1

    info_dict['_filename'] = filename = self.prepare_filename(info_dict)

    # Forced printings
    self.__forced_printings(info_dict, filename, incomplete=False)

    # Do nothing else if in simulate mode
    if self.params.get('simulate', False):
        return

    if filename is None:
        return

    def ensure_dir_exists(path):
        # Best-effort creation of the destination directory; returns False
        # (after reporting) so callers can abort the whole entry.
        try:
            dn = os.path.dirname(path)
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
            return True
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + error_to_compat_str(err))
            return False

    if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
        return

    # --write-description
    if self.params.get('writedescription', False):
        descfn = replace_extension(filename, 'description', info_dict.get('ext'))
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
            self.to_screen('[info] Video description is already present')
        elif info_dict.get('description') is None:
            self.report_warning('There\'s no description to write.')
        else:
            try:
                self.to_screen('[info] Writing video description to: ' + descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (OSError, IOError):
                self.report_error('Cannot write description file ' + descfn)
                return

    # --write-annotations
    if self.params.get('writeannotations', False):
        annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
            self.to_screen('[info] Video annotations are already present')
        elif not info_dict.get('annotations'):
            self.report_warning('There are no annotations to write.')
        else:
            try:
                self.to_screen('[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning('There are no annotations to write.')
            except (OSError, IOError):
                self.report_error('Cannot write annotations file: ' + annofn)
                return

    def dl(name, info):
        # Pick the right FileDownloader for *info* and run it, wiring up
        # all registered progress hooks.
        fd = get_suitable_downloader(info, self.params)(self, self.params)
        for ph in self._progress_hooks:
            fd.add_progress_hook(ph)
        if self.params.get('verbose'):
            self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
        return fd.download(name, info)

    subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                   self.params.get('writeautomaticsub')])

    if subtitles_are_requested and info_dict.get('requested_subtitles'):
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict['requested_subtitles']
        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
            else:
                if sub_info.get('data') is not None:
                    try:
                        # Use newline='' to prevent conversion of newline characters
                        # See https://github.com/ytdl-org/youtube-dl/issues/10268
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
                            subfile.write(sub_info['data'])
                    except (OSError, IOError):
                        self.report_error('Cannot write subtitles file ' + sub_filename)
                        return
                else:
                    try:
                        dl(sub_filename, sub_info)
                    except (ExtractorError, IOError, OSError, ValueError,
                            compat_urllib_error.URLError,
                            compat_http_client.HTTPException,
                            socket.error) as err:
                        self.report_warning('Unable to download subtitle for "%s": %s' %
                                            (sub_lang, error_to_compat_str(err)))
                        continue

    if self.params.get('skip_download', False):
        # Even when the media download is skipped, --convert-subs may still
        # need to run the subtitle convertor postprocessor.
        if self.params.get('convertsubtitles', False):
            subconv = FFmpegSubtitlesConvertorPP(self, format=self.params.get('convertsubtitles'))
            filename_real_ext = os.path.splitext(filename)[1][1:]
            filename_wo_ext = (
                os.path.splitext(filename)[0]
                if filename_real_ext == info_dict['ext']
                else filename)
            afilename = '%s.%s' % (filename_wo_ext, self.params.get('convertsubtitles'))
            if subconv.available:
                info_dict.setdefault('__postprocessors', [])
                # info_dict['__postprocessors'].append(subconv)
            if os.path.exists(encodeFilename(afilename)):
                self.to_screen(
                    '[download] %s has already been downloaded and '
                    'converted' % afilename)
            else:
                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error('postprocessing: %s' % str(err))
                    return

    # --write-info-json
    if self.params.get('writeinfojson', False):
        infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
            self.to_screen('[info] Video description metadata is already present')
        else:
            self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
            try:
                write_json_file(self.filter_requested_info(info_dict), infofn)
            except (OSError, IOError):
                self.report_error('Cannot write metadata to JSON file ' + infofn)
                return

    self._write_thumbnails(info_dict, filename)

    if not self.params.get('skip_download', False):
        try:
            if info_dict.get('requested_formats') is not None:
                # Multiple formats were selected: download each part and
                # merge them afterwards with ffmpeg/avconv when available.
                downloaded = []
                success = True
                merger = FFmpegMergerPP(self)
                if not merger.available:
                    postprocessors = []
                    self.report_warning('You have requested multiple '
                                        'formats but ffmpeg or avconv are not installed.'
                                        ' The formats won\'t be merged.')
                else:
                    postprocessors = [merger]

                def compatible_formats(formats):
                    # True when the (video, audio) pair can share one
                    # container without remuxing into mkv.
                    video, audio = formats
                    # Check extension
                    video_ext, audio_ext = video.get('ext'), audio.get('ext')
                    if video_ext and audio_ext:
                        # NOTE(review): ('webm') is a plain string, not a
                        # 1-tuple, so the membership tests below degrade to
                        # substring checks for that entry; exact 'webm'
                        # still matches, but so would e.g. 'web'.
                        # Should presumably be ('webm',) — TODO confirm.
                        COMPATIBLE_EXTS = (
                            ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
                            ('webm')
                        )
                        for exts in COMPATIBLE_EXTS:
                            if video_ext in exts and audio_ext in exts:
                                return True
                    # TODO: Check acodec/vcodec
                    return False

                filename_real_ext = os.path.splitext(filename)[1][1:]
                filename_wo_ext = (
                    os.path.splitext(filename)[0]
                    if filename_real_ext == info_dict['ext']
                    else filename)
                requested_formats = info_dict['requested_formats']
                if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
                    info_dict['ext'] = 'mkv'
                    self.report_warning(
                        'Requested formats are incompatible for merge and will be merged into mkv.')
                # Ensure filename always has a correct extension for successful merge
                filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
                if os.path.exists(encodeFilename(filename)):
                    self.to_screen(
                        '[download] %s has already been downloaded and '
                        'merged' % filename)
                else:
                    for f in requested_formats:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        # Each part gets an 'f<format_id>' infix so the
                        # parts don't collide with the merged output.
                        fname = prepend_extension(
                            self.prepare_filename(new_info),
                            'f%s' % f['format_id'], new_info['ext'])
                        if not ensure_dir_exists(fname):
                            return
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
            else:
                # Just a single file
                success = dl(filename, info_dict)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self.report_error('unable to download video data: %s' % error_to_compat_str(err))
            return
        except (OSError, IOError) as err:
            raise UnavailableVideoError(err)
        except (ContentTooShortError, ) as err:
            self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
            return

        if success and filename != '-':
            # Fixup content
            fixup_policy = self.params.get('fixup')
            if fixup_policy is None:
                fixup_policy = 'detect_or_warn'

            INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'

            # Fixup 1: non-uniform pixel (aspect) ratio.
            stretched_ratio = info_dict.get('stretched_ratio')
            if stretched_ratio is not None and stretched_ratio != 1:
                if fixup_policy == 'warn':
                    self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
                        info_dict['id'], stretched_ratio))
                elif fixup_policy == 'detect_or_warn':
                    stretched_pp = FFmpegFixupStretchedPP(self)
                    if stretched_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(stretched_pp)
                    else:
                        self.report_warning(
                            '%s: Non-uniform pixel ratio (%s). %s'
                            % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
                else:
                    assert fixup_policy in ('ignore', 'never')

            # Fixup 2: DASH m4a container.
            if (info_dict.get('requested_formats') is None
                    and info_dict.get('container') == 'm4a_dash'):
                if fixup_policy == 'warn':
                    self.report_warning(
                        '%s: writing DASH m4a. '
                        'Only some players support this container.'
                        % info_dict['id'])
                elif fixup_policy == 'detect_or_warn':
                    fixup_pp = FFmpegFixupM4aPP(self)
                    if fixup_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(fixup_pp)
                    else:
                        self.report_warning(
                            '%s: writing DASH m4a. '
                            'Only some players support this container. %s'
                            % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                else:
                    assert fixup_policy in ('ignore', 'never')

            # Fixup 3: malformed AAC bitstream in HLS downloads.
            # NOTE(review): operator precedence makes this read as
            # m3u8_native OR (m3u8 AND hls_prefer_native) — presumably
            # intentional, but worth confirming.
            if (info_dict.get('protocol') == 'm3u8_native'
                    or info_dict.get('protocol') == 'm3u8'
                    and self.params.get('hls_prefer_native')):
                if fixup_policy == 'warn':
                    self.report_warning('%s: malformed AAC bitstream detected.' % (
                        info_dict['id']))
                elif fixup_policy == 'detect_or_warn':
                    fixup_pp = FFmpegFixupM3u8PP(self)
                    if fixup_pp.available:
                        info_dict.setdefault('__postprocessors', [])
                        info_dict['__postprocessors'].append(fixup_pp)
                    else:
                        self.report_warning(
                            '%s: malformed AAC bitstream detected. %s'
                            % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                else:
                    assert fixup_policy in ('ignore', 'never')

            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError) as err:
                self.report_error('postprocessing: %s' % str(err))
                return
            self.record_download_archive(info_dict)
2027
def download(self, url_list):
    """Download a given list of URLs."""
    outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
    # A fixed (template-free) output name with several URLs would make
    # every download overwrite the same file, so refuse it up front.
    fixed_template = outtmpl != '-' and '%' not in outtmpl
    if (len(url_list) > 1
            and fixed_template
            and self.params.get('max_downloads') != 1):
        raise SameFileError(outtmpl)

    for url in url_list:
        try:
            # extract_info also performs the actual download
            result = self.extract_info(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.to_stdout(json.dumps(result))

    return self._download_retcode
2052
def download_with_info_file(self, info_filename):
    """Download using a previously dumped .info.json file."""
    # FileInput has no read() method, so the JSON text is assembled line
    # by line before decoding; hook_encoded makes it decode as UTF-8.
    fi = fileinput.FileInput(
        [info_filename], mode='r',
        openhook=fileinput.hook_encoded('utf-8'))
    with contextlib.closing(fi) as lines:
        raw = '\n'.join(lines)
    info = self.filter_requested_info(json.loads(raw))
    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            raise
        # Fall back to a fresh extraction from the original webpage URL.
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])
    return self._download_retcode
2069
@staticmethod
def filter_requested_info(info_dict):
    """Return a copy of *info_dict* without the transient 'requested_*' keys."""
    return {
        k: v for k, v in info_dict.items()
        if k not in ('requested_formats', 'requested_subtitles')}
2075
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    # Work on a copy so the caller's dict isn't polluted with 'filepath'.
    info = dict(ie_info)
    info['filepath'] = filename

    # Per-video postprocessors ('__postprocessors') run before the
    # globally configured ones.
    chain = []
    if ie_info.get('__postprocessors') is not None:
        chain.extend(ie_info['__postprocessors'])
    chain.extend(self._pps)

    for pp in chain:
        files_to_delete = []
        try:
            files_to_delete, info = pp.run(info)
        except PostProcessingError as e:
            self.report_error(e.msg)
        if not files_to_delete or self.params.get('keepvideo', False):
            continue
        for old_filename in files_to_delete:
            self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
            try:
                os.remove(encodeFilename(old_filename))
            except (IOError, OSError):
                self.report_warning('Unable to remove downloaded original file')
2097
def _make_archive_id(self, info_dict):
    """Build the '<extractor> <id>' key used by the download archive file."""
    video_id = info_dict.get('id')
    if not video_id:
        return None
    # Future-proof against any change in case
    # and backwards compatibility with prior versions
    extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
    if extractor is None:
        url = str_or_none(info_dict.get('url'))
        if not url:
            return None
        # Try to find matching extractor for the URL and take its ie_key
        for ie in self._ies:
            if ie.suitable(url):
                extractor = ie.ie_key()
                break
        else:
            return None
    return extractor.lower() + ' ' + video_id
2117
def in_download_archive(self, info_dict):
    """Return True when this video is already recorded in the archive file."""
    archive_filename = self.params.get('download_archive')
    if archive_filename is None:
        return False

    vid_id = self._make_archive_id(info_dict)
    if not vid_id:
        # Incomplete video information
        return False

    try:
        with locked_file(archive_filename, 'r', encoding='utf-8') as archive_file:
            return any(line.strip() == vid_id for line in archive_file)
    except IOError as ioe:
        # A missing archive file just means nothing was recorded yet.
        if ioe.errno != errno.ENOENT:
            raise
    return False
2136
def record_download_archive(self, info_dict):
    """Append this video's archive id to the download-archive file."""
    archive_path = self.params.get('download_archive')
    if archive_path is None:
        return
    archive_id = self._make_archive_id(info_dict)
    assert archive_id
    with locked_file(archive_path, 'a', encoding='utf-8') as archive_file:
        archive_file.write(archive_id + '\n')
2145
@staticmethod
def format_resolution(format, default='unknown'):
    """Return a human-readable resolution string for a format dict."""
    if format.get('vcodec') == 'none':
        return 'audio only'
    if format.get('resolution') is not None:
        return format['resolution']
    width, height = format.get('width'), format.get('height')
    if height is not None:
        # Prefer WxH; fall back to the common '<height>p' shorthand.
        if width is not None:
            return '%sx%s' % (width, height)
        return '%sp' % height
    if width is not None:
        return '%dx?' % width
    return default
2162
def _format_note(self, fdict):
    """Build the free-form 'note' column shown for a format in --list-formats.

    The string is assembled left to right; most sections prepend ', ' only
    when something has already been written, so ordering of the checks is
    significant.
    """
    res = ''
    if fdict.get('ext') in ['f4f', 'f4m']:
        res += '(unsupported) '
    if fdict.get('language'):
        if res:
            res += ' '
        res += '[%s] ' % fdict['language']
    if fdict.get('format_note') is not None:
        res += fdict['format_note'] + ' '
    if fdict.get('tbr') is not None:
        res += '%4dk ' % fdict['tbr']
    if fdict.get('container') is not None:
        if res:
            res += ', '
        res += '%s container' % fdict['container']
    if (fdict.get('vcodec') is not None
            and fdict.get('vcodec') != 'none'):
        if res:
            res += ', '
        res += fdict['vcodec']
        # '@' glues the codec name to the video bitrate appended below.
        if fdict.get('vbr') is not None:
            res += '@'
    elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
        # Bitrates known but the video codec is not.
        res += 'video@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    if fdict.get('fps') is not None:
        if res:
            res += ', '
        res += '%sfps' % fdict['fps']
    if fdict.get('acodec') is not None:
        if res:
            res += ', '
        if fdict['acodec'] == 'none':
            res += 'video only'
        else:
            res += '%-5s' % fdict['acodec']
    elif fdict.get('abr') is not None:
        if res:
            res += ', '
        res += 'audio'
    if fdict.get('abr') is not None:
        res += '@%3dk' % fdict['abr']
    if fdict.get('asr') is not None:
        res += ' (%5dHz)' % fdict['asr']
    if fdict.get('filesize') is not None:
        if res:
            res += ', '
        res += format_bytes(fdict['filesize'])
    elif fdict.get('filesize_approx') is not None:
        if res:
            res += ', '
        res += '~' + format_bytes(fdict['filesize_approx'])
    return res
2218
def list_formats(self, info_dict):
    """Print a table of all selectable formats for the given video."""
    formats = info_dict.get('formats', [info_dict])
    table = []
    for f in formats:
        # Formats with a very low preference are hidden from the listing.
        if f.get('preference') is not None and f['preference'] < -1000:
            continue
        table.append(
            [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)])
    if len(formats) > 1:
        # Formats are ordered worst-to-best, so tag the last row.
        table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'

    header_line = ['format code', 'extension', 'resolution', 'note']
    self.to_screen(
        '[info] Available formats for %s:\n%s' %
        (info_dict['id'], render_table(header_line, table)))
2232
def list_thumbnails(self, info_dict):
    """Print a table of the thumbnails available for the given video."""
    thumbnails = info_dict.get('thumbnails')
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return

    self.to_screen(
        '[info] Thumbnails for %s:' % info_dict['id'])
    rows = [
        [t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']]
        for t in thumbnails]
    self.to_screen(render_table(['ID', 'width', 'height', 'URL'], rows))
2244
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print a table of the available subtitle languages and their formats."""
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen(
        'Available %s for %s:' % (name, video_id))
    rows = []
    for lang, formats in subtitles.items():
        # Extensions are listed in reverse of their stored order.
        rows.append([lang, ', '.join(f['ext'] for f in reversed(formats))])
    self.to_screen(render_table(['Language', 'formats'], rows))
2255
def urlopen(self, req):
    """ Start an HTTP download """
    # Bare URL strings are wrapped into a sanitized Request object first.
    request = sanitized_Request(req) if isinstance(req, compat_basestring) else req
    return self._opener.open(request, timeout=self._socket_timeout)
2261
def print_debug_header(self):
    """Write verbose debug info (versions, encodings, proxies) when --verbose is set."""
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/ytdl-org/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dlc version ' + __version__ + '\n')
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled' + '\n')
    try:
        # Best effort: report the git commit when running from a checkout.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        try:
            # sys.exc_clear() only exists on Python 2; the inner guard
            # makes this a no-op on Python 3.
            sys.exc_clear()
        except Exception:
            pass

    def python_implementation():
        # Append the PyPy version triple when available.
        impl_name = platform.python_implementation()
        if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
            return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
        return impl_name

    self._write_string('[debug] Python version %s (%s) - %s\n' % (
        platform.python_version(), python_implementation(),
        platform_name()))

    # External helper programs this run could use.
    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    )
    if not exe_str:
        exe_str = 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    # Collect the effective proxy configuration from the opener's handlers.
    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        # Opt-in (--call-home): report the public IP and check for updates.
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
2337
def _setup_opener(self):
    """Build the urllib opener (cookies, proxies, HTTPS, redirects, data URIs)
    used for all HTTP requests, and store it on self._opener."""
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    # Without --cookies, cookies live only in memory for this run.
    if opts_cookiefile is None:
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        opts_cookiefile = expand_path(opts_cookiefile)
        self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
        if os.access(opts_cookiefile, os.R_OK):
            self.cookiejar.load(ignore_discard=True, ignore_expires=True)

    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
    if opts_proxy is not None:
        if opts_proxy == '':
            # An explicitly empty --proxy disables proxying entirely.
            proxies = {}
        else:
            proxies = {'http': opts_proxy, 'https': opts_proxy}
    else:
        # Fall back to the environment's proxy settings.
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    # --print-traffic turns on urllib's wire-level debug output.
    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    redirect_handler = YoutubeDLRedirectHandler()
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler and allows us to disable the file protocol, which
    # can be used for malicious purposes (see
    # https://github.com/ytdl-org/youtube-dl/issues/8227)
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dlc for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
2390
def encode(self, s):
    """Encode the text *s* with the configured encoding; bytes pass through."""
    if isinstance(s, bytes):
        # Already encoded
        return s
    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        # Make the failure actionable before re-raising it.
        err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
        raise
2400
def get_encoding(self):
    """Return the output encoding: the 'encoding' option when set, else the
    locale's preferred encoding."""
    configured = self.params.get('encoding')
    if configured is not None:
        return configured
    return preferredencoding()
2406
def _write_thumbnails(self, info_dict, filename):
    """Download the video thumbnail(s) next to *filename*, honoring the
    writethumbnail / write_all_thumbnails options."""
    if self.params.get('writethumbnail', False):
        # Only the last stored thumbnail is written in this mode.
        thumbnails = info_dict.get('thumbnails')
        if thumbnails:
            thumbnails = [thumbnails[-1]]
    elif self.params.get('write_all_thumbnails', False):
        thumbnails = info_dict.get('thumbnails')
    else:
        return

    if not thumbnails:
        # No thumbnails present, so return immediately
        return

    multiple = len(thumbnails) > 1
    for t in thumbnails:
        thumb_ext = determine_ext(t['url'], 'jpg')
        # With several thumbnails, disambiguate filenames and messages
        # using the thumbnail id.
        suffix = '_%s' % t['id'] if multiple else ''
        thumb_display_id = '%s ' % t['id'] if multiple else ''
        thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
        t['filename'] = thumb_filename

        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            continue

        self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                       (info_dict['extractor'], info_dict['id'], thumb_display_id))
        try:
            uf = self.urlopen(t['url'])
            with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                shutil.copyfileobj(uf, thumbf)
            self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self.report_warning('Unable to download thumbnail "%s": %s' %
                                (t['url'], error_to_compat_str(err)))