]>
Commit | Line | Data |
---|---|---|
1 | import collections | |
2 | import itertools | |
3 | import json | |
4 | import os | |
5 | import re | |
6 | import subprocess | |
7 | import time | |
8 | ||
9 | from .common import PostProcessor | |
10 | from ..compat import functools, imghdr | |
11 | from ..utils import ( | |
12 | ISO639Utils, | |
13 | Popen, | |
14 | PostProcessingError, | |
15 | _get_exe_version_output, | |
16 | detect_exe_version, | |
17 | determine_ext, | |
18 | dfxp2srt, | |
19 | encodeArgument, | |
20 | encodeFilename, | |
21 | filter_dict, | |
22 | float_or_none, | |
23 | is_outdated_version, | |
24 | orderedSet, | |
25 | prepend_extension, | |
26 | replace_extension, | |
27 | shell_quote, | |
28 | traverse_obj, | |
29 | variadic, | |
30 | write_json_file, | |
31 | write_string, | |
32 | ) | |
33 | ||
# Map of filename extension -> ffmpeg output format ('-f') name, for the
# extensions where ffmpeg's muxer name differs from the extension itself
# (e.g. '.aac' files are written by the 'adts' muxer, '.m4a' by 'ipod').
EXT_TO_OUT_FORMATS = {
    'aac': 'adts',
    'flac': 'flac',
    'm4a': 'ipod',
    'mka': 'matroska',
    'mkv': 'matroska',
    'mpg': 'mpeg',
    'ogv': 'ogg',
    'ts': 'mpegts',
    'wma': 'asf',
    'wmv': 'asf',
    'vtt': 'webvtt',
}
ACODECS = {
    # name: (ext, encoder, opts)
    #   ext     -- extension of the produced file
    #   encoder -- ffmpeg audio encoder name (None = no explicit encoder; opts decide)
    #   opts    -- extra ffmpeg arguments this target requires
    'mp3': ('mp3', 'libmp3lame', ()),
    # '-f adts' produces a raw ADTS stream, so the extension must be 'aac'
    # (an MP4/iPod container is only used by the separate 'm4a' target below);
    # previously this entry wrongly used the 'm4a' extension
    'aac': ('aac', 'aac', ('-f', 'adts')),
    'm4a': ('m4a', 'aac', ('-bsf:a', 'aac_adtstoasc')),
    'opus': ('opus', 'libopus', ()),
    'vorbis': ('ogg', 'libvorbis', ()),
    'flac': ('flac', 'flac', ()),
    'alac': ('m4a', None, ('-acodec', 'alac')),
    'wav': ('wav', None, ('-f', 'wav')),
}
58 | ||
59 | ||
def create_mapping_re(supported):
    """Compile a regex validating mapping strings like 'A>B/C>D/E' over *supported* targets."""
    rule = r'(?:\s*\w+\s*>)?\s*(?:%s)\s*' % '|'.join(supported)
    return re.compile(rf'{rule}(?:/{rule})*$')
62 | ||
63 | ||
def resolve_mapping(source, mapping):
    """
    Get corresponding item from a mapping string like 'A>B/C>D/E'
    @returns (target, error_message)
    """
    for rule in mapping.lower().split('/'):
        key, sep, value = rule.partition('>')
        # A rule without '>' is a catch-all; otherwise the key must match the source
        if sep and key.strip() != source:
            continue
        target = (value if sep else key).strip()
        if target == source:
            return target, f'already is in target format {source}'
        return target, None
    return None, f'could not find a mapping for {source}'
77 | ||
78 | ||
class FFmpegPostProcessorError(PostProcessingError):
    """Raised when an ffmpeg invocation fails or ffmpeg is unavailable."""
    pass
81 | ||
82 | ||
class FFmpegPostProcessor(PostProcessor):
    """Base class for ffmpeg-based postprocessors.

    Locates the ffmpeg/ffprobe (or legacy avconv/avprobe) executables,
    detects their versions and build features, and provides helpers for
    building and running ffmpeg command lines.
    """

    def __init__(self, downloader=None):
        PostProcessor.__init__(self, downloader)
        self._prefer_ffmpeg = self.get_param('prefer_ffmpeg', True)
        self._paths = self._determine_executables()

    @staticmethod
    def get_versions_and_features(downloader=None):
        pp = FFmpegPostProcessor(downloader)
        return pp._versions, pp._features

    @staticmethod
    def get_versions(downloader=None):
        return FFmpegPostProcessor.get_versions_and_features(downloader)[0]

    _ffmpeg_to_avconv = {'ffmpeg': 'avconv', 'ffprobe': 'avprobe'}

    def _determine_executables(self):
        """Return a mapping of program name -> invocable path, honoring --ffmpeg-location."""
        programs = [*self._ffmpeg_to_avconv.keys(), *self._ffmpeg_to_avconv.values()]

        location = self.get_param('ffmpeg_location')
        if location is None:
            # No explicit location given: rely on PATH lookup
            return {p: p for p in programs}

        if not os.path.exists(location):
            self.report_warning(f'ffmpeg-location {location} does not exist! Continuing without ffmpeg')
            return {}
        elif os.path.isdir(location):
            dirname, basename = location, None
        else:
            # A path to a specific executable was given; infer which program it is
            basename = os.path.splitext(os.path.basename(location))[0]
            basename = next((p for p in programs if basename.startswith(p)), 'ffmpeg')
            dirname = os.path.dirname(os.path.abspath(location))
            if basename in self._ffmpeg_to_avconv.keys():
                self._prefer_ffmpeg = True

        paths = {p: os.path.join(dirname, p) for p in programs}
        if basename:
            paths[basename] = location
        return paths

    # Class-level caches shared by all instances, keyed by executable path
    _version_cache, _features_cache = {None: None}, {}

    def _get_ffmpeg_version(self, prog):
        """Return (version, features) of *prog*, using/refreshing the class-level caches."""
        path = self._paths.get(prog)
        if path in self._version_cache:
            return self._version_cache[path], self._features_cache.get(path, {})
        out = _get_exe_version_output(path, ['-bsfs'], to_screen=self.write_debug)
        ver = detect_exe_version(out) if out else False
        if ver:
            # Strip distro-specific decorations from the version string
            regexs = [
                r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$',  # Ubuntu, see [1]
                r'n([0-9.]+)$',  # Arch Linux
                # 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
            ]
            for regex in regexs:
                mobj = re.match(regex, ver)
                if mobj:
                    ver = mobj.group(1)
        self._version_cache[path] = ver
        if prog != 'ffmpeg' or not out:
            return ver, {}

        mobj = re.search(r'(?m)^\s+libavformat\s+(?:[0-9. ]+)\s+/\s+(?P<runtime>[0-9. ]+)', out)
        lavf_runtime_version = mobj.group('runtime').replace(' ', '') if mobj else None
        self._features_cache[path] = features = {
            'fdk': '--enable-libfdk-aac' in out,
            'setts': 'setts' in out.splitlines(),
            'needs_adtstoasc': is_outdated_version(lavf_runtime_version, '57.56.100', False),
        }
        return ver, features

    @property
    def _versions(self):
        return filter_dict({self.basename: self._version, self.probe_basename: self._probe_version})

    @functools.cached_property
    def basename(self):
        # Computing _version assigns self.basename as a side effect (see
        # _get_version), shadowing this property with the detected value
        self._version  # run property
        return self.basename

    @functools.cached_property
    def probe_basename(self):
        # Same side-effect trick as `basename`, for ffprobe/avprobe
        self._probe_version  # run property
        return self.probe_basename

    def _get_version(self, kind):
        """Detect the installed version of 'ffmpeg' or 'ffprobe' (falling back to avconv/avprobe)."""
        executables = (kind, self._ffmpeg_to_avconv[kind])
        if not self._prefer_ffmpeg:
            executables = reversed(executables)
        basename, version, features = next(filter(
            lambda x: x[1], ((p, *self._get_ffmpeg_version(p)) for p in executables)), (None, None, {}))
        if kind == 'ffmpeg':
            self.basename, self._features = basename, features
        else:
            self.probe_basename = basename
        if basename == self._ffmpeg_to_avconv[kind]:
            self.deprecation_warning(
                f'Support for {self._ffmpeg_to_avconv[kind]} is deprecated and may be removed in a future version. Use {kind} instead')
        return version

    @functools.cached_property
    def _version(self):
        return self._get_version('ffmpeg')

    @functools.cached_property
    def _probe_version(self):
        return self._get_version('ffprobe')

    @property
    def available(self):
        return self.basename is not None

    @property
    def executable(self):
        return self._paths.get(self.basename)

    @property
    def probe_available(self):
        return self.probe_basename is not None

    @property
    def probe_executable(self):
        return self._paths.get(self.probe_basename)

    @staticmethod
    def stream_copy_opts(copy=True, *, ext=None):
        """Yield the standard arguments for carrying all streams into the output."""
        yield from ('-map', '0')
        # Don't copy Apple TV chapters track, bin_data
        # See https://github.com/yt-dlp/yt-dlp/issues/2, #19042, #19024, https://trac.ffmpeg.org/ticket/6016
        yield from ('-dn', '-ignore_unknown')
        if copy:
            yield from ('-c', 'copy')
        if ext in ('mp4', 'mov', 'm4a'):
            yield from ('-c:s', 'mov_text')

    def check_version(self):
        """Raise if ffmpeg is missing; warn when it is older than the minimum supported version."""
        if not self.available:
            raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')

        required_version = '10-0' if self.basename == 'avconv' else '1.0'
        if is_outdated_version(self._version, required_version):
            self.report_warning(f'Your copy of {self.basename} is outdated, update {self.basename} '
                                f'to version {required_version} or newer if you encounter any errors')

    def get_audio_codec(self, path):
        """Return the audio codec name of *path*, or None if it cannot be determined."""
        if not self.probe_available and not self.available:
            raise PostProcessingError('ffprobe and ffmpeg not found. Please install or provide the path using --ffmpeg-location')
        try:
            if self.probe_available:
                cmd = [
                    encodeFilename(self.probe_executable, True),
                    encodeArgument('-show_streams')]
            else:
                # Without ffprobe, parse ffmpeg's own stream listing instead
                cmd = [
                    encodeFilename(self.executable, True),
                    encodeArgument('-i')]
            cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
            self.write_debug(f'{self.basename} command line: {shell_quote(cmd)}')
            handle = Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_data, stderr_data = handle.communicate_or_kill()
            # 'ffmpeg -i' without an output exits with 1 even on success
            expected_ret = 0 if self.probe_available else 1
            if handle.wait() != expected_ret:
                return None
        except OSError:
            return None
        output = (stdout_data if self.probe_available else stderr_data).decode('ascii', 'ignore')
        if self.probe_available:
            audio_codec = None
            for line in output.split('\n'):
                if line.startswith('codec_name='):
                    audio_codec = line.split('=')[1].strip()
                elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                    return audio_codec
        else:
            # Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
            mobj = re.search(
                r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
                output)
            if mobj:
                return mobj.group(1)
        return None

    def get_metadata_object(self, path, opts=[]):
        """Run ffprobe on *path* and return the parsed JSON metadata.

        The shared default list `opts` is never mutated here, so it is safe.
        """
        if self.probe_basename != 'ffprobe':
            if self.probe_available:
                self.report_warning('Only ffprobe is supported for metadata extraction')
            raise PostProcessingError('ffprobe not found. Please install or provide the path using --ffmpeg-location')
        self.check_version()

        cmd = [
            encodeFilename(self.probe_executable, True),
            encodeArgument('-hide_banner'),
            encodeArgument('-show_format'),
            encodeArgument('-show_streams'),
            encodeArgument('-print_format'),
            encodeArgument('json'),
        ]

        cmd += opts
        cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
        self.write_debug('ffprobe command line: %s' % shell_quote(cmd))
        p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return json.loads(stdout.decode('utf-8', 'replace'))

    def get_stream_number(self, path, keys, value):
        """@returns (index of the first stream whose *keys* equal *value* or None, total stream count)"""
        streams = self.get_metadata_object(path)['streams']
        num = next(
            (i for i, stream in enumerate(streams) if traverse_obj(stream, keys, casesense=False) == value),
            None)
        return num, len(streams)

    def _get_real_video_duration(self, filepath, fatal=True):
        """Return the container duration as reported by ffprobe (None if not fatal and unknown)."""
        try:
            duration = float_or_none(
                traverse_obj(self.get_metadata_object(filepath), ('format', 'duration')))
            if not duration:
                raise PostProcessingError('ffprobe returned empty duration')
            return duration
        except PostProcessingError as e:
            if fatal:
                raise PostProcessingError(f'Unable to determine video duration: {e.msg}')

    def _duration_mismatch(self, d1, d2, tolerance=2):
        """True if both durations are known and differ by more than *tolerance* seconds."""
        if not d1 or not d2:
            return None
        # The duration is often only known to nearest second. So there can be <1sec disparity naturally.
        # Further excuse an additional <1sec difference.
        return abs(d1 - d2) > tolerance

    def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, **kwargs):
        return self.real_run_ffmpeg(
            [(path, []) for path in input_paths],
            [(out_path, opts)], **kwargs)

    def real_run_ffmpeg(self, input_path_opts, output_path_opts, *, expected_retcodes=(0,)):
        """Run ffmpeg with per-input and per-output options; returns decoded stderr.

        @raises FFmpegPostProcessorError when the return code is unexpected
        """
        self.check_version()

        oldest_mtime = min(
            os.stat(encodeFilename(path)).st_mtime for path, _ in input_path_opts if path)

        cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
        # avconv does not have repeat option
        if self.basename == 'ffmpeg':
            cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]

        def make_args(file, args, name, number):
            # Merge user-supplied --postprocessor-args for this input/output slot
            keys = ['_%s%d' % (name, number), '_%s' % name]
            if name == 'o':
                args += ['-movflags', '+faststart']
            if number == 1:
                keys.append('')
            args += self._configuration_args(self.basename, keys)
            if name == 'i':
                args.append('-i')
            return (
                [encodeArgument(arg) for arg in args]
                + [encodeFilename(self._ffmpeg_filename_argument(file), True)])

        for arg_type, path_opts in (('i', input_path_opts), ('o', output_path_opts)):
            cmd += itertools.chain.from_iterable(
                make_args(path, list(opts), arg_type, i + 1)
                for i, (path, opts) in enumerate(path_opts) if path)

        self.write_debug('ffmpeg command line: %s' % shell_quote(cmd))
        p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
        stdout, stderr = p.communicate_or_kill()
        if p.returncode not in variadic(expected_retcodes):
            stderr = stderr.decode('utf-8', 'replace').strip()
            self.write_debug(stderr)
            raise FFmpegPostProcessorError(stderr.split('\n')[-1])
        for out_path, _ in output_path_opts:
            if out_path:
                self.try_utime(out_path, oldest_mtime, oldest_mtime)
        return stderr.decode('utf-8', 'replace')

    def run_ffmpeg(self, path, out_path, opts, **kwargs):
        return self.run_ffmpeg_multiple_files([path], out_path, opts, **kwargs)

    @staticmethod
    def _ffmpeg_filename_argument(fn):
        # Always use 'file:' because the filename may contain ':' (ffmpeg
        # interprets that as a protocol) or can start with '-' (-- is broken in
        # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
        # Also leave '-' intact in order not to break streaming to stdout.
        if fn.startswith(('http://', 'https://')):
            return fn
        return 'file:' + fn if fn != '-' else fn

    @staticmethod
    def _quote_for_ffmpeg(string):
        # See https://ffmpeg.org/ffmpeg-utils.html#toc-Quoting-and-escaping
        # A sequence of '' produces '\'''\'';
        # final replace removes the empty '' between \' \'.
        string = string.replace("'", r"'\''").replace("'''", "'")
        # Handle potential ' at string boundaries.
        string = string[1:] if string[0] == "'" else "'" + string
        return string[:-1] if string[-1] == "'" else string + "'"

    def force_keyframes(self, filename, timestamps):
        """Re-encode *filename* forcing keyframes at *timestamps*; returns the new file path."""
        timestamps = orderedSet(timestamps)
        if timestamps[0] == 0:
            timestamps = timestamps[1:]
        keyframe_file = prepend_extension(filename, 'keyframes.temp')
        # BUGFIX: the filename placeholder was missing from this f-string
        self.to_screen(f'Re-encoding "{filename}" with appropriate keyframes')
        self.run_ffmpeg(filename, keyframe_file, [
            *self.stream_copy_opts(False, ext=determine_ext(filename)),
            '-force_key_frames', ','.join(f'{t:.6f}' for t in timestamps)])
        return keyframe_file

    def concat_files(self, in_files, out_file, concat_opts=None):
        """
        Use concat demuxer to concatenate multiple files having identical streams.

        Only inpoint, outpoint, and duration concat options are supported.
        See https://ffmpeg.org/ffmpeg-formats.html#concat-1 for details
        """
        concat_file = f'{out_file}.concat'
        self.write_debug(f'Writing concat spec to {concat_file}')
        with open(concat_file, 'wt', encoding='utf-8') as f:
            f.writelines(self._concat_spec(in_files, concat_opts))

        out_flags = list(self.stream_copy_opts(ext=determine_ext(out_file)))

        self.real_run_ffmpeg(
            [(concat_file, ['-hide_banner', '-nostdin', '-f', 'concat', '-safe', '0'])],
            [(out_file, out_flags)])
        self._delete_downloaded_files(concat_file)

    @classmethod
    def _concat_spec(cls, in_files, concat_opts=None):
        """Yield the lines of an ffconcat spec file for *in_files*."""
        if concat_opts is None:
            concat_opts = [{}] * len(in_files)
        yield 'ffconcat version 1.0\n'
        for file, opts in zip(in_files, concat_opts):
            yield f'file {cls._quote_for_ffmpeg(cls._ffmpeg_filename_argument(file))}\n'
            # Iterate explicitly to yield the following directives in order, ignoring the rest.
            for directive in 'inpoint', 'outpoint', 'duration':
                if directive in opts:
                    yield f'{directive} {opts[directive]}\n'
424 | ||
425 | ||
class FFmpegExtractAudioPP(FFmpegPostProcessor):
    """Extract (and optionally transcode) the audio track into its own file."""

    COMMON_AUDIO_EXTS = ('wav', 'flac', 'm4a', 'aiff', 'mp3', 'ogg', 'mka', 'opus', 'wma')
    SUPPORTED_EXTS = tuple(ACODECS.keys())
    FORMAT_RE = create_mapping_re(('best', *SUPPORTED_EXTS))

    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        self.mapping = preferredcodec or 'best'
        self._preferredquality = float_or_none(preferredquality)
        self._nopostoverwrites = nopostoverwrites

    def _quality_args(self, codec):
        """Return the quality/bitrate arguments for --audio-quality, scaled per encoder."""
        if self._preferredquality is None:
            return []
        elif self._preferredquality > 10:
            # Values above 10 are treated as a bitrate in KBit/s
            return ['-b:a', f'{self._preferredquality}k']

        # Per-encoder VBR scale endpoints for quality 0 (best) .. 10 (worst)
        limits = {
            'libmp3lame': (10, 0),
            'libvorbis': (0, 10),
            # FFmpeg's AAC encoder does not have an upper limit for the value of -q:a.
            # Experimentally, with values over 4, bitrate changes were minimal or non-existent
            'aac': (0.1, 4),
            'libfdk_aac': (1, 5),
        }.get(codec)
        if not limits:
            return []

        q = limits[1] + (limits[0] - limits[1]) * (self._preferredquality / 10)
        if codec == 'libfdk_aac':
            return ['-vbr', f'{int(q)}']
        return ['-q:a', f'{q}']

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        """Run ffmpeg with '-vn' plus the chosen audio codec and extra options."""
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise PostProcessingError(f'audio conversion failed: {err.msg}')

    @PostProcessor._restrict_to(images=False)
    def run(self, information):
        orig_path = path = information['filepath']
        target_format, _skip_msg = resolve_mapping(information['ext'], self.mapping)
        if target_format == 'best' and information['ext'] in self.COMMON_AUDIO_EXTS:
            target_format, _skip_msg = None, 'the file is already in a common audio format'
        if not target_format:
            self.to_screen(f'Not converting audio {orig_path}; {_skip_msg}')
            return [], information

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')

        if filecodec == 'aac' and target_format in ('m4a', 'best'):
            # Lossless, but in another container
            extension, _, more_opts, acodec = *ACODECS['m4a'], 'copy'
        elif target_format == 'best' or target_format == filecodec:
            # Lossless if possible
            try:
                extension, _, more_opts, acodec = *ACODECS[filecodec], 'copy'
            except KeyError:
                extension, acodec, more_opts = ACODECS['mp3']
        else:
            # We convert the audio (lossy if codec is lossy)
            extension, acodec, more_opts = ACODECS[target_format]
            if acodec == 'aac' and self._features.get('fdk'):
                acodec, more_opts = 'libfdk_aac', []

        more_opts = list(more_opts)
        if acodec != 'copy':
            # BUGFIX: append the quality args instead of overwriting more_opts;
            # overwriting discarded required options from ACODECS such as
            # ('-acodec', 'alac') and ('-f', 'wav')
            more_opts.extend(self._quality_args(acodec))

        # not os.path.splitext, since the latter does not work on unicode in all setups
        temp_path = new_path = f'{path.rpartition(".")[0]}.{extension}'

        if new_path == path:
            if acodec == 'copy':
                self.to_screen(f'Not converting audio {orig_path}; file is already in target format {target_format}')
                return [], information
            orig_path = prepend_extension(path, 'orig')
            temp_path = prepend_extension(path, 'temp')
        if (self._nopostoverwrites and os.path.exists(encodeFilename(new_path))
                and os.path.exists(encodeFilename(orig_path))):
            self.to_screen('Post-process file %s exists, skipping' % new_path)
            return [], information

        self.to_screen(f'Destination: {new_path}')
        self.run_ffmpeg(path, temp_path, acodec, more_opts)

        os.replace(path, orig_path)
        os.replace(temp_path, new_path)
        information['filepath'] = new_path
        information['ext'] = extension

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            self.try_utime(
                new_path, time.time(), information['filetime'], errnote='Cannot update utime of audio file')

        return [orig_path], information
531 | ||
532 | ||
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
    """Re-encode the downloaded file into the requested container/format."""

    SUPPORTED_EXTS = ('mp4', 'mkv', 'flv', 'webm', 'mov', 'avi', 'mka', 'ogg', *FFmpegExtractAudioPP.SUPPORTED_EXTS)
    FORMAT_RE = create_mapping_re(SUPPORTED_EXTS)
    _ACTION = 'converting'

    def __init__(self, downloader=None, preferedformat=None):
        super().__init__(downloader)
        self.mapping = preferedformat

    @staticmethod
    def _options(target_ext):
        yield from FFmpegPostProcessor.stream_copy_opts(False)
        if target_ext == 'avi':
            yield from ('-c:v', 'libxvid', '-vtag', 'XVID')

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        filename, source_ext = info['filepath'], info['ext'].lower()
        target_ext, _skip_msg = resolve_mapping(source_ext, self.mapping)
        if _skip_msg:
            # BUGFIX: the filename placeholder was missing from this f-string
            self.to_screen(f'Not {self._ACTION} media file "{filename}"; {_skip_msg}')
            return [], info

        outpath = replace_extension(filename, target_ext, source_ext)
        self.to_screen(f'{self._ACTION.title()} video from {source_ext} to {target_ext}; Destination: {outpath}')
        self.run_ffmpeg(filename, outpath, self._options(target_ext))

        info['filepath'] = outpath
        info['format'] = info['ext'] = target_ext
        return [filename], info
563 | ||
564 | ||
class FFmpegVideoRemuxerPP(FFmpegVideoConvertorPP):
    """Like FFmpegVideoConvertorPP, but only remuxes (streams are copied, never re-encoded)."""
    _ACTION = 'remuxing'

    @staticmethod
    def _options(target_ext):
        return FFmpegPostProcessor.stream_copy_opts()
571 | ||
572 | ||
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
    """Mux the requested subtitle files into the media container."""

    SUPPORTED_EXTS = ('mp4', 'mov', 'm4a', 'webm', 'mkv', 'mka')

    def __init__(self, downloader=None, already_have_subtitle=False):
        super().__init__(downloader)
        # True when the subtitle files are also kept on disk (--write-subs);
        # in that case they must not be deleted after embedding
        self._already_have_subtitle = already_have_subtitle

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        if info['ext'] not in self.SUPPORTED_EXTS:
            self.to_screen(f'Subtitles can only be embedded in {", ".join(self.SUPPORTED_EXTS)} files')
            return [], info
        subtitles = info.get('requested_subtitles')
        if not subtitles:
            self.to_screen('There aren\'t any subtitles to embed')
            return [], info

        filename = info['filepath']

        # Disabled temporarily. There needs to be a way to override this
        # in case of duration actually mismatching in extractor
        # See: https://github.com/yt-dlp/yt-dlp/issues/1870, https://github.com/yt-dlp/yt-dlp/issues/1385
        '''
        if info.get('duration') and not info.get('__real_download') and self._duration_mismatch(
                self._get_real_video_duration(filename, False), info['duration']):
            self.to_screen(f'Skipping {self.pp_key()} since the real and expected durations mismatch')
            return [], info
        '''

        ext = info['ext']
        sub_langs, sub_names, sub_filenames = [], [], []
        webm_vtt_warn = False
        mp4_ass_warn = False

        for lang, sub_info in subtitles.items():
            if not os.path.exists(sub_info.get('filepath', '')):
                self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
                continue
            sub_ext = sub_info['ext']
            if sub_ext == 'json':
                self.report_warning('JSON subtitles cannot be embedded')
            elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
                sub_langs.append(lang)
                sub_names.append(sub_info.get('name'))
                sub_filenames.append(sub_info['filepath'])
                # BUGFIX: this warning lived in the `else` branch below, where it was
                # unreachable -- that branch only runs when ext == 'webm', so the
                # ext == 'mp4' test could never be true there
                if not mp4_ass_warn and ext == 'mp4' and sub_ext == 'ass':
                    mp4_ass_warn = True
                    self.report_warning('ASS subtitles cannot be properly embedded in mp4 files; expect issues')
            else:
                if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
                    webm_vtt_warn = True
                    self.report_warning('Only WebVTT subtitles can be embedded in webm files')

        if not sub_langs:
            return [], info

        input_files = [filename] + sub_filenames

        opts = [
            *self.stream_copy_opts(ext=info['ext']),
            # Don't copy the existing subtitles, we may be running the
            # postprocessor a second time
            '-map', '-0:s',
        ]
        for i, (lang, name) in enumerate(zip(sub_langs, sub_names)):
            opts.extend(['-map', '%d:0' % (i + 1)])
            lang_code = ISO639Utils.short2long(lang) or lang
            opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
            if name:
                opts.extend(['-metadata:s:s:%d' % i, 'handler_name=%s' % name,
                             '-metadata:s:s:%d' % i, 'title=%s' % name])

        temp_filename = prepend_extension(filename, 'temp')
        self.to_screen('Embedding subtitles in "%s"' % filename)
        self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
        os.replace(temp_filename, filename)

        files_to_delete = [] if self._already_have_subtitle else sub_filenames
        return files_to_delete, info
652 | ||
653 | ||
654 | class FFmpegMetadataPP(FFmpegPostProcessor): | |
655 | ||
    def __init__(self, downloader, add_metadata=True, add_chapters=True, add_infojson='if_exists'):
        """
        @param add_metadata  embed common tags (title, artist, ...)
        @param add_chapters  embed chapter markers from info['chapters']
        @param add_infojson  attach the info-json to mkv/mka files
                             (True, False, or 'if_exists')
        """
        FFmpegPostProcessor.__init__(self, downloader)
        self._add_metadata = add_metadata
        self._add_chapters = add_chapters
        self._add_infojson = add_infojson
661 | ||
662 | @staticmethod | |
663 | def _options(target_ext): | |
664 | audio_only = target_ext == 'm4a' | |
665 | yield from FFmpegPostProcessor.stream_copy_opts(not audio_only) | |
666 | if audio_only: | |
667 | yield from ('-vn', '-acodec', 'copy') | |
668 | ||
    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        """Embed chapters, tags and (for mkv/mka) the info-json into the media file."""
        filename, metadata_filename = info['filepath'], None
        files_to_delete, options = [], []
        if self._add_chapters and info.get('chapters'):
            # Chapters are passed via a temporary FFMETADATA file (second input)
            metadata_filename = replace_extension(filename, 'meta')
            options.extend(self._get_chapter_opts(info['chapters'], metadata_filename))
            files_to_delete.append(metadata_filename)
        if self._add_metadata:
            options.extend(self._get_metadata_opts(info))

        if self._add_infojson:
            if info['ext'] in ('mkv', 'mka'):
                infojson_filename = info.get('infojson_filename')
                options.extend(self._get_infojson_opts(info, infojson_filename))
                if not infojson_filename:
                    # NOTE(review): _get_infojson_opts appears to set
                    # info['infojson_filename'] as a side effect -- confirm
                    files_to_delete.append(info.get('infojson_filename'))
            elif self._add_infojson is True:
                self.to_screen('The info-json can only be attached to mkv/mka files')

        if not options:
            self.to_screen('There isn\'t any metadata to add')
            return [], info

        temp_filename = prepend_extension(filename, 'temp')
        self.to_screen('Adding metadata to "%s"' % filename)
        # metadata_filename may be None; run_ffmpeg_multiple_files skips falsy paths
        self.run_ffmpeg_multiple_files(
            (filename, metadata_filename), temp_filename,
            itertools.chain(self._options(info['ext']), *options))
        self._delete_downloaded_files(*files_to_delete)
        os.replace(temp_filename, filename)
        return [], info
701 | ||
    @staticmethod
    def _get_chapter_opts(chapters, metadata_filename):
        """Write an FFMETADATA file describing *chapters* and yield the args referencing it.

        NOTE: this is a generator -- the file is only written once it is iterated.
        """
        with open(metadata_filename, 'wt', encoding='utf-8') as f:
            def ffmpeg_escape(text):
                # Escape the characters that are special in ffmetadata files
                return re.sub(r'([\\=;#\n])', r'\\\1', text)

            metadata_file_content = ';FFMETADATA1\n'
            for chapter in chapters:
                # Chapter times are expressed in milliseconds (TIMEBASE=1/1000)
                metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
                metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
                metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
                chapter_title = chapter.get('title')
                if chapter_title:
                    metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
            f.write(metadata_file_content)
        yield ('-map_metadata', '1')
718 | ||
    def _get_metadata_opts(self, info):
        """Yield ffmpeg '-metadata' option tuples built from the info dict."""
        meta_prefix = 'meta'
        # 'common' holds file-level tags; numeric keys hold per-stream tags
        metadata = collections.defaultdict(dict)

        def add(meta_list, info_list=None):
            # Use the first non-None info value among the candidate keys
            # NOTE(review): f'{meta_prefix}_' probes the literal key 'meta_' -- confirm intended
            value = next((
                str(info[key]) for key in [f'{meta_prefix}_'] + list(variadic(info_list or meta_list))
                if info.get(key) is not None), None)
            if value not in ('', None):
                value = value.replace('\0', '')  # nul character cannot be passed in command line
                metadata['common'].update({meta_f: value for meta_f in variadic(meta_list)})

        # See [1-4] for some info on media metadata/metadata supported
        # by ffmpeg.
        # 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
        # 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
        # 3. https://kodi.wiki/view/Video_file_tagging

        add('title', ('track', 'title'))
        add('date', 'upload_date')
        add(('description', 'synopsis'), 'description')
        add(('purl', 'comment'), 'webpage_url')
        add('track', 'track_number')
        add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
        add('genre')
        add('album')
        add('album_artist')
        add('disc', 'disc_number')
        add('show', 'series')
        add('season_number')
        add('episode_id', ('episode', 'episode_id'))
        add('episode_sort', 'episode_number')
        if 'embed-metadata' in self.get_param('compat_opts', []):
            add('comment', 'description')
            metadata['common'].pop('synopsis', None)

        # User-supplied overrides: 'meta_KEY' (common) or 'metaN_KEY' (stream N)
        meta_regex = rf'{re.escape(meta_prefix)}(?P<i>\d+)?_(?P<key>.+)'
        for key, value in info.items():
            mobj = re.fullmatch(meta_regex, key)
            if value is not None and mobj:
                metadata[mobj.group('i') or 'common'][mobj.group('key')] = value.replace('\0', '')

        # Write id3v1 metadata also since Windows Explorer can't handle id3v2 tags
        yield ('-write_id3v1', '1')

        for name, value in metadata['common'].items():
            yield ('-metadata', f'{name}={value}')

        stream_idx = 0
        for fmt in info.get('requested_formats') or []:
            # A format carrying both audio and video contributes two streams
            stream_count = 2 if 'none' not in (fmt.get('vcodec'), fmt.get('acodec')) else 1
            lang = ISO639Utils.short2long(fmt.get('language') or '') or fmt.get('language')
            for i in range(stream_idx, stream_idx + stream_count):
                if lang:
                    metadata[str(i)].setdefault('language', lang)
                for name, value in metadata[str(i)].items():
                    yield (f'-metadata:s:{i}', f'{name}={value}')
            stream_idx += stream_count
777 | ||
    def _get_infojson_opts(self, info, infofn):
        """Yield ffmpeg options that attach the info.json file to the output.

        If `infofn` does not exist yet, the info-json is written first
        (only when self._add_infojson is True). Any previously attached
        info.json stream is unmapped so it is not duplicated.
        """
        if not infofn or not os.path.exists(infofn):
            if self._add_infojson is not True:
                # No existing file and not asked to create one — nothing to attach
                return
            infofn = infofn or '%s.temp' % (
                self._downloader.prepare_filename(info, 'infojson')
                or replace_extension(self._downloader.prepare_filename(info), 'info.json', info['ext']))
            if not self._downloader._ensure_dir_exists(infofn):
                return
            self.write_debug(f'Writing info-json to: {infofn}')
            write_json_file(self._downloader.sanitize_info(info, self.get_param('clean_infojson', True)), infofn)
            info['infojson_filename'] = infofn

        # Locate a previously attached info.json stream (by its mimetype tag)
        old_stream, new_stream = self.get_stream_number(info['filepath'], ('tags', 'mimetype'), 'application/json')
        if old_stream is not None:
            yield ('-map', '-0:%d' % old_stream)
            new_stream -= 1  # dropping the old stream shifts the new attachment's index down

        yield (
            '-attach', infofn,
            f'-metadata:s:{new_stream}', 'mimetype=application/json',
            f'-metadata:s:{new_stream}', 'filename=info.json',
        )
801 | ||
802 | ||
class FFmpegMergerPP(FFmpegPostProcessor):
    """Merge separately downloaded audio/video streams into one container
    using stream copy (no re-encoding)."""

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        """Mux all files in info['__files_to_merge'] into info['filepath'].

        @returns (files_to_delete, info)
        """
        filename = info['filepath']
        temp_filename = prepend_extension(filename, 'temp')
        args = ['-c', 'copy']
        audio_streams = 0
        for i, fmt in enumerate(info['requested_formats']):
            if fmt.get('acodec') != 'none':
                args.extend(['-map', f'{i}:a:0'])
                # ADTS AAC from HLS must be converted to the MP4 (ASC) packing
                aac_fixup = fmt['protocol'].startswith('m3u8') and self.get_audio_codec(fmt['filepath']) == 'aac'
                if aac_fixup:
                    args.extend([f'-bsf:a:{audio_streams}', 'aac_adtstoasc'])
                audio_streams += 1
            if fmt.get('vcodec') != 'none':
                args.extend(['-map', f'{i}:v:0'])
        self.to_screen('Merging formats into "%s"' % filename)
        self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
        # os.replace overwrites atomically; os.rename raised FileExistsError
        # on Windows when the target already existed (consistent with the
        # other postprocessors in this file)
        os.replace(temp_filename, filename)
        return info['__files_to_merge'], info

    def can_merge(self):
        """Return whether the detected backend can mux separate A/V files."""
        # TODO: figure out merge-capable ffmpeg version
        if self.basename != 'avconv':
            return True

        required_version = '10-0'
        if is_outdated_version(
                self._versions[self.basename], required_version):
            warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
                       'yt-dlp will download single file media. '
                       'Update %s to version %s or newer to fix this.') % (
                self.basename, self.basename, required_version)
            self.report_warning(warning)
            return False
        return True
839 | ||
840 | ||
class FFmpegFixupPostProcessor(FFmpegPostProcessor):
    """Base class for fixup postprocessors: remux a file in place with the
    given ffmpeg options."""

    def _fixup(self, msg, filename, options):
        """Run ffmpeg on `filename` with `options` and replace it in place,
        announcing `msg` on screen."""
        temp_filename = prepend_extension(filename, 'temp')

        # Fixed: the message previously printed a literal "(unknown)"
        # instead of the actual filename
        self.to_screen(f'{msg} of "{filename}"')
        self.run_ffmpeg(filename, temp_filename, options)

        os.replace(temp_filename, filename)
849 | ||
850 | ||
class FFmpegFixupStretchedPP(FFmpegFixupPostProcessor):
    """Fix the display aspect ratio of videos reported as stretched."""

    @PostProcessor._restrict_to(images=False, audio=False)
    def run(self, info):
        ratio = info.get('stretched_ratio')
        if ratio is None or ratio == 1:
            # Nothing to correct
            return [], info
        options = [*self.stream_copy_opts(), '-aspect', '%f' % ratio]
        self._fixup('Fixing aspect ratio', info['filepath'], options)
        return [], info
859 | ||
860 | ||
class FFmpegFixupM4aPP(FFmpegFixupPostProcessor):
    """Remux audio downloaded in an 'm4a_dash' container to a proper mp4."""

    @PostProcessor._restrict_to(images=False, video=False)
    def run(self, info):
        if info.get('container') != 'm4a_dash':
            return [], info
        remux_opts = [*self.stream_copy_opts(), '-f', 'mp4']
        self._fixup('Correcting container', info['filepath'], remux_opts)
        return [], info
867 | ||
868 | ||
class FFmpegFixupM3u8PP(FFmpegFixupPostProcessor):
    """Remux MPEG-TS data delivered via HLS into a proper MP4 container."""

    def _needs_fixup(self, info):
        """Yield the individual conditions that must all hold for a fixup."""
        yield info['ext'] in ('mp4', 'm4a')
        yield info['protocol'].startswith('m3u8')
        try:
            probe = self.get_metadata_object(info['filepath'])
        except PostProcessingError as err:
            self.report_warning(f'Unable to extract metadata: {err.msg}')
            # Cannot tell the container format — assume a fixup is needed
            yield True
            return
        yield traverse_obj(probe, ('format', 'format_name'), casesense=False) == 'mpegts'

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        if not all(self._needs_fixup(info)):
            return [], info
        self._fixup('Fixing MPEG-TS in MP4 container', info['filepath'],
                    [*self.stream_copy_opts(), '-f', 'mp4', '-bsf:a', 'aac_adtstoasc'])
        return [], info
887 | ||
888 | ||
class FFmpegFixupTimestampPP(FFmpegFixupPostProcessor):
    """Shift packet timestamps so the file starts at zero."""

    def __init__(self, downloader=None, trim=0.001):
        # "trim" should be used when the video contains unintended packets
        assert isinstance(trim, (int, float))
        super().__init__(downloader)
        self.trim = str(trim)

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        if self._features.get('setts'):
            # ffmpeg >= 4.4: the setts bitstream filter can shift timestamps
            # without re-encoding
            opts = ['-c', 'copy', '-bsf', 'setts=ts=TS-STARTPTS']
        else:
            self.report_warning(
                'A re-encode is needed to fix timestamps in older versions of ffmpeg. '
                'Please install ffmpeg 4.4 or later to fixup without re-encoding')
            opts = ['-vf', 'setpts=PTS-STARTPTS']
        self._fixup('Fixing frame timestamp', info['filepath'],
                    [*opts, *self.stream_copy_opts(False), '-ss', self.trim])
        return [], info
908 | ||
909 | ||
class FFmpegCopyStreamPP(FFmpegFixupPostProcessor):
    """Remux the file in place with all streams copied (no re-encode).

    Subclasses customize only the on-screen MESSAGE.
    """
    MESSAGE = 'Copying stream'

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        self._fixup(self.MESSAGE, info['filepath'], self.stream_copy_opts())
        return [], info
917 | ||
918 | ||
class FFmpegFixupDurationPP(FFmpegCopyStreamPP):
    # Stream-copy remux to correct a wrong container duration
    MESSAGE = 'Fixing video duration'
921 | ||
922 | ||
class FFmpegFixupDuplicateMoovPP(FFmpegCopyStreamPP):
    # Stream-copy remux to drop duplicated MOOV atoms from the MP4
    MESSAGE = 'Fixing duplicate MOOV atoms'
925 | ||
926 | ||
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
    """Convert downloaded subtitle files to the requested format."""

    SUPPORTED_EXTS = ('srt', 'vtt', 'ass', 'lrc')

    def __init__(self, downloader=None, format=None):
        # format: target subtitle extension, e.g. 'srt' or 'vtt'
        super().__init__(downloader)
        self.format = format

    def run(self, info):
        """Convert each requested subtitle in place; updates
        info['requested_subtitles'] and returns the original files for deletion."""
        subs = info.get('requested_subtitles')
        new_ext = self.format
        # ffmpeg's muxer name differs from the extension for WebVTT
        new_format = new_ext
        if new_format == 'vtt':
            new_format = 'webvtt'
        if subs is None:
            self.to_screen('There aren\'t any subtitles to convert')
            return [], info
        self.to_screen('Converting subtitles')
        sub_filenames = []
        for lang, sub in subs.items():
            if not os.path.exists(sub.get('filepath', '')):
                self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
                continue
            ext = sub['ext']
            if ext == new_ext:
                self.to_screen('Subtitle file for %s is already in the requested format' % new_ext)
                continue
            elif ext == 'json':
                self.to_screen(
                    'You have requested to convert json subtitles into another format, '
                    'which is currently not possible')
                continue
            old_file = sub['filepath']
            sub_filenames.append(old_file)
            new_file = replace_extension(old_file, new_ext)

            if ext in ('dfxp', 'ttml', 'tt'):
                # ffmpeg cannot read TTML; convert to SRT in Python first,
                # then (if needed) let ffmpeg convert the SRT further below
                self.report_warning(
                    'You have requested to convert dfxp (TTML) subtitles into another format, '
                    'which results in style information loss')

                dfxp_file = old_file
                srt_file = replace_extension(old_file, 'srt')

                with open(dfxp_file, 'rb') as f:
                    srt_data = dfxp2srt(f.read())

                with open(srt_file, 'wt', encoding='utf-8') as f:
                    f.write(srt_data)
                old_file = srt_file

                subs[lang] = {
                    'ext': 'srt',
                    'data': srt_data,
                    'filepath': srt_file,
                }

                if new_ext == 'srt':
                    # SRT was the target — done with this subtitle
                    continue
                else:
                    # The intermediate SRT is also temporary; delete it later
                    sub_filenames.append(srt_file)

            self.run_ffmpeg(old_file, new_file, ['-f', new_format])

            with open(new_file, encoding='utf-8') as f:
                subs[lang] = {
                    'ext': new_ext,
                    'data': f.read(),
                    'filepath': new_file,
                }

            # Keep the move-bookkeeping in sync with the renamed file
            info['__files_to_move'][new_file] = replace_extension(
                info['__files_to_move'][sub['filepath']], new_ext)

        return sub_filenames, info
1001 | ||
1002 | ||
class FFmpegSplitChaptersPP(FFmpegPostProcessor):
    """Split a downloaded video into one output file per chapter."""

    def __init__(self, downloader, force_keyframes=False):
        # force_keyframes: re-encode keyframes at chapter starts so the
        # stream-copied cuts are frame-accurate
        super().__init__(downloader)
        self._force_keyframes = force_keyframes

    def _prepare_filename(self, number, chapter, info):
        """Return the output filename for chapter `number` using the
        'chapter' output template."""
        info = info.copy()
        info.update({
            'section_number': number,
            'section_title': chapter.get('title'),
            'section_start': chapter.get('start_time'),
            'section_end': chapter.get('end_time'),
        })
        return self._downloader.prepare_filename(info, 'chapter')

    def _ffmpeg_args_for_chapter(self, number, chapter, info):
        """Return (destination, input options) for one chapter, or None if
        the destination directory could not be created."""
        destination = self._prepare_filename(number, chapter, info)
        if not self._downloader._ensure_dir_exists(encodeFilename(destination)):
            return

        chapter['filepath'] = destination
        self.to_screen('Chapter %03d; Destination: %s' % (number, destination))
        return (
            destination,
            ['-ss', str(chapter['start_time']),
             '-t', str(chapter['end_time'] - chapter['start_time'])])

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        chapters = info.get('chapters') or []
        if not chapters:
            self.to_screen('Chapter information is unavailable')
            return [], info

        in_file = info['filepath']
        if self._force_keyframes and len(chapters) > 1:
            in_file = self.force_keyframes(in_file, (c['start_time'] for c in chapters))
        self.to_screen('Splitting video by chapters; %d chapters found' % len(chapters))
        for idx, chapter in enumerate(chapters):
            chapter_args = self._ffmpeg_args_for_chapter(idx + 1, chapter, info)
            if not chapter_args:
                # Destination directory could not be created; previously this
                # crashed with a TypeError when unpacking None
                continue
            destination, opts = chapter_args
            self.real_run_ffmpeg([(in_file, opts)], [(destination, self.stream_copy_opts())])
        if in_file != info['filepath']:
            self._delete_downloaded_files(in_file, msg=None)
        return [], info
1047 | ||
1048 | ||
class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
    """Convert downloaded thumbnails according to the requested format mapping."""

    SUPPORTED_EXTS = ('jpg', 'png', 'webp')
    FORMAT_RE = create_mapping_re(SUPPORTED_EXTS)

    def __init__(self, downloader=None, format=None):
        # format: a mapping string like 'webp>png/jpg', see create_mapping_re
        super().__init__(downloader)
        self.mapping = format

    @classmethod
    def is_webp(cls, path):
        """Deprecated: check whether the file at `path` is a WebP image."""
        write_string(f'DeprecationWarning: {cls.__module__}.{cls.__name__}.is_webp is deprecated')
        return imghdr.what(path) == 'webp'

    def fixup_webp(self, info, idx=-1):
        """Rename the thumbnail at index `idx` to a .webp extension if its
        content is WebP but its extension says otherwise."""
        current = info['thumbnails'][idx]['filepath']
        ext = os.path.splitext(current)[1]
        if not ext or ext.lower() == '.webp':
            return
        if imghdr.what(current) != 'webp':
            return
        self.to_screen('Correcting thumbnail "%s" extension to webp' % current)
        fixed = replace_extension(current, 'webp')
        os.replace(current, fixed)
        info['thumbnails'][idx]['filepath'] = fixed
        info['__files_to_move'][fixed] = replace_extension(
            info['__files_to_move'].pop(current), 'webp')

    @staticmethod
    def _options(target_ext):
        # JPEG output needs the mjpeg2jpeg bitstream filter to be valid
        return ['-bsf:v', 'mjpeg2jpeg'] if target_ext == 'jpg' else []

    def convert_thumbnail(self, thumbnail_filename, target_ext):
        """Convert one thumbnail file; returns the converted filename."""
        converted = replace_extension(thumbnail_filename, target_ext)

        self.to_screen(f'Converting thumbnail "{thumbnail_filename}" to {target_ext}')
        self.real_run_ffmpeg(
            [(thumbnail_filename, ['-f', 'image2', '-pattern_type', 'none'])],
            # Double '%' so ffmpeg does not treat the name as an image sequence pattern
            [(converted.replace('%', '%%'), self._options(target_ext))])
        return converted

    def run(self, info):
        to_delete = []
        seen_any = False

        for idx, thumbnail in enumerate(info.get('thumbnails') or []):
            original = thumbnail.get('filepath')
            if not original:
                continue
            seen_any = True
            self.fixup_webp(info, idx)
            source_ext = os.path.splitext(original)[1][1:].lower()
            if source_ext == 'jpeg':
                source_ext = 'jpg'
            target_ext, skip_msg = resolve_mapping(source_ext, self.mapping)
            if skip_msg:
                self.to_screen(f'Not converting thumbnail "{original}"; {skip_msg}')
                continue
            thumbnail['filepath'] = self.convert_thumbnail(original, target_ext)
            to_delete.append(original)
            info['__files_to_move'][thumbnail['filepath']] = replace_extension(
                info['__files_to_move'][original], target_ext)

        if not seen_any:
            self.to_screen('There aren\'t any thumbnails to convert')
        return to_delete, info
1114 | ||
1115 | ||
class FFmpegConcatPP(FFmpegPostProcessor):
    """Concatenate the downloaded videos of a playlist into a single file."""

    def __init__(self, downloader, only_multi_video=False):
        # only_multi_video: restrict concatenation to '_type' == 'multi_video' results
        self._only_multi_video = only_multi_video
        super().__init__(downloader)

    def _get_codecs(self, file):
        # Ordered codec names of all streams in `file`, from the ffprobe metadata
        codecs = traverse_obj(self.get_metadata_object(file), ('streams', ..., 'codec_name'))
        self.write_debug(f'Codecs = {", ".join(codecs)}')
        return tuple(codecs)

    def concat_files(self, in_files, out_file):
        """Concatenate `in_files` into `out_file`; returns the input files to
        delete ([] when a single input was simply moved into place).

        @raises PostProcessingError when the inputs' stream codecs differ.
        """
        if not self._downloader._ensure_dir_exists(out_file):
            return
        if len(in_files) == 1:
            if os.path.realpath(in_files[0]) != os.path.realpath(out_file):
                self.to_screen(f'Moving "{in_files[0]}" to "{out_file}"')
                os.replace(in_files[0], out_file)
            return []

        # The concat demuxer requires all inputs to share the same streams/codecs
        if len(set(map(self._get_codecs, in_files))) > 1:
            raise PostProcessingError(
                'The files have different streams/codecs and cannot be concatenated. '
                'Either select different formats or --recode-video them to a common format')

        self.to_screen(f'Concatenating {len(in_files)} files; Destination: {out_file}')
        super().concat_files(in_files, out_file)
        return in_files

    @PostProcessor._restrict_to(images=False, simulated=False)
    def run(self, info):
        entries = info.get('entries') or []
        if not any(entries) or (self._only_multi_video and info['_type'] != 'multi_video'):
            return [], info
        elif traverse_obj(entries, (..., lambda k, v: k == 'requested_downloads' and len(v) > 1)):
            # Entries that downloaded multiple separate formats cannot be concatenated
            raise PostProcessingError('Concatenation is not supported when downloading multiple separate formats')

        in_files = traverse_obj(entries, (..., 'requested_downloads', 0, 'filepath')) or []
        if len(in_files) < len(entries):
            raise PostProcessingError('Aborting concatenation because some downloads failed')

        ie_copy = self._downloader._playlist_infodict(info)
        # Use the common extension when all entries agree; otherwise fall back to mkv
        exts = traverse_obj(entries, (..., 'requested_downloads', 0, 'ext'), (..., 'ext'))
        ie_copy['ext'] = exts[0] if len(set(exts)) == 1 else 'mkv'
        out_file = self._downloader.prepare_filename(ie_copy, 'pl_video')

        files_to_delete = self.concat_files(in_files, out_file)

        info['requested_downloads'] = [{
            'filepath': out_file,
            'ext': ie_copy['ext'],
        }]
        return files_to_delete, info