import copy
import heapq
import os

from .common import PostProcessor
from .ffmpeg import FFmpegPostProcessor, FFmpegSubtitlesConvertorPP
from .sponsorblock import SponsorBlockPP
from ..utils import PostProcessingError, orderedSet, prepend_extension

# Chapters shorter than this many seconds that resulted from a cut are merged
# into a neighbouring chapter instead of being kept standalone.
_TINY_CHAPTER_DURATION = 1
# Default output template used to title chapters created from SponsorBlock segments.
DEFAULT_SPONSORBLOCK_CHAPTER_TITLE = '[SponsorBlock]: %(category_names)l'
class ModifyChaptersPP(FFmpegPostProcessor):
    """Remove chapters/SponsorBlock segments/explicit ranges from a video.

    Cuts the requested time ranges out of the downloaded file (and any
    supported external subtitle files) using ffmpeg's concat demuxer, and
    rewrites ``info['chapters']`` so the remaining chapters' timestamps
    match the cut file.
    """

    def __init__(self, downloader, remove_chapters_patterns=None, remove_sponsor_segments=None, remove_ranges=None,
                 *, sponsorblock_chapter_title=DEFAULT_SPONSORBLOCK_CHAPTER_TITLE, force_keyframes=False):
        FFmpegPostProcessor.__init__(self, downloader)
        self._remove_chapters_patterns = set(remove_chapters_patterns or [])
        # POI categories are excluded from removal (they are subtracted from the set).
        self._remove_sponsor_segments = set(remove_sponsor_segments or []) - set(SponsorBlockPP.POI_CATEGORIES.keys())
        self._ranges_to_remove = set(remove_ranges or [])
        self._sponsorblock_chapter_title = sponsorblock_chapter_title
        self._force_keyframes = force_keyframes

    @PostProcessor._restrict_to(images=False)
    def run(self, info):
        """Cut the marked ranges from the file(s); returns (files_to_remove, info)."""
        # Chapters must be preserved intact when downloading multiple formats of the same video.
        chapters, sponsor_chapters = self._mark_chapters_to_remove(
            copy.deepcopy(info.get('chapters')) or [],
            copy.deepcopy(info.get('sponsorblock_chapters')) or [])
        if not chapters and not sponsor_chapters:
            return [], info

        real_duration = self._get_real_video_duration(info['filepath'])
        if not chapters:
            # Synthesize a single whole-video chapter so sponsors can be arranged against it.
            chapters = [{'start_time': 0, 'end_time': info.get('duration') or real_duration, 'title': info['title']}]

        info['chapters'], cuts = self._remove_marked_arrange_sponsors(chapters + sponsor_chapters)
        if not cuts:
            return [], info

        original_duration, info['duration'] = info.get('duration'), info['chapters'][-1]['end_time']
        if self._duration_mismatch(real_duration, original_duration, 1):
            if not self._duration_mismatch(real_duration, info['duration']):
                self.to_screen(f'Skipping {self.pp_key()} since the video appears to be already cut')
                return [], info
            if not info.get('__real_download'):
                raise PostProcessingError('Cannot cut video since the real and expected durations mismatch. '
                                          'Different chapters may have already been removed')
            else:
                self.write_debug('Expected and actual durations mismatch')

        concat_opts = self._make_concat_opts(cuts, real_duration)
        self.write_debug('Concat spec = %s' % ', '.join(f'{c.get("inpoint", 0.0)}-{c.get("outpoint", "inf")}' for c in concat_opts))

        def remove_chapters(file, is_sub):
            # Keyframe forcing applies only to the video file, never to subtitles.
            return file, self.remove_chapters(file, cuts, concat_opts, self._force_keyframes and not is_sub)

        in_out_files = [remove_chapters(info['filepath'], False)]
        in_out_files.extend(remove_chapters(in_file, True) for in_file in self._get_supported_subs(info))

        # Renaming should only happen after all files are processed
        files_to_remove = []
        for in_file, out_file in in_out_files:
            mtime = os.stat(in_file).st_mtime
            uncut_file = prepend_extension(in_file, 'uncut')
            os.replace(in_file, uncut_file)
            os.replace(out_file, in_file)
            self.try_utime(in_file, mtime, mtime)
            files_to_remove.append(uncut_file)

        return files_to_remove, info

    def _mark_chapters_to_remove(self, chapters, sponsor_chapters):
        """Flag chapters/sponsor segments matching the removal criteria with 'remove'.

        Also appends synthetic sponsor chapters for the explicitly requested
        time ranges. Returns the (possibly modified) (chapters, sponsor_chapters).
        """
        if self._remove_chapters_patterns:
            warn_no_chapter_to_remove = True
            if not chapters:
                self.to_screen('Chapter information is unavailable')
                warn_no_chapter_to_remove = False
            for c in chapters:
                if any(regex.search(c['title']) for regex in self._remove_chapters_patterns):
                    c['remove'] = True
                    warn_no_chapter_to_remove = False
            if warn_no_chapter_to_remove:
                self.to_screen('There are no chapters matching the regex')

        if self._remove_sponsor_segments:
            warn_no_chapter_to_remove = True
            if not sponsor_chapters:
                self.to_screen('SponsorBlock information is unavailable')
                warn_no_chapter_to_remove = False
            for c in sponsor_chapters:
                if c['category'] in self._remove_sponsor_segments:
                    c['remove'] = True
                    warn_no_chapter_to_remove = False
            if warn_no_chapter_to_remove:
                self.to_screen('There are no matching SponsorBlock chapters')

        sponsor_chapters.extend({
            'start_time': start,
            'end_time': end,
            'category': 'manually_removed',
            '_categories': [('manually_removed', start, end)],
            'remove': True,
        } for start, end in self._ranges_to_remove)

        return chapters, sponsor_chapters

    def _get_supported_subs(self, info):
        """Yield filepaths of external subtitles whose format supports cutting."""
        for sub in (info.get('requested_subtitles') or {}).values():
            sub_file = sub.get('filepath')
            # The file might have been removed by --embed-subs
            if not sub_file or not os.path.exists(sub_file):
                continue
            ext = sub['ext']
            if ext not in FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS:
                self.report_warning(f'Cannot remove chapters from external {ext} subtitles; "{sub_file}" is now out of sync')
                continue
            # TODO: create __real_download for subs?
            yield sub_file

    def _remove_marked_arrange_sponsors(self, chapters):
        """Resolve overlaps between chapters, sponsor segments and cuts.

        Returns (new_chapters, cuts) where new_chapters have their timestamps
        shifted as if the cut ranges were already removed from the video.
        """
        # Store cuts separately, since adjacent and overlapping cuts must be merged.
        cuts = []

        def append_cut(c):
            assert 'remove' in c, 'Not a cut is appended to cuts'
            last_to_cut = cuts[-1] if cuts else None
            if last_to_cut and last_to_cut['end_time'] >= c['start_time']:
                last_to_cut['end_time'] = max(last_to_cut['end_time'], c['end_time'])
            else:
                cuts.append(c)
            return len(cuts) - 1

        def excess_duration(c):
            # Cuts that are completely within the chapter reduce chapters' duration.
            # Since cuts can overlap, excess duration may be less than the sum of cuts' durations.
            # To avoid that, chapter stores the index to the first cut within the chapter,
            # instead of storing excess duration. append_cut ensures that subsequent cuts (if any)
            # will be merged with previous ones (if necessary).
            cut_idx, excess = c.pop('cut_idx', len(cuts)), 0
            while cut_idx < len(cuts):
                cut = cuts[cut_idx]
                if cut['start_time'] >= c['end_time']:
                    break
                if cut['end_time'] > c['start_time']:
                    excess += min(cut['end_time'], c['end_time'])
                    excess -= max(cut['start_time'], c['start_time'])
                cut_idx += 1
            return excess

        new_chapters = []

        def append_chapter(c):
            assert 'remove' not in c, 'Cut is appended to chapters'
            length = c['end_time'] - c['start_time'] - excess_duration(c)
            # Chapter is completely covered by cuts or sponsors.
            if length <= 0:
                return
            start = new_chapters[-1]['end_time'] if new_chapters else 0
            c.update(start_time=start, end_time=start + length)
            new_chapters.append(c)

        # Turn into a priority queue, index is a tie breaker.
        # Plain stack sorted by start_time is not enough: after splitting the chapter,
        # the part returned to the stack is not guaranteed to have start_time
        # less than or equal to that of the stack's head.
        chapters = [(c['start_time'], i, c) for i, c in enumerate(chapters)]
        heapq.heapify(chapters)

        _, cur_i, cur_chapter = heapq.heappop(chapters)
        while chapters:
            _, i, c = heapq.heappop(chapters)
            # Non-overlapping chapters or cuts can be appended directly. However,
            # adjacent non-overlapping cuts must be merged, which is handled by append_cut.
            if cur_chapter['end_time'] <= c['start_time']:
                (append_chapter if 'remove' not in cur_chapter else append_cut)(cur_chapter)
                cur_i, cur_chapter = i, c
                continue

            # Eight possibilities for overlapping chapters: (cut, cut), (cut, sponsor),
            # (cut, normal), (sponsor, cut), (normal, cut), (sponsor, sponsor),
            # (sponsor, normal), and (normal, sponsor). There is no (normal, normal):
            # normal chapters are assumed not to overlap.
            if 'remove' in cur_chapter:
                # (cut, cut): adjust end_time.
                if 'remove' in c:
                    cur_chapter['end_time'] = max(cur_chapter['end_time'], c['end_time'])
                # (cut, sponsor/normal): chop the beginning of the later chapter
                # (if it's not completely hidden by the cut). Push to the priority queue
                # to restore sorting by start_time: with beginning chopped, c may actually
                # start later than the remaining chapters from the queue.
                elif cur_chapter['end_time'] < c['end_time']:
                    c['start_time'] = cur_chapter['end_time']
                    c['_was_cut'] = True
                    heapq.heappush(chapters, (c['start_time'], i, c))
            # (sponsor/normal, cut).
            elif 'remove' in c:
                cur_chapter['_was_cut'] = True
                # Chop the end of the current chapter if the cut is not contained within it.
                # Chopping the end doesn't break start_time sorting, no PQ push is necessary.
                if cur_chapter['end_time'] <= c['end_time']:
                    cur_chapter['end_time'] = c['start_time']
                    append_chapter(cur_chapter)
                    cur_i, cur_chapter = i, c
                    continue
                # Current chapter contains the cut within it. If the current chapter is
                # a sponsor chapter, check whether the categories before and after the cut differ.
                if '_categories' in cur_chapter:
                    after_c = dict(cur_chapter, start_time=c['end_time'], _categories=[])
                    cur_cats = []
                    for cat_start_end in cur_chapter['_categories']:
                        if cat_start_end[1] < c['start_time']:
                            cur_cats.append(cat_start_end)
                        if cat_start_end[2] > c['end_time']:
                            after_c['_categories'].append(cat_start_end)
                    cur_chapter['_categories'] = cur_cats
                    if cur_chapter['_categories'] != after_c['_categories']:
                        # Categories before and after the cut differ: push the after part to PQ.
                        heapq.heappush(chapters, (after_c['start_time'], cur_i, after_c))
                        cur_chapter['end_time'] = c['start_time']
                        append_chapter(cur_chapter)
                        cur_i, cur_chapter = i, c
                        continue
                # Either sponsor categories before and after the cut are the same or
                # we're dealing with a normal chapter. Just register an outstanding cut:
                # subsequent append_chapter will reduce the duration.
                cur_chapter.setdefault('cut_idx', append_cut(c))
            # (sponsor, normal): if a normal chapter is not completely overlapped,
            # chop the beginning of it and push it to PQ.
            elif '_categories' in cur_chapter and '_categories' not in c:
                if cur_chapter['end_time'] < c['end_time']:
                    c['start_time'] = cur_chapter['end_time']
                    c['_was_cut'] = True
                    heapq.heappush(chapters, (c['start_time'], i, c))
            # (normal, sponsor) and (sponsor, sponsor)
            else:
                assert '_categories' in c, 'Normal chapters overlap'
                cur_chapter['_was_cut'] = True
                c['_was_cut'] = True
                # Push the part after the sponsor to PQ.
                if cur_chapter['end_time'] > c['end_time']:
                    # deepcopy to make categories in after_c and cur_chapter/c refer to different lists.
                    after_c = dict(copy.deepcopy(cur_chapter), start_time=c['end_time'])
                    heapq.heappush(chapters, (after_c['start_time'], cur_i, after_c))
                # Push the part after the overlap to PQ.
                elif c['end_time'] > cur_chapter['end_time']:
                    after_cur = dict(copy.deepcopy(c), start_time=cur_chapter['end_time'])
                    heapq.heappush(chapters, (after_cur['start_time'], cur_i, after_cur))
                    c['end_time'] = cur_chapter['end_time']
                    # (sponsor, sponsor): merge categories in the overlap.
                    if '_categories' in cur_chapter:
                        c['_categories'] = cur_chapter['_categories'] + c['_categories']
                # Inherit the cuts that the current chapter has accumulated within it.
                if 'cut_idx' in cur_chapter:
                    c['cut_idx'] = cur_chapter['cut_idx']
                cur_chapter['end_time'] = c['start_time']
                append_chapter(cur_chapter)
                cur_i, cur_chapter = i, c
        (append_chapter if 'remove' not in cur_chapter else append_cut)(cur_chapter)
        return self._remove_tiny_rename_sponsors(new_chapters), cuts

    def _remove_tiny_rename_sponsors(self, chapters):
        """Merge tiny cut-induced chapters into neighbours and title sponsor chapters."""
        new_chapters = []
        for i, c in enumerate(chapters):
            # Merge with the previous/next if the chapter is tiny.
            # Only tiny chapters resulting from a cut can be skipped.
            # Chapters that were already tiny in the original list will be preserved.
            if (('_was_cut' in c or '_categories' in c)
                    and c['end_time'] - c['start_time'] < _TINY_CHAPTER_DURATION):
                if not new_chapters:
                    # Prepend tiny chapter to the next one if possible.
                    if i < len(chapters) - 1:
                        chapters[i + 1]['start_time'] = c['start_time']
                        continue
                else:
                    old_c = new_chapters[-1]
                    if i < len(chapters) - 1:
                        next_c = chapters[i + 1]
                        # Not a typo: key names in old_c and next_c are really different.
                        prev_is_sponsor = 'categories' in old_c
                        next_is_sponsor = '_categories' in next_c
                        # Preferentially prepend tiny normals to normals and sponsors to sponsors.
                        if (('_categories' not in c and prev_is_sponsor and not next_is_sponsor)
                                or ('_categories' in c and not prev_is_sponsor and next_is_sponsor)):
                            next_c['start_time'] = c['start_time']
                            continue
                    old_c['end_time'] = c['end_time']
                    continue

            c.pop('_was_cut', None)
            cats = c.pop('_categories', None)
            if cats:
                # The titular category is the one with the shortest (start, end) span.
                category = min(cats, key=lambda c: c[2] - c[1])[0]
                cats = orderedSet(x[0] for x in cats)
                c.update({
                    'category': category,
                    'categories': cats,
                    'name': SponsorBlockPP.CATEGORIES[category],
                    'category_names': [SponsorBlockPP.CATEGORIES[c] for c in cats]
                })
                c['title'] = self._downloader.evaluate_outtmpl(self._sponsorblock_chapter_title, c.copy())
                # Merge identically named sponsors.
                if (new_chapters and 'categories' in new_chapters[-1]
                        and new_chapters[-1]['title'] == c['title']):
                    new_chapters[-1]['end_time'] = c['end_time']
                    continue
            new_chapters.append(c)
        return new_chapters

    def remove_chapters(self, filename, ranges_to_cut, concat_opts, force_keyframes=False):
        """Cut ranges_to_cut out of filename; returns the temp output filepath."""
        in_file = filename
        out_file = prepend_extension(in_file, 'temp')
        if force_keyframes:
            in_file = self.force_keyframes(in_file, (t for c in ranges_to_cut for t in (c['start_time'], c['end_time'])))
        # FIX: the message previously read 'Removing chapters from (unknown)' --
        # the f-string had no placeholder, so the actual file name was never shown.
        self.to_screen(f'Removing chapters from {filename}')
        self.concat_files([in_file] * len(concat_opts), out_file, concat_opts)
        if in_file != filename:
            self._delete_downloaded_files(in_file, msg=None)
        return out_file

    @staticmethod
    def _make_concat_opts(chapters_to_remove, duration):
        """Build per-segment concat-demuxer options (inpoint/outpoint) that
        keep everything except the given ranges."""
        opts = [{}]
        for s in chapters_to_remove:
            # Do not create 0 duration chunk at the beginning.
            if s['start_time'] == 0:
                opts[-1]['inpoint'] = f'{s["end_time"]:.6f}'
                continue
            opts[-1]['outpoint'] = f'{s["start_time"]:.6f}'
            # Do not create 0 duration chunk at the end.
            if s['end_time'] < duration:
                opts.append({'inpoint': f'{s["end_time"]:.6f}'})
        return opts