[cleanup] Add more ruff rules (#10149)
[yt-dlp.git] / yt_dlp / postprocessor / modify_chapters.py
index 8a2ef906512055eca54923944434f9aa9abf9f35..d82685ed850faabadb943892034c55f5ecdd6657 100644
@@ -16,13 +16,14 @@ def __init__(self, downloader, remove_chapters_patterns=None, remove_sponsor_seg
                  *, sponsorblock_chapter_title=DEFAULT_SPONSORBLOCK_CHAPTER_TITLE, force_keyframes=False):
         FFmpegPostProcessor.__init__(self, downloader)
         self._remove_chapters_patterns = set(remove_chapters_patterns or [])
-        self._remove_sponsor_segments = set(remove_sponsor_segments or []) - set(SponsorBlockPP.POI_CATEGORIES.keys())
+        self._remove_sponsor_segments = set(remove_sponsor_segments or []) - set(SponsorBlockPP.NON_SKIPPABLE_CATEGORIES.keys())
         self._ranges_to_remove = set(remove_ranges or [])
         self._sponsorblock_chapter_title = sponsorblock_chapter_title
         self._force_keyframes = force_keyframes
 
     @PostProcessor._restrict_to(images=False)
     def run(self, info):
+        self._fixup_chapters(info)
         # Chapters must be preserved intact when downloading multiple formats of the same video.
         chapters, sponsor_chapters = self._mark_chapters_to_remove(
             copy.deepcopy(info.get('chapters')) or [],
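
This hunk widens the exclusion filter from POI categories to all non-skippable categories, so highlight and chapter markers are never queued for removal even if the user lists them. A minimal sketch of the set difference, with the category maps assumed to mirror SponsorBlockPP around the time of this change:

```python
# Assumed category maps, mirroring SponsorBlockPP (illustrative, not authoritative)
POI_CATEGORIES = {'poi_highlight': 'Highlight'}
NON_SKIPPABLE_CATEGORIES = {**POI_CATEGORIES, 'chapter': 'Chapter'}

requested = {'sponsor', 'selfpromo', 'poi_highlight', 'chapter'}
removable = requested - set(NON_SKIPPABLE_CATEGORIES)
print(sorted(removable))  # ['selfpromo', 'sponsor'] – non-skippable markers are kept, never cut
```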
@@ -32,14 +33,18 @@ def run(self, info):
 
         real_duration = self._get_real_video_duration(info['filepath'])
         if not chapters:
-            chapters = [{'start_time': 0, 'end_time': real_duration, 'title': info['title']}]
+            chapters = [{'start_time': 0, 'end_time': info.get('duration') or real_duration, 'title': info['title']}]
 
         info['chapters'], cuts = self._remove_marked_arrange_sponsors(chapters + sponsor_chapters)
         if not cuts:
             return [], info
+        elif not info['chapters']:
+            self.report_warning('You have requested to remove the entire video, which is not possible')
+            return [], info
 
-        if self._duration_mismatch(real_duration, info.get('duration')):
-            if not self._duration_mismatch(real_duration, info['chapters'][-1]['end_time']):
+        original_duration, info['duration'] = info.get('duration'), info['chapters'][-1]['end_time']
+        if self._duration_mismatch(real_duration, original_duration, 1):
+            if not self._duration_mismatch(real_duration, info['duration']):
                 self.to_screen(f'Skipping {self.pp_key()} since the video appears to be already cut')
                 return [], info
             if not info.get('__real_download'):
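
The rework above records the post-cut duration into info['duration'] up front and compares the probed file duration against the extractor's value with an explicit 1-second tolerance. A rough sketch of the decision being relied on, assuming _duration_mismatch (defined on the base PostProcessor) is essentially an absolute-difference check with a tolerance; the exact tolerance and None handling here are assumptions for illustration:

```python
def duration_mismatch(real, expected, tolerance=2):
    # Stand-in for PostProcessor._duration_mismatch (assumed behaviour)
    return bool(real and expected) and abs(real - expected) > tolerance

real_duration = 571.3      # probed from the file on disk
original_duration = 600    # extractor metadata, pre-cut
post_cut_duration = 571    # end_time of the last chapter after removals

if duration_mismatch(real_duration, original_duration, 1):
    # The file disagrees with the advertised duration; if it already matches the
    # post-cut chapter map, the segments were evidently removed on a previous run.
    already_cut = not duration_mismatch(real_duration, post_cut_duration)
    print(already_cut)  # True
```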
@@ -49,7 +54,7 @@ def run(self, info):
                 self.write_debug('Expected and actual durations mismatch')
 
         concat_opts = self._make_concat_opts(cuts, real_duration)
-        self.write_debug('Concat spec = %s' % ', '.join(f'{c.get("inpoint", 0.0)}-{c.get("outpoint", "inf")}' for c in concat_opts))
+        self.write_debug('Concat spec = {}'.format(', '.join(f'{c.get("inpoint", 0.0)}-{c.get("outpoint", "inf")}' for c in concat_opts)))
 
         def remove_chapters(file, is_sub):
             return file, self.remove_chapters(file, cuts, concat_opts, self._force_keyframes and not is_sub)
@@ -98,7 +103,7 @@ def _mark_chapters_to_remove(self, chapters, sponsor_chapters):
             'start_time': start,
             'end_time': end,
             'category': 'manually_removed',
-            '_categories': [('manually_removed', start, end)],
+            '_categories': [('manually_removed', start, end, 'Manually removed')],
             'remove': True,
         } for start, end in self._ranges_to_remove)
 
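Manually removed ranges now record a four-element _categories tuple whose last element is the display name, matching the shape the renaming pass consumes further down. A small illustration of the (category_id, start_time, end_time, display_name) layout, with example timestamps:

```python
# Illustrative only: the four-element _categories entry introduced by this change
start, end = 105.0, 130.0
entry = ('manually_removed', start, end, 'Manually removed')

category, seg_start, seg_end, name = entry
print(f'{name}: {seg_end - seg_start:.0f}s tagged as {category!r}')
# Manually removed: 25s tagged as 'manually_removed'
```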
@@ -289,13 +294,12 @@ def _remove_tiny_rename_sponsors(self, chapters):
             c.pop('_was_cut', None)
             cats = c.pop('_categories', None)
             if cats:
-                category = min(cats, key=lambda c: c[2] - c[1])[0]
-                cats = orderedSet(x[0] for x in cats)
+                category, _, _, category_name = min(cats, key=lambda c: c[2] - c[1])
                 c.update({
                     'category': category,
-                    'categories': cats,
-                    'name': SponsorBlockPP.CATEGORIES[category],
-                    'category_names': [SponsorBlockPP.CATEGORIES[c] for c in cats]
+                    'categories': orderedSet(x[0] for x in cats),
+                    'name': category_name,
+                    'category_names': orderedSet(x[3] for x in cats),
                 })
                 c['title'] = self._downloader.evaluate_outtmpl(self._sponsorblock_chapter_title, c.copy())
                 # Merge identically named sponsors.
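
With the display name carried in the tuple, the merge step in this hunk no longer needs a SponsorBlockPP.CATEGORIES lookup: the constituent segment with the smallest duration supplies both the primary category and its name, and orderedSet keeps the per-segment lists de-duplicated in order. A hedged sketch of that selection (category names are illustrative, and a plain stand-in replaces yt-dlp's orderedSet helper):

```python
def ordered_set(items):
    # Order-preserving de-duplication, standing in for yt-dlp's orderedSet
    return list(dict.fromkeys(items))

cats = [
    ('sponsor', 10.0, 40.0, 'Sponsor'),
    ('selfpromo', 35.0, 45.0, 'Unpaid/Self Promotion'),
]
category, _, _, category_name = min(cats, key=lambda c: c[2] - c[1])

chapter = {
    'category': category,                               # 'selfpromo' – shortest segment wins
    'categories': ordered_set(x[0] for x in cats),      # ['sponsor', 'selfpromo']
    'name': category_name,                              # 'Unpaid/Self Promotion'
    'category_names': ordered_set(x[3] for x in cats),  # ['Sponsor', 'Unpaid/Self Promotion']
}
print(chapter)
```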