jfr.im git - yt-dlp.git/commitdiff
[postprocessor,cleanup] Create `_download_json`
authorpukkandan <redacted>
Fri, 25 Mar 2022 03:01:45 +0000 (08:31 +0530)
committerpukkandan <redacted>
Fri, 25 Mar 2022 03:15:35 +0000 (08:45 +0530)
yt_dlp/postprocessor/common.py
yt_dlp/postprocessor/sponsorblock.py

index d761c9303b1646b2a06e52876b2d6ceaff27ea15..8420ee86417c23954639711570d28309130b4b9e 100644 (file)
@@ -1,13 +1,18 @@
 from __future__ import unicode_literals
 
 import functools
+import itertools
+import json
 import os
+import time
+import urllib.error
 
-from ..compat import compat_str
 from ..utils import (
     _configuration_args,
     encodeFilename,
+    network_exceptions,
     PostProcessingError,
+    sanitized_Request,
     write_string,
 )
 
@@ -63,7 +68,7 @@ def __init__(self, downloader=None):
     @classmethod
     def pp_key(cls):
         name = cls.__name__[:-2]
-        return compat_str(name[6:]) if name[:6].lower() == 'ffmpeg' else name
+        return name[6:] if name[:6].lower() == 'ffmpeg' else name
 
     def to_screen(self, text, prefix=True, *args, **kwargs):
         tag = '[%s] ' % self.PP_NAME if prefix else ''
@@ -180,6 +185,28 @@ def report_progress(self, s):
             progress_template.get('postprocess-title') or 'yt-dlp %(progress._default_template)s',
             progress_dict))
 
+    def _download_json(self, url, *, expected_http_errors=(404,)):
+        # While this is not an extractor, it behaves similar to one and
+        # so obey extractor_retries and sleep_interval_requests
+        max_retries = self.get_param('extractor_retries', 3)
+        sleep_interval = self.get_param('sleep_interval_requests') or 0
+
+        self.write_debug(f'{self.PP_NAME} query: {url}')
+        for retries in itertools.count():
+            try:
+                rsp = self._downloader.urlopen(sanitized_Request(url))
+                return json.loads(rsp.read().decode(rsp.info().get_param('charset') or 'utf-8'))
+            except network_exceptions as e:
+                if isinstance(e, urllib.error.HTTPError) and e.code in expected_http_errors:
+                    return None
+                if retries < max_retries:
+                    self.report_warning(f'{e}. Retrying...')
+                    if sleep_interval > 0:
+                        self.to_screen(f'Sleeping {sleep_interval} seconds ...')
+                        time.sleep(sleep_interval)
+                    continue
+                raise PostProcessingError(f'Unable to communicate with {self.PP_NAME} API: {e}')
+
 
 class AudioConversionError(PostProcessingError):
     pass
index e7e04e86e76c35768e88ec8ccfa07df55e650a93..7943014e2dcdec75f68768fc0dfbbff1e9abd8df 100644 (file)
@@ -1,12 +1,9 @@
 from hashlib import sha256
-import itertools
 import json
 import re
-import time
 
 from .ffmpeg import FFmpegPostProcessor
-from ..compat import compat_urllib_parse_urlencode, compat_HTTPError
-from ..utils import PostProcessingError, network_exceptions, sanitized_Request
+from ..compat import compat_urllib_parse_urlencode
 
 
 class SponsorBlockPP(FFmpegPostProcessor):
@@ -94,28 +91,7 @@ def _get_sponsor_segments(self, video_id, service):
             'categories': json.dumps(self._categories),
             'actionTypes': json.dumps(['skip', 'poi'])
         })
-        self.write_debug(f'SponsorBlock query: {url}')
-        for d in self._get_json(url):
+        for d in self._download_json(url) or []:
             if d['videoID'] == video_id:
                 return d['segments']
         return []
-
-    def _get_json(self, url):
-        # While this is not an extractor, it behaves similar to one and
-        # so obey extractor_retries and sleep_interval_requests
-        max_retries = self.get_param('extractor_retries', 3)
-        sleep_interval = self.get_param('sleep_interval_requests') or 0
-        for retries in itertools.count():
-            try:
-                rsp = self._downloader.urlopen(sanitized_Request(url))
-                return json.loads(rsp.read().decode(rsp.info().get_param('charset') or 'utf-8'))
-            except network_exceptions as e:
-                if isinstance(e, compat_HTTPError) and e.code == 404:
-                    return []
-                if retries < max_retries:
-                    self.report_warning(f'{e}. Retrying...')
-                    if sleep_interval > 0:
-                        self.to_screen(f'Sleeping {sleep_interval} seconds ...')
-                        time.sleep(sleep_interval)
-                    continue
-                raise PostProcessingError(f'Unable to communicate with SponsorBlock API: {e}')