[ie/matchtv] Fix extractor (#10190)
[yt-dlp.git] / yt_dlp / extractor / gedidigital.py
index 4ae5362b4db0550c0ffafbf51f18000cfbd9acdd..2ffa66030741f7cb23a61cb0e08c09f98f707836 100644 (file)
@@ -11,7 +11,7 @@
 
 
 class GediDigitalIE(InfoExtractor):
-    _VALID_URL = r'''(?x:(?P<url>(?:https?:)//video\.
+    _VALID_URL = r'''(?x:(?P<base_url>(?:https?:)//video\.
         (?:
             (?:
                 (?:espresso\.)?repubblica
@@ -34,6 +34,12 @@ class GediDigitalIE(InfoExtractor):
                 |lasentinella
             )\.gelocal
         )\.it(?:/[^/]+){2,4}/(?P<id>\d+))(?:$|[?&].*))'''
+    _EMBED_REGEX = [rf'''(?x)
+            (?:
+                data-frame-src=|
+                <iframe[^\n]+src=
+            )
+            (["'])(?P<url>{_VALID_URL})\1''']
     _TESTS = [{
         'url': 'https://video.lastampa.it/politica/il-paradosso-delle-regionali-la-lega-vince-ma-sembra-aver-perso/121559/121683',
         'md5': '84658d7fb9e55a6e57ecc77b73137494',
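Illustration (not part of the patch): the new _EMBED_REGEX entry wraps the full _VALID_URL in a named "url" group, which is what the inherited embed discovery looks for. A minimal sketch of how a pattern of this shape picks an embed out of page markup; the single-host URL pattern and the iframe snippet below are invented for the example:

    import re

    # Trimmed-down stand-in for _VALID_URL (one host only), same group names.
    _VALID_URL = r'(?P<base_url>(?:https?:)//video\.lastampa\.it(?:/[^/]+){2,4}/(?P<id>\d+))(?:$|[?&].*)'
    # Same shape as in the patch: the quote captured by group 1 must be re-matched by \1.
    _EMBED_REGEX = rf'''(?x)
        (?:
            data-frame-src=|
            <iframe[^\n]+src=
        )
        (["'])(?P<url>{_VALID_URL})\1'''

    snippet = ('<iframe width="640" height="360" '
               'src="https://video.lastampa.it/politica/foo/121559/121683?vclk=player"></iframe>')
    mobj = re.search(_EMBED_REGEX, snippet)
    print(mobj.group('url'))  # https://video.lastampa.it/politica/foo/121559/121683?vclk=player
    print(mobj.group('id'))   # 121683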
@@ -103,28 +109,15 @@ def _sanitize_urls(urls):
         # add protocol if missing
         for i, e in enumerate(urls):
             if e.startswith('//'):
-                urls[i] = 'https:%s' % e
+                urls[i] = f'https:{e}'
         # clean iframes urls
         for i, e in enumerate(urls):
             urls[i] = urljoin(base_url(e), url_basename(e))
         return urls
 
-    @staticmethod
-    def _extract_urls(webpage):
-        entries = [
-            mobj.group('eurl')
-            for mobj in re.finditer(r'''(?x)
-            (?:
-                data-frame-src=|
-                <iframe[^\n]+src=
-            )
-            (["'])(?P<eurl>%s)\1''' % GediDigitalIE._VALID_URL, webpage)]
-        return GediDigitalIE._sanitize_urls(entries)
-
-    @staticmethod
-    def _extract_url(webpage):
-        urls = GediDigitalIE._extract_urls(webpage)
-        return urls[0] if urls else None
+    @classmethod
+    def _extract_embed_urls(cls, url, webpage):
+        return cls._sanitize_urls(list(super()._extract_embed_urls(url, webpage)))
 
     @staticmethod
     def _clean_formats(formats):
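Illustration (not part of the patch): the override delegates the page scan to the inherited _extract_embed_urls(), which is expected to apply each _EMBED_REGEX pattern and yield its "url" group; the existing _sanitize_urls() then cleans every hit. What that clean-up does to one URL, using the same helpers this file imports from yt_dlp.utils (the embed URL is invented):

    from yt_dlp.utils import base_url, url_basename, urljoin

    embed = '//video.lastampa.it/politica/foo/121559/121683?vclk=player&ref=home'
    # Step 1: add a scheme to protocol-relative URLs.
    if embed.startswith('//'):
        embed = f'https:{embed}'
    # Step 2: drop query/fragment noise by re-joining the directory part
    # with the last path component.
    print(urljoin(base_url(embed), url_basename(embed)))
    # -> https://video.lastampa.it/politica/foo/121559/121683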
@@ -139,8 +132,7 @@ def _clean_formats(formats):
         formats[:] = clean_formats
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        url = self._match_valid_url(url).group('url')
+        video_id, url = self._match_valid_url(url).group('id', 'base_url')
         webpage = self._download_webpage(url, video_id)
         title = self._html_search_meta(
             ['twitter:title', 'og:title'], webpage, fatal=True)
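Illustration (not part of the patch): the collapsed lookup works because re.Match.group() accepts several group names and returns the captures as a tuple, so both values come from a single match. A standalone sketch with a made-up host and a simplified pattern:

    import re

    pattern = r'(?P<base_url>https://video\.example\.it(?:/[^/]+){2,4}/(?P<id>\d+))(?:$|[?&].*)'
    m = re.match(pattern, 'https://video.example.it/politica/foo/121559/121683?ref=home')
    video_id, url = m.group('id', 'base_url')
    print(video_id)  # 121683
    print(url)       # https://video.example.it/politica/foo/121559/121683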
@@ -174,7 +166,7 @@ def _real_extract(self, url):
                             'abr': abr,
                             'tbr': abr,
                             'acodec': ext,
-                            'vcodec': 'none'
+                            'vcodec': 'none',
                         })
                     else:
                         mobj = re.match(r'^video-rrtv-(\d+)(?:-(\d+))?$', n)
@@ -194,7 +186,6 @@ def _real_extract(self, url):
                     duration = int_or_none(v)
 
         self._clean_formats(formats)
-        self._sort_formats(formats)
 
         return {
             'id': video_id,