diff --git a/yt_dlp/extractor/wat.py b/yt_dlp/extractor/wat.py
index f6940b371bfc60e268be2c1e1be2be9e3f843e35..03bac66ac6824a104a064febace41fea34ba9022 100644
--- a/yt_dlp/extractor/wat.py
+++ b/yt_dlp/extractor/wat.py
@@ -1,12 +1,9 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
-    unified_strdate,
-    HEADRequest,
+    ExtractorError,
     int_or_none,
+    try_get,
+    unified_strdate,
 )
 
 
@@ -29,6 +26,7 @@ class WatIE(InfoExtractor):
                 'skip_download': True,
             },
             'expected_warnings': ['HTTP Error 404'],
+            'skip': 'This content is no longer available',
         },
         {
             'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
@@ -40,80 +38,81 @@ class WatIE(InfoExtractor):
                 'upload_date': '20140816',
             },
             'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
+            'skip': 'This content is no longer available',
+        },
+        {
+            'url': 'wat:14010600',
+            'info_dict': {
+                'id': '14010600',
+                'title': 'Burger Quiz - S03 EP21 avec Eye Haidara, Anne Depétrini, Jonathan Zaccaï et Pio Marmaï',
+                'thumbnail': 'https://photos.tf1.fr/1280/720/burger-quiz-11-9adb79-0@1x.jpg',
+                'upload_date': '20230819',
+                'duration': 2312,
+                'ext': 'mp4',
+            },
+            'params': {'skip_download': 'm3u8'},
         },
     ]
+    _GEO_BYPASS = False
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        video_id = video_id if video_id.isdigit() and len(video_id) > 6 else compat_str(int(video_id, 36))
+        video_id = video_id if video_id.isdigit() and len(video_id) > 6 else str(int(video_id, 36))
 
         # 'contentv4' is used in the website, but it also returns the related
         # videos, we don't need them
+        # video_data = self._download_json(
+        #     'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
         video_data = self._download_json(
-            'http://www.wat.tv/interface/contentv4s/' + video_id, video_id)
+            'https://mediainfo.tf1.fr/mediainfocombo/' + video_id,
+            video_id, query={'pver': '5010000'})
         video_info = video_data['media']
 
         error_desc = video_info.get('error_desc')
         if error_desc:
-            self.report_warning(
-                '%s returned error: %s' % (self.IE_NAME, error_desc))
-
-        chapters = video_info['chapters']
-        if chapters:
-            first_chapter = chapters[0]
-
-            def video_id_for_chapter(chapter):
-                return chapter['tc_start'].split('-')[0]
+            if video_info.get('error_code') == 'GEOBLOCKED':
+                self.raise_geo_restricted(error_desc, video_info.get('geoList'))
+            raise ExtractorError(error_desc, expected=True)
 
-            if video_id_for_chapter(first_chapter) != video_id:
-                self.to_screen('Multipart video detected')
-                entries = [self.url_result('wat:%s' % video_id_for_chapter(chapter)) for chapter in chapters]
-                return self.playlist_result(entries, video_id, video_info['title'])
-            # Otherwise we can continue and extract just one part, we have to use
-            # the video id for getting the video url
-        else:
-            first_chapter = video_info
-
-        title = first_chapter['title']
-
-        def extract_url(path_template, url_type):
-            req_url = 'http://www.wat.tv/get/%s' % (path_template % video_id)
-            head = self._request_webpage(HEADRequest(req_url), video_id, 'Extracting %s url' % url_type, fatal=False)
-            if head:
-                red_url = head.geturl()
-                if req_url != red_url:
-                    return red_url
-            return None
+        title = video_info['title']
 
         formats = []
-        manifest_urls = self._download_json(
-            'http://www.wat.tv/get/webhtml/' + video_id, video_id)
-        m3u8_url = manifest_urls.get('hls')
-        if m3u8_url:
-            formats.extend(self._extract_m3u8_formats(
-                m3u8_url, video_id, 'mp4',
-                'm3u8_native', m3u8_id='hls', fatal=False))
-        mpd_url = manifest_urls.get('mpd')
-        if mpd_url:
-            formats.extend(self._extract_mpd_formats(
-                mpd_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
-                video_id, mpd_id='dash', fatal=False))
-        self._sort_formats(formats)
+        subtitles = {}
+
+        def extract_formats(manifest_urls):
+            for f, f_url in manifest_urls.items():
+                if not f_url:
+                    continue
+                if f in ('dash', 'mpd'):
+                    fmts, subs = self._extract_mpd_formats_and_subtitles(
+                        f_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
+                        video_id, mpd_id='dash', fatal=False)
+                elif f == 'hls':
+                    fmts, subs = self._extract_m3u8_formats_and_subtitles(
+                        f_url, video_id, 'mp4',
+                        'm3u8_native', m3u8_id='hls', fatal=False)
+                else:
+                    continue
+                formats.extend(fmts)
+                self._merge_subtitles(subs, target=subtitles)
 
-        date_diffusion = first_chapter.get('date_diffusion') or video_data.get('configv4', {}).get('estatS4')
-        upload_date = unified_strdate(date_diffusion) if date_diffusion else None
-        duration = None
-        files = video_info['files']
-        if files:
-            duration = int_or_none(files[0].get('duration'))
+        delivery = video_data.get('delivery') or {}
+        extract_formats({delivery.get('format'): delivery.get('url')})
+        if not formats:
+            if delivery.get('drm'):
+                self.report_drm(video_id)
+            manifest_urls = self._download_json(
+                'http://www.wat.tv/get/webhtml/' + video_id, video_id, fatal=False)
+            if manifest_urls:
+                extract_formats(manifest_urls)
 
         return {
             'id': video_id,
             'title': title,
-            'thumbnail': first_chapter.get('preview'),
-            'description': first_chapter.get('description'),
-            'view_count': int_or_none(video_info.get('views')),
-            'upload_date': upload_date,
-            'duration': duration,
+            'thumbnail': video_info.get('preview'),
+            'upload_date': unified_strdate(try_get(
+                video_data, lambda x: x['mediametrie']['chapters'][0]['estatS4'])),
+            'duration': int_or_none(video_info.get('duration')),
             'formats': formats,
+            'subtitles': subtitles,
         }
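
A minimal usage sketch (not part of the diff), assuming the rewritten WatIE above is installed: it drives the extractor through yt-dlp's public Python API, so the metadata, formats and subtitles gathered from the mediainfocombo endpoint end up in the returned info dict. The 'wat:14010600' id is the one used in the new test case; any other wat id would follow the same path.

    # Sketch only: exercise the updated extractor without downloading media.
    import yt_dlp

    with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
        # WatIE._real_extract() runs internally; the result carries the
        # 'formats' and 'subtitles' collected by extract_formats() above.
        info = ydl.extract_info('wat:14010600', download=False)
        print(info['title'], info.get('duration'), len(info['formats']))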