[ITV] Fix extractor, add subtitles and thumbnails (#913)
author    coletdjnz <redacted>
          Sun, 12 Sep 2021 20:56:19 +0000 (20:56 +0000)
committer GitHub <redacted>
          Sun, 12 Sep 2021 20:56:19 +0000 (02:26 +0530)
Original PR: https://github.com/ytdl-org/youtube-dl/pull/28955 (see also https://github.com/ytdl-org/youtube-dl/issues/28906#issuecomment-831008270)

Closes #861, https://github.com/ytdl-org/youtube-dl/issues/28906, https://github.com/ytdl-org/youtube-dl/issues/29337, https://github.com/ytdl-org/youtube-dl/issues/29190, https://github.com/ytdl-org/youtube-dl/issues/28939, https://github.com/ytdl-org/youtube-dl/issues/29620

Authored-by: coletdjnz, sleaux-meaux, Vangelis66
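
In short, as the diff below shows, the reworked extractor parses the page's "data-video-variants" JSON and picks a (platform tag, featureset) pair that offers 'hls'/'aes' streams, instead of hard-coding the 'dotcom' platform tag with a fixed featureset; the same lookup is reused to find an 'outband-webvtt' featureset for subtitles. A condensed, illustrative sketch of that selection (the sample `variants` dict here is made up, not real ITV API output):

    # Illustrative only: `variants` stands in for the parsed
    # "data-video-variants" attribute that the extractor reads from the page.
    variants = {
        'dotcom': [['hls', 'aes', 'outband-webvtt'], ['dash', 'widevine']],
    }

    # Same selection the new _real_extract performs: the first featureset
    # whose first two entries are ['hls', 'aes'], plus its platform tag.
    platform_tag, featureset = next(
        ((tag, fs) for tag, fsets in variants.items() for fs in fsets
         if fs[:2] == ['hls', 'aes']),
        (None, None))

    print(platform_tag, featureset)  # -> dotcom ['hls', 'aes', 'outband-webvtt']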
yt_dlp/extractor/itv.py

index 4122ac880cc178a8e8e4bee6f0167480c6c9f158..3418689d6787130affcda3925aad62849b69e23d 100644 (file)
@@ -5,10 +5,14 @@
 
 from .common import InfoExtractor
 from .brightcove import BrightcoveNewIE
+
+from ..compat import compat_str
 from ..utils import (
+    base_url,
     clean_html,
     determine_ext,
     extract_attributes,
+    ExtractorError,
     get_element_by_class,
     JSON_LD_RE,
     merge_dicts,
@@ -16,6 +20,8 @@
     smuggle_url,
     try_get,
     url_or_none,
+    url_basename,
+    urljoin,
 )
 
 
@@ -23,15 +29,32 @@ class ITVIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/hub/[^/]+/(?P<id>[0-9a-zA-Z]+)'
     _GEO_COUNTRIES = ['GB']
     _TESTS = [{
-        'url': 'https://www.itv.com/hub/liar/2a4547a0012',
+        'url': 'https://www.itv.com/hub/plebs/2a1873a0002',
+        'info_dict': {
+            'id': '2a1873a0002',
+            'ext': 'mp4',
+            'title': 'Plebs - The Orgy',
+            'description': 'md5:4d7159af53ebd5b36e8b3ec82a41fdb4',
+            'series': 'Plebs',
+            'season_number': 1,
+            'episode_number': 1,
+            'thumbnail': r're:https?://hubimages\.itv\.com/episode/2_1873_0002'
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
+    }, {
+        'url': 'https://www.itv.com/hub/the-jonathan-ross-show/2a1166a0209',
         'info_dict': {
-            'id': '2a4547a0012',
+            'id': '2a1166a0209',
             'ext': 'mp4',
-            'title': 'Liar - Series 2 - Episode 6',
-            'description': 'md5:d0f91536569dec79ea184f0a44cca089',
-            'series': 'Liar',
-            'season_number': 2,
-            'episode_number': 6,
+            'title': 'The Jonathan Ross Show - Series 17 - Episode 8',
+            'description': 'md5:3023dcdd375db1bc9967186cdb3f1399',
+            'series': 'The Jonathan Ross Show',
+            'episode_number': 8,
+            'season_number': 17,
+            'thumbnail': r're:https?://hubimages\.itv\.com/episode/2_1166_0209'
         },
         'params': {
             # m3u8 download
@@ -51,22 +74,16 @@ class ITVIE(InfoExtractor):
         'only_matching': True,
     }]
 
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        params = extract_attributes(self._search_regex(
-            r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params'))
-
-        ios_playlist_url = params.get('data-video-playlist') or params['data-video-id']
-        hmac = params['data-video-hmac']
-        headers = self.geo_verification_headers()
-        headers.update({
+    def _generate_api_headers(self, hmac):
+        return merge_dicts({
             'Accept': 'application/vnd.itv.vod.playlist.v2+json',
             'Content-Type': 'application/json',
             'hmac': hmac.upper(),
-        })
-        ios_playlist = self._download_json(
-            ios_playlist_url, video_id, data=json.dumps({
+        }, self.geo_verification_headers())
+
+    def _call_api(self, video_id, playlist_url, headers, platform_tag, featureset, fatal=True):
+        return self._download_json(
+            playlist_url, video_id, data=json.dumps({
                 'user': {
                     'itvUserId': '',
                     'entitlements': [],
@@ -87,15 +104,56 @@ def _real_extract(self, url):
                 },
                 'variantAvailability': {
                     'featureset': {
-                        'min': ['hls', 'aes', 'outband-webvtt'],
-                        'max': ['hls', 'aes', 'outband-webvtt']
+                        'min': featureset,
+                        'max': featureset
                     },
-                    'platformTag': 'dotcom'
+                    'platformTag': platform_tag
                 }
-            }).encode(), headers=headers)
-        video_data = ios_playlist['Playlist']['Video']
-        ios_base_url = video_data.get('Base')
+            }).encode(), headers=headers, fatal=fatal)
+
+    def _get_subtitles(self, video_id, variants, ios_playlist_url, headers, *args, **kwargs):
+        subtitles = {}
+        platform_tag_subs, featureset_subs = next(
+            ((platform_tag, featureset)
+             for platform_tag, featuresets in variants.items() for featureset in featuresets
+             if try_get(featureset, lambda x: x[2]) == 'outband-webvtt'),
+            (None, None))
+        if platform_tag_subs or featureset_subs:
+            subs_playlist = self._call_api(
+                video_id, ios_playlist_url, headers, platform_tag_subs, featureset_subs, fatal=False)
+            subs = try_get(subs_playlist, lambda x: x['Playlist']['Video']['Subtitles'], list) or []
+            for sub in subs:
+                if not isinstance(sub, dict):
+                    continue
+                href = url_or_none(sub.get('Href'))
+                if not href:
+                    continue
+                subtitles.setdefault('en', []).append({'url': href})
+        return subtitles
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        params = extract_attributes(self._search_regex(
+            r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params'))
+        variants = self._parse_json(
+            try_get(params, lambda x: x['data-video-variants'], compat_str) or '{}',
+            video_id, fatal=False)
+        platform_tag_video, featureset_video = next(
+            ((platform_tag, featureset)
+             for platform_tag, featuresets in variants.items() for featureset in featuresets
+             if try_get(featureset, lambda x: x[:2]) == ['hls', 'aes']),
+            (None, None))
+        if not platform_tag_video or not featureset_video:
+            raise ExtractorError('No downloads available', expected=True, video_id=video_id)
 
+        ios_playlist_url = params.get('data-video-playlist') or params['data-video-id']
+        headers = self._generate_api_headers(params['data-video-hmac'])
+        ios_playlist = self._call_api(
+            video_id, ios_playlist_url, headers, platform_tag_video, featureset_video)
+
+        video_data = try_get(ios_playlist, lambda x: x['Playlist']['Video'], dict) or {}
+        ios_base_url = video_data.get('Base')
         formats = []
         for media_file in (video_data.get('MediaFiles') or []):
             href = media_file.get('Href')
@@ -113,20 +171,6 @@ def _real_extract(self, url):
                     'url': href,
                 })
         self._sort_formats(formats)
-
-        subtitles = {}
-        subs = video_data.get('Subtitles') or []
-        for sub in subs:
-            if not isinstance(sub, dict):
-                continue
-            href = url_or_none(sub.get('Href'))
-            if not href:
-                continue
-            subtitles.setdefault('en', []).append({
-                'url': href,
-                'ext': determine_ext(href, 'vtt'),
-            })
-
         info = self._search_json_ld(webpage, video_id, default={})
         if not info:
             json_ld = self._parse_json(self._search_regex(
@@ -140,13 +184,33 @@ def _real_extract(self, url):
                         info = self._json_ld(item, video_id, fatal=False) or {}
                         break
 
+        thumbnails = []
+        thumbnail_url = try_get(params, lambda x: x['data-video-posterframe'], compat_str)
+        if thumbnail_url:
+            thumbnails.extend([{
+                'url': thumbnail_url.format(width=1920, height=1080, quality=100, blur=0, bg='false'),
+                'width': 1920,
+                'height': 1080,
+            }, {
+                'url': urljoin(base_url(thumbnail_url), url_basename(thumbnail_url)),
+                'preference': -2
+            }])
+
+        thumbnail_url = self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None)
+        if thumbnail_url:
+            thumbnails.append({
+                'url': thumbnail_url,
+            })
+        self._remove_duplicate_formats(thumbnails)
+
         return merge_dicts({
             'id': video_id,
             'title': self._html_search_meta(['og:title', 'twitter:title'], webpage),
             'formats': formats,
-            'subtitles': subtitles,
+            'subtitles': self.extract_subtitles(video_id, variants, ios_playlist_url, headers),
             'duration': parse_duration(video_data.get('Duration')),
             'description': clean_html(get_element_by_class('episode-info__synopsis', webpage)),
+            'thumbnails': thumbnails
         }, info)
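
For reference, a minimal usage sketch (not part of the commit) that exercises the new 'subtitles' and 'thumbnails' fields through yt-dlp's Python API, using the first test URL above. ITV Hub is geo-restricted to GB, so this will only succeed from a UK connection:

    import yt_dlp

    # download=False: run only the extractor; no media is downloaded.
    with yt_dlp.YoutubeDL({'quiet': True}) as ydl:
        info = ydl.extract_info('https://www.itv.com/hub/plebs/2a1873a0002',
                                download=False)
        print(info['title'])
        print(list(info.get('subtitles') or {}))   # e.g. ['en'] when WebVTT subs exist
        print(len(info.get('thumbnails') or []))   # poster frame + og:image entries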