jfr.im git - yt-dlp.git/blobdiff - yt_dlp/extractor/imdb.py
[cleanup] Add more ruff rules (#10149)
[yt-dlp.git] / yt_dlp / extractor / imdb.py
index 24f1fde6403a76205d5406dc64a0311754d187f7..a786ce32e8b4f39886f6c337666719da9ce500fe 100644 (file)
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
-
 import base64
 import json
 import re
@@ -7,9 +5,10 @@
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
+    int_or_none,
     mimetype2ext,
-    parse_duration,
     qualities,
+    traverse_obj,
     try_get,
     url_or_none,
 )
@@ -28,7 +27,18 @@ class ImdbIE(InfoExtractor):
             'title': 'No. 2',
             'description': 'md5:87bd0bdc61e351f21f20d2d7441cb4e7',
             'duration': 152,
-        }
+            'thumbnail': r're:^https?://.+\.jpg',
+        },
+    }, {
+        'url': 'https://www.imdb.com/video/vi3516832537',
+        'info_dict': {
+            'id': '3516832537',
+            'ext': 'mp4',
+            'title': 'Paul: U.S. Trailer #1',
+            'description': 'md5:17fcc4fe11ec29b4399be9d4c5ef126c',
+            'duration': 153,
+            'thumbnail': r're:^https?://.+\.jpg',
+        },
     }, {
         'url': 'http://www.imdb.com/video/_/vi2524815897',
         'only_matching': True,
@@ -51,20 +61,24 @@ class ImdbIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
-        data = self._download_json(
+        webpage = self._download_webpage(f'https://www.imdb.com/video/vi{video_id}', video_id)
+        info = self._search_nextjs_data(webpage, video_id)
+        video_info = traverse_obj(info, ('props', 'pageProps', 'videoPlaybackData', 'video'), default={})
+        title = (traverse_obj(video_info, ('name', 'value'), ('primaryTitle', 'titleText', 'text'))
+                 or self._html_search_meta(('og:title', 'twitter:title'), webpage, default=None)
+                 or self._html_extract_title(webpage))
+        data = video_info.get('playbackURLs') or try_get(self._download_json(
             'https://www.imdb.com/ve/data/VIDEO_PLAYBACK_DATA', video_id,
             query={
                 'key': base64.b64encode(json.dumps({
                     'type': 'VIDEO_PLAYER',
                     'subType': 'FORCE_LEGACY',
-                    'id': 'vi%s' % video_id,
+                    'id': f'vi{video_id}',
                 }).encode()).decode(),
-            })[0]
-
+            }), lambda x: x[0]['videoLegacyEncodings'])
         quality = qualities(('SD', '480p', '720p', '1080p'))
-        formats = []
-        for encoding in data['videoLegacyEncodings']:
+        formats, subtitles = [], {}
+        for encoding in data:
             if not encoding or not isinstance(encoding, dict):
                 continue
             video_url = url_or_none(encoding.get('url'))
@@ -73,46 +87,29 @@ def _real_extract(self, url):
             ext = mimetype2ext(encoding.get(
                 'mimeType')) or determine_ext(video_url)
             if ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
+                fmts, subs = self._extract_m3u8_formats_and_subtitles(
                     video_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    preference=1, m3u8_id='hls', fatal=False))
+                    preference=1, m3u8_id='hls', fatal=False)
+                subtitles = self._merge_subtitles(subtitles, subs)
+                formats.extend(fmts)
                 continue
-            format_id = encoding.get('definition')
+            format_id = traverse_obj(encoding, ('displayName', 'value'), 'definition')
             formats.append({
                 'format_id': format_id,
                 'url': video_url,
                 'ext': ext,
                 'quality': quality(format_id),
             })
-        self._sort_formats(formats)
-
-        webpage = self._download_webpage(
-            'https://www.imdb.com/video/vi' + video_id, video_id)
-        video_metadata = self._parse_json(self._search_regex(
-            r'args\.push\(\s*({.+?})\s*\)\s*;', webpage,
-            'video metadata'), video_id)
-
-        video_info = video_metadata.get('VIDEO_INFO')
-        if video_info and isinstance(video_info, dict):
-            info = try_get(
-                video_info, lambda x: x[list(video_info.keys())[0]][0], dict)
-        else:
-            info = {}
-
-        title = self._html_search_meta(
-            ['og:title', 'twitter:title'], webpage) or self._html_search_regex(
-            r'<title>(.+?)</title>', webpage, 'title',
-            default=None) or info['videoTitle']
 
         return {
             'id': video_id,
             'title': title,
             'alt_title': info.get('videoSubTitle'),
             'formats': formats,
-            'description': info.get('videoDescription'),
-            'thumbnail': url_or_none(try_get(
-                info, lambda x: x['videoSlate']['source'])),
-            'duration': parse_duration(info.get('videoRuntime')),
+            'description': try_get(video_info, lambda x: x['description']['value']),
+            'thumbnail': url_or_none(try_get(video_info, lambda x: x['thumbnail']['url'])),
+            'duration': int_or_none(try_get(video_info, lambda x: x['runtime']['value'])),
+            'subtitles': subtitles,
         }
 
 
@@ -135,7 +132,7 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
-            for m in re.findall(r'href="(/list/ls%s/videoplayer/vi[^"]+)"' % list_id, webpage)]
+            for m in re.findall(rf'href="(/list/ls{list_id}/videoplayer/vi[^"]+)"', webpage)]
 
         list_title = self._html_search_regex(
             r'<h1[^>]+class="[^"]*header[^"]*"[^>]*>(.*?)</h1>',