[extractor/mgtv] Fix formats extraction (#7234)
author     bashonly <redacted>
           Mon, 5 Jun 2023 15:52:45 +0000 (10:52 -0500)
committer  GitHub <redacted>
           Mon, 5 Jun 2023 15:52:45 +0000 (15:52 +0000)
Closes #7008
Authored by: bashonly

yt_dlp/extractor/mgtv.py

index edc92b371fbe43d390f0fd639c20a2736f7ae965..06edcb396a068517801a6d979c5136e2c1f1017a 100644 (file)
@@ -1,17 +1,17 @@
 import base64
 import time
+import urllib.error
 import uuid
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-)
 from ..utils import (
     ExtractorError,
     int_or_none,
+    parse_resolution,
+    traverse_obj,
     try_get,
     url_or_none,
+    urljoin,
 )
 
 
@@ -30,16 +30,18 @@ class MGTVIE(InfoExtractor):
             'duration': 7461,
             'thumbnail': r're:^https?://.*\.jpg$',
         },
+        'params': {'skip_download': 'm3u8'},
     }, {
         'url': 'https://w.mgtv.com/b/427837/15588271.html',
         'info_dict': {
             'id': '15588271',
             'ext': 'mp4',
-            'title': '春日迟迟再出发 沉浸版',
+            'title': '春日迟迟再出发 沉浸版第1期:陆莹结婚半年查出肾炎被离婚 吴雅婷把一半票根退给前夫',
             'description': 'md5:a7a05a05b1aa87bd50cae619b19bbca6',
             'thumbnail': r're:^https?://.+\.jpg',
             'duration': 4026,
         },
+        'params': {'skip_download': 'm3u8'},
     }, {
         'url': 'https://w.mgtv.com/b/333652/7329822.html',
         'info_dict': {
@@ -50,6 +52,7 @@ class MGTVIE(InfoExtractor):
             'thumbnail': r're:^https?://.+\.jpg',
             'duration': 2656,
         },
+        'params': {'skip_download': 'm3u8'},
     }, {
         'url': 'https://w.mgtv.com/b/427837/15591647.html',
         'only_matching': True,
@@ -64,6 +67,13 @@ class MGTVIE(InfoExtractor):
         'only_matching': True,
     }]
 
+    _RESOLUTIONS = {
+        '标清': ('480p', '854x480'),
+        '高清': ('540p', '960x540'),
+        '超清': ('720p', '1280x720'),
+        '蓝光': ('1080p', '1920x1080'),
+    }
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         tk2 = base64.urlsafe_b64encode(
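A note on the new _RESOLUTIONS table: each Mandarin quality label maps to a (quality name, 16:9 dimensions) pair. The format loop in the next hunk indexes the pair with 1 when the stream reports scale == '16:9' and with 0 otherwise, then runs the result through parse_resolution so the format dict gains width/height (or just a height for the plain 'NNNp' labels). A minimal standalone sketch of that lookup; the stream dict below is invented and only mimics the shape of the API response:

# Sketch of the lookup performed in the format loop; sample values are made up.
from yt_dlp.utils import parse_resolution, traverse_obj

_RESOLUTIONS = {
    '标清': ('480p', '854x480'),
    '高清': ('540p', '960x540'),
    '超清': ('720p', '1280x720'),
    '蓝光': ('1080p', '1920x1080'),
}

stream = {'name': '超清', 'scale': '16:9'}  # hypothetical API response fragment
resolution = traverse_obj(
    _RESOLUTIONS, (stream['name'], 1 if stream.get('scale') == '16:9' else 0))
print(resolution)                    # '1280x720'
print(parse_resolution(resolution))  # {'width': 1280, 'height': 720}
# A non-16:9 stream picks '720p' instead; parse_resolution('720p') -> {'height': 720}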
@@ -76,55 +86,60 @@ def _real_extract(self, url):
                     'type': 'pch5'
                 }, headers=self.geo_verification_headers())['data']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+            if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 401:
                 error = self._parse_json(e.cause.read().decode(), None)
                 if error.get('code') == 40005:
                     self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                 raise ExtractorError(error['msg'], expected=True)
             raise
-        info = api_data['info']
-        title = info['title'].strip()
+
         stream_data = self._download_json(
             'https://pcweb.api.mgtv.com/player/getSource', video_id, query={
-                'pm2': api_data['atc']['pm2'],
                 'tk2': tk2,
+                'pm2': api_data['atc']['pm2'],
                 'video_id': video_id,
+                'type': 'pch5',
                 'src': 'intelmgtv',
             }, headers=self.geo_verification_headers())['data']
-        stream_domain = stream_data['stream_domain'][0]
+        stream_domain = traverse_obj(stream_data, ('stream_domain', ..., {url_or_none}), get_all=False)
 
         formats = []
-        for idx, stream in enumerate(stream_data['stream']):
-            stream_path = stream.get('url')
-            if not stream_path:
-                continue
-            format_data = self._download_json(
-                stream_domain + stream_path, video_id,
-                note=f'Download video info for format #{idx}')
-            format_url = format_data.get('info')
+        for idx, stream in enumerate(traverse_obj(stream_data, ('stream', lambda _, v: v['url']))):
+            stream_name = traverse_obj(stream, 'name', 'standardName', 'barName', expected_type=str)
+            resolution = traverse_obj(
+                self._RESOLUTIONS, (stream_name, 1 if stream.get('scale') == '16:9' else 0))
+            format_url = traverse_obj(self._download_json(
+                urljoin(stream_domain, stream['url']), video_id, fatal=False,
+                note=f'Downloading video info for format {resolution or stream_name}'),
+                ('info', {url_or_none}))
             if not format_url:
                 continue
             tbr = int_or_none(stream.get('filebitrate') or self._search_regex(
                 r'_(\d+)_mp4/', format_url, 'tbr', default=None))
             formats.append({
-                'format_id': compat_str(tbr or idx),
-                'url': url_or_none(format_url),
+                'format_id': str(tbr or idx),
+                'url': format_url,
                 'ext': 'mp4',
                 'tbr': tbr,
+                'vcodec': stream.get('videoFormat'),
+                'acodec': stream.get('audioFormat'),
+                **parse_resolution(resolution),
                 'protocol': 'm3u8_native',
                 'http_headers': {
                     'Referer': url,
                 },
-                'format_note': stream.get('name'),
+                'format_note': stream_name,
             })
 
         return {
             'id': video_id,
-            'title': title,
             'formats': formats,
-            'description': info.get('desc'),
-            'duration': int_or_none(info.get('duration')),
-            'thumbnail': info.get('thumb'),
+            **traverse_obj(api_data, ('info', {
+                'title': ('title', {str.strip}),
+                'description': ('desc', {str}),
+                'duration': ('duration', {int_or_none}),
+                'thumbnail': ('thumb', {url_or_none}),
+            })),
             'subtitles': self.extract_subtitles(video_id, stream_domain),
         }
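For readers less familiar with the traverse_obj idioms this commit leans on: ('stream_domain', ..., {url_or_none}) with get_all=False yields the first entry that survives url_or_none, the lambda _, v: v['url'] branch keeps only stream entries that carry a url, and a dict used as a path element builds the metadata mapping in one pass. A self-contained sketch with invented sample data (the dicts below only loosely mimic the shape of the MGTV API responses):

# Invented sample data; shapes only loosely mirror the real API payloads.
from yt_dlp.utils import int_or_none, traverse_obj, url_or_none

stream_data = {
    'stream_domain': ['', 'https://disp.example.com'],
    'stream': [{'name': 'no-url'}, {'name': '超清', 'url': '/p1.m3u8'}],
}
api_data = {'info': {'title': '  Some title  ', 'duration': '4026'}}

# First stream_domain entry that is a valid URL (get_all=False stops at the first hit)
print(traverse_obj(stream_data, ('stream_domain', ..., {url_or_none}), get_all=False))
# -> 'https://disp.example.com'

# Branching with a callable keeps only entries whose 'url' is present and truthy
print(traverse_obj(stream_data, ('stream', lambda _, v: v['url'])))
# -> [{'name': '超清', 'url': '/p1.m3u8'}]

# A dict path element maps output keys to sub-paths, applying each transform
print(traverse_obj(api_data, ('info', {
    'title': ('title', {str.strip}),
    'duration': ('duration', {int_or_none}),
})))
# -> {'title': 'Some title', 'duration': 4026}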