[extractor] Standardize `_live_title`
diff --git a/yt_dlp/extractor/tv2.py b/yt_dlp/extractor/tv2.py
index f3480de56d22f3fb2f1309303f125586a6d62cef..b48dfe389f601456b260b25959d72a0963be4234 100644
--- a/yt_dlp/extractor/tv2.py
+++ b/yt_dlp/extractor/tv2.py
 
 
 class TV2IE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tv2\.no/v/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?tv2\.no/v\d*/(?P<id>\d+)'
     _TESTS = [{
         'url': 'http://www.tv2.no/v/916509/',
         'info_dict': {
             'id': '916509',
-            'ext': 'flv',
+            'ext': 'mp4',
             'title': 'Se Frode Gryttens hyllest av Steven Gerrard',
             'description': 'TV 2 Sportens huspoet tar avskjed med Liverpools kaptein Steven Gerrard.',
             'timestamp': 1431715610,
             'upload_date': '20150515',
-            'duration': 156.967,
+            'duration': 157,
             'view_count': int,
             'categories': list,
         },
+    }, {
+        'url': 'http://www.tv2.no/v2/916509',
+        'only_matching': True,
     }]
-    _API_DOMAIN = 'sumo.tv2.no'
-    _PROTOCOLS = ('HDS', 'HLS', 'DASH')
+    _PROTOCOLS = ('HLS', 'DASH')
     _GEO_COUNTRIES = ['NO']
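
Note: the widened _VALID_URL above accepts an optional digit after /v, so both /v/916509 and the new /v2/916509 form resolve to the same video id. A minimal sketch of how the pattern behaves (illustrative only; the extractor itself goes through self._match_id()):

import re

_VALID_URL = r'https?://(?:www\.)?tv2\.no/v\d*/(?P<id>\d+)'
for url in ('http://www.tv2.no/v/916509/', 'http://www.tv2.no/v2/916509'):
    print(re.match(_VALID_URL, url).group('id'))  # prints 916509 for both
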
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        api_base = 'http://%s/api/web/asset/%s' % (self._API_DOMAIN, video_id)
-
-        asset = self._download_json(
-            api_base + '.json', video_id,
-            'Downloading metadata JSON')['asset']
-        title = asset.get('subtitle') or asset['title']
+        asset = self._download_json('https://sumo.tv2.no/rest/assets/' + video_id, video_id,
+                                    'Downloading metadata JSON')
+        title = asset['title']
         is_live = asset.get('live') is True
 
         formats = []
         format_urls = []
         for protocol in self._PROTOCOLS:
             try:
-                data = self._download_json(
-                    api_base + '/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % protocol,
-                    video_id, 'Downloading play JSON')['playback']
+                data = self._download_json('https://api.sumo.tv2.no/play/%s?stream=%s' % (video_id, protocol),
+                                           video_id, 'Downloading playback JSON',
+                                           headers={'content-type': 'application/json'},
+                                           data='{"device":{"id":"1-1-1","name":"Nettleser (HTML)"}}'.encode())['playback']
             except ExtractorError as e:
                 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                     error = self._parse_json(e.cause.read().decode(), video_id)['error']
@@ -65,18 +65,12 @@ def _real_extract(self, url):
                         self.raise_login_required()
                     raise ExtractorError(error['description'])
                 raise
-            items = try_get(data, lambda x: x['items']['item'])
-            if not items:
-                continue
-            if not isinstance(items, list):
-                items = [items]
+            items = data.get('streams', [])
             for item in items:
-                if not isinstance(item, dict):
-                    continue
                 video_url = item.get('url')
                 if not video_url or video_url in format_urls:
                     continue
-                format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat'))
+                format_id = '%s-%s' % (protocol.lower(), item.get('type'))
                 if not self._is_valid_url(video_url, video_id, format_id):
                     continue
                 format_urls.append(video_url)
@@ -99,28 +93,26 @@ def _real_extract(self, url):
                     formats.append({
                         'url': video_url,
                         'format_id': format_id,
-                        'tbr': int_or_none(item.get('bitrate')),
-                        'filesize': int_or_none(item.get('fileSize')),
                     })
         if not formats and data.get('drmProtected'):
-            self.raise_no_formats('This video is DRM protected.', expected=True)
+            self.report_drm(video_id)
         self._sort_formats(formats)
 
         thumbnails = [{
-            'id': thumbnail.get('@type'),
-            'url': thumbnail.get('url'),
-        } for _, thumbnail in (asset.get('imageVersions') or {}).items()]
+            'id': thumb_type,
+            'url': thumb_url,
+        } for thumb_type, thumb_url in (asset.get('images') or {}).items()]
 
         return {
             'id': video_id,
             'url': video_url,
-            'title': self._live_title(title) if is_live else title,
+            'title': title,
             'description': strip_or_none(asset.get('description')),
             'thumbnails': thumbnails,
-            'timestamp': parse_iso8601(asset.get('createTime')),
+            'timestamp': parse_iso8601(asset.get('live_broadcast_time') or asset.get('update_time')),
             'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')),
             'view_count': int_or_none(asset.get('views')),
-            'categories': asset.get('keywords', '').split(','),
+            'categories': asset.get('tags', '').split(','),
             'formats': formats,
             'is_live': is_live,
         }
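
For TV2 the diff replaces both endpoints: metadata now comes from https://sumo.tv2.no/rest/assets/<id>, and playback data from https://api.sumo.tv2.no/play/<id>?stream=<protocol>, requested with a JSON device payload. A minimal sketch of that flow outside the extractor, assuming only the response fields the diff itself reads (a top-level 'playback' object whose 'streams' entries carry 'url' and 'type'):

import json
import urllib.request

video_id = '916509'
asset = json.load(urllib.request.urlopen(
    'https://sumo.tv2.no/rest/assets/' + video_id))

# The play request is a POST carrying the same device payload as the hunk above.
req = urllib.request.Request(
    'https://api.sumo.tv2.no/play/%s?stream=HLS' % video_id,
    data=b'{"device":{"id":"1-1-1","name":"Nettleser (HTML)"}}',
    headers={'content-type': 'application/json'})
playback = json.load(urllib.request.urlopen(req))['playback']

for stream in playback.get('streams', []):
    print(stream.get('type'), stream.get('url'))
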
@@ -170,7 +162,7 @@ def _real_extract(self, url):
         return self.playlist_result(entries, playlist_id, title, description)
 
 
-class KatsomoIE(TV2IE):
+class KatsomoIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?(?:katsomo|mtv(uutiset)?)\.fi/(?:sarja/[0-9a-z-]+-\d+/[0-9a-z-]+-|(?:#!/)?jakso/(?:\d+/[^/]+/)?|video/prog)(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://www.mtv.fi/sarja/mtv-uutiset-live-33001002003/lahden-pelicans-teki-kovan-ratkaisun-ville-nieminen-pihalle-1181321',
@@ -203,6 +195,93 @@ class KatsomoIE(TV2IE):
     _PROTOCOLS = ('HLS', 'MPD')
     _GEO_COUNTRIES = ['FI']
 
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        api_base = 'http://%s/api/web/asset/%s' % (self._API_DOMAIN, video_id)
+
+        asset = self._download_json(
+            api_base + '.json', video_id,
+            'Downloading metadata JSON')['asset']
+        title = asset.get('subtitle') or asset['title']
+        is_live = asset.get('live') is True
+
+        formats = []
+        format_urls = []
+        for protocol in self._PROTOCOLS:
+            try:
+                data = self._download_json(
+                    api_base + '/play.json?protocol=%s&videoFormat=SMIL+ISMUSP' % protocol,
+                    video_id, 'Downloading play JSON')['playback']
+            except ExtractorError as e:
+                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+                    error = self._parse_json(e.cause.read().decode(), video_id)['error']
+                    error_code = error.get('code')
+                    if error_code == 'ASSET_PLAYBACK_INVALID_GEO_LOCATION':
+                        self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
+                    elif error_code == 'SESSION_NOT_AUTHENTICATED':
+                        self.raise_login_required()
+                    raise ExtractorError(error['description'])
+                raise
+            items = try_get(data, lambda x: x['items']['item'])
+            if not items:
+                continue
+            if not isinstance(items, list):
+                items = [items]
+            for item in items:
+                if not isinstance(item, dict):
+                    continue
+                video_url = item.get('url')
+                if not video_url or video_url in format_urls:
+                    continue
+                format_id = '%s-%s' % (protocol.lower(), item.get('mediaFormat'))
+                if not self._is_valid_url(video_url, video_id, format_id):
+                    continue
+                format_urls.append(video_url)
+                ext = determine_ext(video_url)
+                if ext == 'f4m':
+                    formats.extend(self._extract_f4m_formats(
+                        video_url, video_id, f4m_id=format_id, fatal=False))
+                elif ext == 'm3u8':
+                    if not data.get('drmProtected'):
+                        formats.extend(self._extract_m3u8_formats(
+                            video_url, video_id, 'mp4',
+                            'm3u8' if is_live else 'm3u8_native',
+                            m3u8_id=format_id, fatal=False))
+                elif ext == 'mpd':
+                    formats.extend(self._extract_mpd_formats(
+                        video_url, video_id, format_id, fatal=False))
+                elif ext == 'ism' or video_url.endswith('.ism/Manifest'):
+                    pass
+                else:
+                    formats.append({
+                        'url': video_url,
+                        'format_id': format_id,
+                        'tbr': int_or_none(item.get('bitrate')),
+                        'filesize': int_or_none(item.get('fileSize')),
+                    })
+        if not formats and data.get('drmProtected'):
+            self.report_drm(video_id)
+        self._sort_formats(formats)
+
+        thumbnails = [{
+            'id': thumbnail.get('@type'),
+            'url': thumbnail.get('url'),
+        } for _, thumbnail in (asset.get('imageVersions') or {}).items()]
+
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'description': strip_or_none(asset.get('description')),
+            'thumbnails': thumbnails,
+            'timestamp': parse_iso8601(asset.get('createTime')),
+            'duration': float_or_none(asset.get('accurateDuration') or asset.get('duration')),
+            'view_count': int_or_none(asset.get('views')),
+            'categories': asset.get('keywords', '').split(','),
+            'formats': formats,
+            'is_live': is_live,
+        }
+
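
The block above moves the legacy asset/play.json flow, unchanged, from the shared base class into KatsomoIE. Roughly, it boils down to the two requests sketched below; the API domain is a class attribute outside this hunk, so the value used here is an assumption for illustration, and 'items.item' can come back as either a single dict or a list (the original handles this with try_get plus an isinstance check):

import json
import urllib.request

API_DOMAIN = 'api.katsomo.fi'  # assumed stand-in for KatsomoIE._API_DOMAIN
video_id = '1181321'
api_base = 'http://%s/api/web/asset/%s' % (API_DOMAIN, video_id)

asset = json.load(urllib.request.urlopen(api_base + '.json'))['asset']
data = json.load(urllib.request.urlopen(
    api_base + '/play.json?protocol=HLS&videoFormat=SMIL+ISMUSP'))['playback']

items = data['items']['item']
if not isinstance(items, list):  # a single stream is returned as a bare dict
    items = [items]
for item in items:
    print(item.get('mediaFormat'), item.get('url'))
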
 
 class MTVUutisetArticleIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)mtvuutiset\.fi/artikkeli/[^/]+/(?P<id>\d+)'