[utils] Better traceback for `ExtractorError`
[yt-dlp.git] / yt_dlp / extractor / bbc.py
index 09b2932d20ef53d799b4775c4644d407b63501d3..b664a70075ee7b818e3a1d7854f8187bc76e9292 100644
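The new `except ExtractorError` branches in this patch rely on the error object keeping the original failure reachable: `exc_info` holds the `(type, value, traceback)` triple captured when the error was raised, and `cause` the original exception. A minimal sketch of that check (the helper name is hypothetical; assumes yt-dlp is importable):

    import urllib.error

    from yt_dlp.utils import ExtractorError

    def _is_http_error(e: ExtractorError, *codes: int) -> bool:
        """True if `e` wraps an HTTPError whose status is one of `codes`."""
        err = e.exc_info[1] or e.cause  # the original exception, however it was attached
        return isinstance(err, urllib.error.HTTPError) and err.code in codes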
@@ -10,9 +10,8 @@
 from ..compat import (
     compat_etree_Element,
     compat_HTTPError,
-    compat_parse_qs,
     compat_str,
-    compat_urllib_parse_urlparse,
+    compat_urllib_error,
     compat_urlparse,
 )
 from ..utils import (
@@ -26,6 +25,7 @@
     js_to_json,
     parse_duration,
     parse_iso8601,
+    parse_qs,
     strip_or_none,
     try_get,
     unescapeHTML,
@@ -39,7 +39,7 @@
 class BBCCoUkIE(InfoExtractor):
     IE_NAME = 'bbc.co.uk'
     IE_DESC = 'BBC iPlayer'
-    _ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})'
+    _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})'
     _VALID_URL = r'''(?x)
                     https?://
                         (?:www\.)?bbc\.co\.uk/
@@ -395,9 +395,17 @@ def _process_media_selector(self, media_selection, programme_id):
                         formats.extend(self._extract_mpd_formats(
                             href, programme_id, mpd_id=format_id, fatal=False))
                     elif transfer_format == 'hls':
-                        formats.extend(self._extract_m3u8_formats(
-                            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
-                            m3u8_id=format_id, fatal=False))
+                        # TODO: let expected_status be passed into _extract_xxx_formats() instead
+                        try:
+                            fmts = self._extract_m3u8_formats(
+                                href, programme_id, ext='mp4', entry_protocol='m3u8_native',
+                                m3u8_id=format_id, fatal=False)
+                        except ExtractorError as e:
+                            if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
+                                    and e.exc_info[1].code in (403, 404)):
+                                raise
+                            fmts = []
+                        formats.extend(fmts)
                     elif transfer_format == 'hds':
                         formats.extend(self._extract_f4m_formats(
                             href, programme_id, f4m_id=format_id, fatal=False))
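On the TODO in this hunk: `_download_webpage()` and `_download_json()` already accept an `expected_status` argument that makes selected HTTP statuses non-fatal at the request layer, which is the direction the comment points at. A rough sketch under that assumption (class and method names are illustrative only):

    from yt_dlp.extractor.common import InfoExtractor

    class _MediaSelectorSketch(InfoExtractor):
        def _fetch_hls_manifest(self, href, programme_id):
            # 403/404 responses come back as ordinary non-fatal results instead
            # of surfacing as an ExtractorError that callers have to unpick.
            return self._download_webpage(
                href, programme_id, 'Downloading HLS manifest', fatal=False,
                expected_status=(403, 404))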
@@ -452,9 +460,10 @@ def _download_playlist(self, playlist_id):
             playlist = self._download_json(
                 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                 playlist_id, 'Downloading playlist JSON')
+            formats = []
+            subtitles = {}
 
-            version = playlist.get('defaultAvailableVersion')
-            if version:
+            for version in playlist.get('allAvailableVersions', []):
                 smp_config = version['smpConfig']
                 title = smp_config['title']
                 description = smp_config['summary']
@@ -464,8 +473,17 @@ def _download_playlist(self, playlist_id):
                         continue
                     programme_id = item.get('vpid')
                     duration = int_or_none(item.get('duration'))
-                    formats, subtitles = self._download_media_selector(programme_id)
-                return programme_id, title, description, duration, formats, subtitles
+                    version_formats, version_subtitles = self._download_media_selector(programme_id)
+                    types = version['types']
+                    for f in version_formats:
+                        f['format_note'] = ', '.join(types)
+                        if any('AudioDescribed' in x for x in types):
+                            f['language_preference'] = -10
+                    formats += version_formats
+                    for tag, subformats in (version_subtitles or {}).items():
+                        subtitles.setdefault(tag, []).extend(subformats)
+
+            return programme_id, title, description, duration, formats, subtitles
         except ExtractorError as ee:
             if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                 raise
@@ -589,8 +607,8 @@ class BBCIE(BBCCoUkIE):
     _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)'
 
     _MEDIA_SETS = [
-        'mobile-tablet-main',
         'pc',
+        'mobile-tablet-main',
     ]
 
     _TESTS = [{
@@ -775,21 +793,33 @@ class BBCIE(BBCCoUkIE):
             'timestamp': 1437785037,
             'upload_date': '20150725',
         },
+    }, {
+        # video with window.__INITIAL_DATA__ and value as JSON string
+        'url': 'https://www.bbc.com/news/av/world-europe-59468682',
+        'info_dict': {
+            'id': 'p0b71qth',
+            'ext': 'mp4',
+            'title': 'Why France is making this woman a national hero',
+            'description': 'md5:7affdfab80e9c3a1f976230a1ff4d5e4',
+            'thumbnail': r're:https?://.+/.+\.jpg',
+            'timestamp': 1638230731,
+            'upload_date': '20211130',
+        },
     }, {
         # single video article embedded with data-media-vpid
         'url': 'http://www.bbc.co.uk/sport/rowing/35908187',
         'only_matching': True,
     }, {
+        # bbcthreeConfig
         'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1',
         'info_dict': {
             'id': 'p06556y7',
             'ext': 'mp4',
-            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
-            'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd',
+            'title': 'Things Not To Say to people that live on council estates',
+            'description': "From being labelled a 'chav', to the presumption that they're 'scroungers', people who live on council estates encounter all kinds of prejudices and false assumptions about themselves, their families, and their lifestyles. Here, eight people discuss the common statements, misconceptions, and clichés that they're tired of hearing.",
+            'duration': 360,
+            'thumbnail': r're:https?://.+/.+\.jpg',
         },
-        'params': {
-            'skip_download': True,
-        }
     }, {
         # window.__PRELOADED_STATE__
         'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl',
@@ -1162,9 +1192,16 @@ def _real_extract(self, url):
                 return self.playlist_result(
                     entries, playlist_id, playlist_title, playlist_description)
 
-        initial_data = self._parse_json(self._search_regex(
-            r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage,
-            'preload state', default='{}'), playlist_id, fatal=False)
+        initial_data = self._search_regex(
+            r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage,
+            'quoted preload state', default=None)
+        if initial_data is None:
+            initial_data = self._search_regex(
+                r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage,
+                'preload state', default='{}')
+        else:
+            initial_data = self._parse_json(initial_data or '"{}"', playlist_id, fatal=False)
+        initial_data = self._parse_json(initial_data, playlist_id, fatal=False)
         if initial_data:
             def parse_media(media):
                 if not media:
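The quoted branch above exists because newer pages (e.g. the p0b71qth test added in this patch) embed `window.__INITIAL_DATA__` as a JSON string literal whose value is itself JSON, so it has to be decoded twice. A self-contained illustration with made-up page fragments:

    import json
    import re

    QUOTED = 'window.__INITIAL_DATA__="{\\"data\\":{\\"id\\":\\"p0b71qth\\"}}";'
    PLAIN = 'window.__INITIAL_DATA__={"data":{"id":"p06556y7"}};'

    def load_initial_data(webpage):
        m = re.search(r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage)
        if m:
            # Decode twice: the first json.loads() unwraps the string literal,
            # the second parses the JSON document it contains.
            return json.loads(json.loads(m.group(1)))
        m = re.search(r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage)
        return json.loads(m.group(1)) if m else {}

    assert load_initial_data(QUOTED)['data']['id'] == 'p0b71qth'
    assert load_initial_data(PLAIN)['data']['id'] == 'p06556y7'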
@@ -1205,7 +1242,10 @@ def parse_media(media):
                 if name == 'media-experience':
                     parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
                 elif name == 'article':
-                    for block in (try_get(resp, lambda x: x['data']['blocks'], list) or []):
+                    for block in (try_get(resp,
+                                          (lambda x: x['data']['blocks'],
+                                           lambda x: x['data']['content']['model']['blocks'],),
+                                          list) or []):
                         if block.get('type') != 'media':
                             continue
                         parse_media(block.get('model'))
@@ -1410,7 +1450,7 @@ def _fetch_page(self, programme_id, per_page, series_id, page):
 
     def _real_extract(self, url):
         pid = self._match_id(url)
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         series_id = qs.get('seriesId', [None])[0]
         page = qs.get('page', [None])[0]
         per_page = 36 if page else self._PAGE_SIZE
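The `parse_qs` helper imported at the top of this patch collapses the removed two-step call into one; roughly, in stdlib terms (a sketch with an illustrative URL, not the yt-dlp helper itself):

    from urllib.parse import parse_qs, urlparse

    url = 'https://www.bbc.co.uk/iplayer/episodes/b006q2x0?seriesId=b008ttn6&page=2'
    qs = parse_qs(urlparse(url).query)  # what compat_parse_qs(compat_urllib_parse_urlparse(url).query) spelled out
    assert qs.get('seriesId', [None])[0] == 'b008ttn6'
    assert qs.get('page', [None])[0] == '2'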