diff --git a/yt_dlp/extractor/facebook.py b/yt_dlp/extractor/facebook.py
index 830bbcc3c0d21876360c59f64fd2e9c75b6db629..a3ca291fcafeb0053fd0ecf2706f0cadc02e6380 100644
--- a/yt_dlp/extractor/facebook.py
+++ b/yt_dlp/extractor/facebook.py
@@ -3,18 +3,13 @@
 import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_etree_fromstring,
-    compat_str,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_etree_fromstring
 from ..networking import Request
 from ..networking.exceptions import network_exceptions
 from ..utils import (
     ExtractorError,
     clean_html,
     determine_ext,
-    error_to_compat_str,
     float_or_none,
     format_field,
     get_element_by_id,
@@ -209,7 +204,7 @@ class FacebookIE(InfoExtractor):
             'skip_download': True,
         },
     }, {
-        # FIXME
+        # FIXME: Cannot parse data error
         'url': 'https://www.facebook.com/LaGuiaDelVaron/posts/1072691702860471',
         'info_dict': {
             'id': '1072691702860471',
@@ -415,7 +410,7 @@ class FacebookIE(InfoExtractor):
     }]
     _SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
     _api_config = {
-        'graphURI': '/api/graphql/'
+        'graphURI': '/api/graphql/',
     }
 
     def _perform_login(self, username, password):
@@ -450,7 +445,7 @@ def _perform_login(self, username, password):
                     r'(?s)<div[^>]+class=(["\']).*?login_error_box.*?\1[^>]*><div[^>]*>.*?</div><div[^>]*>(?P<error>.+?)</div>',
                     login_results, 'login error', default=None, group='error')
                 if error:
-                    raise ExtractorError('Unable to login: %s' % error, expected=True)
+                    raise ExtractorError(f'Unable to login: {error}', expected=True)
                 self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min)')
                 return
 
@@ -474,7 +469,7 @@ def _perform_login(self, username, password):
             if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
                 self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.')
         except network_exceptions as err:
-            self.report_warning('unable to log in: %s' % error_to_compat_str(err))
+            self.report_warning(f'unable to log in: {err}')
             return
 
     def _extract_from_url(self, url, video_id):
@@ -493,13 +488,14 @@ def extract_metadata(webpage):
             page_title = title or self._html_search_regex((
                 r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>(?P<content>[^<]*)</h2>',
                 r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(?P<content>.*?)</span>',
-                self._meta_regex('og:title'), self._meta_regex('twitter:title'), r'<title>(?P<content>.+?)</title>'
+                self._meta_regex('og:title'), self._meta_regex('twitter:title'), r'<title>(?P<content>.+?)</title>',
             ), webpage, 'title', default=None, group='content')
             description = description or self._html_search_meta(
                 ['description', 'og:description', 'twitter:description'],
                 webpage, 'description', default=None)
             uploader_data = (
                 get_first(media, ('owner', {dict}))
+                or get_first(post, ('video', 'creation_story', 'attachments', ..., 'media', lambda k, v: k == 'owner' and v['name']))
                 or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
                 or get_first(post, ('node', 'actors', ..., {dict}))
                 or get_first(post, ('event', 'event_creator', {dict})) or {})
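
Note on the added creation_story fallback above: get_first maps each path over a
list of candidate post objects and returns the first hit. A minimal sketch of how
the new path resolves, using a hypothetical sample post (real Facebook responses
are much larger):

    from yt_dlp.utils import get_first

    # Hypothetical post shape, for illustration only
    post = [{'video': {'creation_story': {'attachments': [
        {'media': {'owner': {'name': 'Some Page', 'id': '42'}}},
    ]}}}]

    # `...` branches over all attachments; the trailing lambda keeps the
    # 'owner' dict only when it carries a non-empty name
    owner = get_first(post, (
        'video', 'creation_story', 'attachments', ..., 'media',
        lambda k, v: k == 'owner' and v['name']))
    assert owner == {'name': 'Some Page', 'id': '42'}
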
@@ -524,7 +520,7 @@ def extract_metadata(webpage):
                 'timestamp': timestamp,
                 'thumbnail': thumbnail,
                 'view_count': parse_count(self._search_regex(
-                    (r'\bviewCount\s*:\s*["\']([\d,.]+)', r'video_view_count["\']\s*:\s*(\d+)',),
+                    (r'\bviewCount\s*:\s*["\']([\d,.]+)', r'video_view_count["\']\s*:\s*(\d+)'),
                     webpage, 'view count', default=None)),
                 'concurrent_view_count': get_first(post, (
                     ('video', (..., ..., 'attachments', ..., 'media')), 'liveViewerCount', {int_or_none})),
@@ -559,7 +555,7 @@ def extract_from_jsmods_instances(js_data):
                     js_data, lambda x: x['jsmods']['instances'], list) or [])
 
         def extract_dash_manifest(video, formats):
-            dash_manifest = video.get('dash_manifest')
+            dash_manifest = traverse_obj(video, 'dash_manifest', 'playlist', expected_type=str)
             if dash_manifest:
                 formats.extend(self._parse_mpd_formats(
                     compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
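
Note on the rewritten lookup above: instead of reading only video['dash_manifest'],
the traversal now also falls back to 'playlist'. traverse_obj with several bare key
paths returns the first value matching expected_type. A sketch with hypothetical
values:

    from yt_dlp.utils import traverse_obj

    # 'dash_manifest' is None here, so the 'playlist' fallback wins;
    # expected_type=str discards any non-string hit
    video = {'dash_manifest': None, 'playlist': '%3CMPD%3E...%3C/MPD%3E'}
    assert traverse_obj(video, 'dash_manifest', 'playlist',
                        expected_type=str) == video['playlist']
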
@@ -577,19 +573,19 @@ def process_formats(info):
 
         def extract_relay_data(_filter):
             return self._parse_json(self._search_regex(
-                r'data-sjs>({.*?%s.*?})</script>' % _filter,
+                rf'data-sjs>({{.*?{_filter}.*?}})</script>',
                 webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
 
         def extract_relay_prefetched_data(_filter):
             return traverse_obj(extract_relay_data(_filter), (
                 'require', (None, (..., ..., ..., '__bbox', 'require')),
-                lambda _, v: 'RelayPrefetchedStreamCache' in v, ..., ...,
-                '__bbox', 'result', 'data', {dict}), get_all=False) or {}
+                lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
+                ..., ..., '__bbox', 'result', 'data', {dict}), get_all=False) or {}
 
         if not video_data:
             server_js_data = self._parse_json(self._search_regex([
                 r'bigPipe\.onPageletArrive\(({.+?})\)\s*;\s*}\s*\)\s*,\s*["\']onPageletArrive\s+' + self._SUPPORTED_PAGLETS_REGEX,
-                r'bigPipe\.onPageletArrive\(({.*?id\s*:\s*"%s".*?})\);' % self._SUPPORTED_PAGLETS_REGEX
+                rf'bigPipe\.onPageletArrive\(({{.*?id\s*:\s*"{self._SUPPORTED_PAGLETS_REGEX}".*?}})\);',
             ], webpage, 'js data', default='{}'), video_id, js_to_json, False)
             video_data = extract_from_jsmods_instances(server_js_data)
 
@@ -625,13 +621,16 @@ def parse_graphql_video(video):
                                 'url': playable_url,
                             })
                     extract_dash_manifest(video, formats)
+                    if not formats:
+                        # Do not append false positive entry w/o any formats
+                        return
 
                     automatic_captions, subtitles = {}, {}
                     is_broadcast = traverse_obj(video, ('is_video_broadcast', {bool}))
                     for caption in traverse_obj(video, (
                         'video_available_captions_locales',
                         {lambda x: sorted(x, key=lambda c: c['locale'])},
-                        lambda _, v: url_or_none(v['captions_url'])
+                        lambda _, v: url_or_none(v['captions_url']),
                     )):
                         lang = caption.get('localized_language') or 'und'
                         subs = {
@@ -669,7 +668,7 @@ def parse_graphql_video(video):
                             'description': description,
                         })
                     else:
-                        info['title'] = description or 'Facebook video #%s' % v_id
+                        info['title'] = description or f'Facebook video #{v_id}'
                     entries.append(info)
 
                 def parse_attachment(attachment, key='media'):
@@ -698,7 +697,7 @@ def parse_attachment(attachment, key='media'):
                 if video:
                     attachments = try_get(video, [
                         lambda x: x['story']['attachments'],
-                        lambda x: x['creation_story']['attachments']
+                        lambda x: x['creation_story']['attachments'],
                     ], list) or []
                     for attachment in attachments:
                         parse_attachment(attachment)
@@ -722,7 +721,7 @@ def parse_attachment(attachment, key='media'):
             m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
             if m_msg is not None:
                 raise ExtractorError(
-                    'The video is not available, Facebook said: "%s"' % m_msg.group(1),
+                    f'The video is not available, Facebook said: "{m_msg.group(1)}"',
                     expected=True)
             elif any(p in webpage for p in (
                     '>You must log in to continue',
@@ -759,7 +758,7 @@ def parse_attachment(attachment, key='media'):
                 v_id = video.get('id')
                 if not v_id:
                     continue
-                v_id = compat_str(v_id)
+                v_id = str(v_id)
                 entries.append(self.url_result(
                     self._VIDEO_PAGE_TEMPLATE % v_id,
                     self.ie_key(), v_id, video.get('name')))
@@ -817,7 +816,7 @@ def parse_attachment(attachment, key='media'):
                 continue
             for quality in ('sd', 'hd'):
                 for src_type in ('src', 'src_no_ratelimit'):
-                    src = f[0].get('%s_%s' % (quality, src_type))
+                    src = f[0].get(f'{quality}_{src_type}')
                     if src:
                         # sd, hd formats w/o resolution info should be deprioritized below DASH
                         # TODO: investigate if progressive or src formats still exist
@@ -825,10 +824,10 @@ def parse_attachment(attachment, key='media'):
                         if quality == 'hd':
                             preference += 1
                         formats.append({
-                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
+                            'format_id': f'{format_id}_{quality}_{src_type}',
                             'url': src,
                             'quality': preference,
-                            'height': 720 if quality == 'hd' else None
+                            'height': 720 if quality == 'hd' else None,
                         })
             extract_dash_manifest(f[0], formats)
             subtitles_src = f[0].get('subtitles_src')
@@ -878,7 +877,7 @@ class FacebookPluginsVideoIE(InfoExtractor):
 
     def _real_extract(self, url):
         return self.url_result(
-            compat_urllib_parse_unquote(self._match_id(url)),
+            urllib.parse.unquote(self._match_id(url)),
             FacebookIE.ie_key())
 
 
@@ -939,7 +938,7 @@ class FacebookReelIE(InfoExtractor):
             'timestamp': 1637502609,
             'upload_date': '20211121',
             'thumbnail': r're:^https?://.*',
-        }
+        },
     }]
 
     def _real_extract(self, url):
@@ -965,7 +964,7 @@ class FacebookAdsIE(InfoExtractor):
             'thumbnail': r're:^https?://.*',
             'upload_date': '20231214',
             'like_count': int,
-        }
+        },
     }, {
         'url': 'https://www.facebook.com/ads/library/?id=893637265423481',
         'info_dict': {
@@ -997,7 +996,7 @@ class FacebookAdsIE(InfoExtractor):
     def _extract_formats(self, video_dict):
         formats = []
         for format_key, format_url in traverse_obj(video_dict, (
-            {dict.items}, lambda _, v: v[0] in self._FORMATS_MAP and url_or_none(v[1])
+            {dict.items}, lambda _, v: v[0] in self._FORMATS_MAP and url_or_none(v[1]),
         )):
             formats.append({
                 'format_id': self._FORMATS_MAP[format_key][0],
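
Note on the traversal above: the {dict.items} step applies dict.items to the
mapping, and the lambda then filters the resulting (key, url) pairs. A sketch with
a hypothetical stand-in for self._FORMATS_MAP and the response dict:

    from yt_dlp.utils import traverse_obj, url_or_none

    FORMATS_MAP = {'video_sd_url': ('sd', None)}  # hypothetical
    video_dict = {'video_sd_url': 'https://example.com/v.mp4',
                  'video_preview_image_url': 'not a url'}
    # Keeps only pairs whose key is a known format and whose value is a URL
    pairs = traverse_obj(video_dict, (
        {dict.items}, lambda _, v: v[0] in FORMATS_MAP and url_or_none(v[1])))
    assert pairs == [('video_sd_url', 'https://example.com/v.mp4')]
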
@@ -1034,7 +1033,7 @@ def _real_extract(self, url):
 
         entries = []
         for idx, entry in enumerate(traverse_obj(
-            data, (('videos', 'cards'), lambda _, v: any([url_or_none(v[f]) for f in self._FORMATS_MAP]))), 1
+            data, (('videos', 'cards'), lambda _, v: any(url_or_none(v[f]) for f in self._FORMATS_MAP))), 1,
         ):
             entries.append({
                 'id': f'{video_id}_{idx}',