diff --git a/yt_dlp/extractor/facebook.py b/yt_dlp/extractor/facebook.py
index a16a067abbf659bb4147723890a7cbaf6302cf8e..b76407a5c7eca91836a28546b3fc7b4b288eb072 100644
--- a/yt_dlp/extractor/facebook.py
+++ b/yt_dlp/extractor/facebook.py
@@ -20,6 +20,7 @@
     get_element_by_id,
     get_first,
     int_or_none,
+    join_nonempty,
     js_to_json,
     merge_dicts,
     parse_count,
@@ -43,6 +44,7 @@ class FacebookIE(InfoExtractor):
                         (?:[^#]*?\#!/)?
                         (?:
                             (?:
+                                permalink\.php|
                                 video/video\.php|
                                 photo\.php|
                                 video\.php|
@@ -52,6 +54,7 @@ class FacebookIE(InfoExtractor):
                             )\?(?:.*?)(?:v|video_id|story_fbid)=|
                             [^/]+/videos/(?:[^/]+/)?|
                             [^/]+/posts/|
+                            events/(?:[^/]+/)?|
                             groups/[^/]+/(?:permalink|posts)/|
                             watchparty/
                         )|
@@ -248,6 +251,7 @@ class FacebookIE(InfoExtractor):
             'duration': 148.435,
         },
     }, {
+        # data.node.comet_sections.content.story.attachments[].styles.attachment.media
         'url': 'https://www.facebook.com/attn/posts/pfbid0j1Czf2gGDVqeQ8KiMLFm3pWN8GxsQmeRrVhimWDzMuKQoR8r4b1knNsejELmUgyhl',
         'info_dict': {
             'id': '6968553779868435',
@@ -262,6 +266,22 @@ class FacebookIE(InfoExtractor):
             'thumbnail': r're:^https?://.*',
             'timestamp': 1701975646,
         },
+    }, {
+        # data.node.comet_sections.content.story.attachments[].styles.attachment.media
+        'url': 'https://www.facebook.com/permalink.php?story_fbid=pfbid0fqQuVEQyXRa9Dp4RcaTR14KHU3uULHV1EK7eckNXSH63JMuoALsAvVCJ97zAGitil&id=100068861234290',
+        'info_dict': {
+            'id': '270103405756416',
+            'ext': 'mp4',
+            'title': 'Lela Evans',
+            'description': 'Today Makkovik\'s own Pilot Mandy Smith made her inaugural landing on the airstrip in her hometown. What a proud moment as we all cheered and...',
+            'thumbnail': r're:^https?://.*',
+            'uploader': 'Lela Evans',
+            'uploader_id': 'pfbid0shZJipuigyy5mqrUJn9ub5LJFWNHvan5prtyi3LrDuuuJ4NwrURgnQHYR9fywBepl',
+            'upload_date': '20231228',
+            'timestamp': 1703804085,
+            'duration': 394.347,
+            'view_count': int,
+        },
     }, {
         'url': 'https://www.facebook.com/story.php?story_fbid=pfbid0Fnzhm8UuzjBYpPMNFzaSpFE9UmLdU4fJN8qTANi1Dmtj5q7DNrL5NERXfsAzDEV7l&id=100073071055552',
         'only_matching': True,
@@ -380,6 +400,18 @@ class FacebookIE(InfoExtractor):
         },
         'playlist_count': 1,
         'skip': 'Requires logging in',
+    }, {
+        # data.event.cover_media_renderer.cover_video
+        'url': 'https://m.facebook.com/events/1509582499515440',
+        'info_dict': {
+            'id': '637246984455045',
+            'ext': 'mp4',
+            'title': 'ANALISI IN CAMPO OSCURO " Coaguli nel sangue dei vaccinati"',
+            'description': 'Other event by Comitato Liberi Pensatori on Tuesday, October 18 2022',
+            'thumbnail': r're:^https?://.*',
+            'uploader': 'Comitato Liberi Pensatori',
+            'uploader_id': '100065709540881',
+        },
     }]
     _SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
     _api_config = {
@@ -454,38 +486,10 @@ def extract_metadata(webpage):
                 r'data-sjs>({.*?ScheduledServerJS.*?})</script>', webpage)]
             post = traverse_obj(post_data, (
                 ..., 'require', ..., ..., ..., '__bbox', 'require', ..., ..., ..., '__bbox', 'result', 'data'), expected_type=dict) or []
-
-            automatic_captions, subtitles = {}, {}
-            subs_data = traverse_obj(post, (..., 'video', ..., 'attachments', ..., lambda k, v: (
-                k == 'media' and str(v['id']) == video_id and v['__typename'] == 'Video')))
-            is_video_broadcast = get_first(subs_data, 'is_video_broadcast', expected_type=bool)
-            captions = get_first(subs_data, 'video_available_captions_locales', 'captions_url')
-            if url_or_none(captions):  # if subs_data only had a 'captions_url'
-                locale = self._html_search_meta(['og:locale', 'twitter:locale'], webpage, 'locale', default='en_US')
-                subtitles[locale] = [{'url': captions}]
-            # or else subs_data had 'video_available_captions_locales', a list of dicts
-            for caption in traverse_obj(captions, (
-                {lambda x: sorted(x, key=lambda c: c['locale'])}, lambda _, v: v['captions_url'])
-            ):
-                lang = caption.get('localized_language') or ''
-                subs = {
-                    'url': caption['captions_url'],
-                    'name': format_field(caption, 'localized_country', f'{lang} (%s)', default=lang),
-                }
-                if caption.get('localized_creation_method') or is_video_broadcast:
-                    automatic_captions.setdefault(caption['locale'], []).append(subs)
-                else:
-                    subtitles.setdefault(caption['locale'], []).append(subs)
-
             media = traverse_obj(post, (..., 'attachments', ..., lambda k, v: (
                 k == 'media' and str(v['id']) == video_id and v['__typename'] == 'Video')), expected_type=dict)
             title = get_first(media, ('title', 'text'))
             description = get_first(media, ('creation_story', 'comet_sections', 'message', 'story', 'message', 'text'))
-            uploader_data = (
-                get_first(media, ('owner', {dict}))
-                or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
-                or get_first(post, ('node', 'actors', ..., {dict})) or {})
-
             page_title = title or self._html_search_regex((
                 r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>(?P<content>[^<]*)</h2>',
                 r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(?P<content>.*?)</span>',
@@ -494,11 +498,16 @@ def extract_metadata(webpage):
             description = description or self._html_search_meta(
                 ['description', 'og:description', 'twitter:description'],
                 webpage, 'description', default=None)
+            uploader_data = (
+                get_first(media, ('owner', {dict}))
+                or get_first(post, ('video', 'creation_story', 'attachments', ..., 'media', lambda k, v: k == 'owner' and v['name']))
+                or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
+                or get_first(post, ('node', 'actors', ..., {dict}))
+                or get_first(post, ('event', 'event_creator', {dict})) or {})
             uploader = uploader_data.get('name') or (
                 clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
                 or self._search_regex(
                     (r'ownerName\s*:\s*"([^"]+)"', *self._og_regexes('title')), webpage, 'uploader', fatal=False))
-
             timestamp = int_or_none(self._search_regex(
                 r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
                 'timestamp', default=None))
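
A toy illustration of the uploader_data fallback chain above (sample post data assumed, not from a real page): get_first() tries each traverse_obj path in turn and returns the first non-empty match, so an owner dict on the media wins over the actors and event_creator fallbacks.

    from yt_dlp.utils import traverse_obj

    post = [{'node': {'actors': [{'name': 'Lela Evans', 'id': '100068861234290'}]}}]
    # (..., 'node', 'actors', ..., {dict}) branches over every list element,
    # descends into node/actors and keeps only dict values; get_all=False
    # returns the first hit, which is what get_first() does internally
    print(traverse_obj(post, (..., 'node', 'actors', ..., {dict}), get_all=False))
    # -> {'name': 'Lela Evans', 'id': '100068861234290'}
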
@@ -520,8 +529,6 @@ def extract_metadata(webpage):
                     webpage, 'view count', default=None)),
                 'concurrent_view_count': get_first(post, (
                     ('video', (..., ..., 'attachments', ..., 'media')), 'liveViewerCount', {int_or_none})),
-                'automatic_captions': automatic_captions,
-                'subtitles': subtitles,
             }
 
             info_json_ld = self._search_json_ld(webpage, video_id, default={})
@@ -553,7 +560,7 @@ def extract_from_jsmods_instances(js_data):
                     js_data, lambda x: x['jsmods']['instances'], list) or [])
 
         def extract_dash_manifest(video, formats):
-            dash_manifest = video.get('dash_manifest')
+            dash_manifest = traverse_obj(video, 'dash_manifest', 'playlist', expected_type=str)
             if dash_manifest:
                 formats.extend(self._parse_mpd_formats(
                     compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
@@ -563,7 +570,9 @@ def process_formats(info):
             # Downloads with browser's User-Agent are rate limited. Working around
             # with non-browser User-Agent.
             for f in info['formats']:
                 f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
+                # Formats larger than ~500MB will return error 403 unless chunk size is regulated
+                f.setdefault('downloader_options', {})['http_chunk_size'] = 250 << 20
 
         def extract_relay_data(_filter):
             return self._parse_json(self._search_regex(
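
The 250 << 20 literal is 262144000 bytes (250 MiB); yt-dlp's native HTTP downloader honors the downloader_options hint by splitting the transfer into ranged requests of at most that size. A rough standalone sketch of the same idea (hypothetical helper, stdlib only, not yt-dlp internals):

    import urllib.request

    CHUNK_SIZE = 250 << 20  # 262144000 bytes, i.e. 250 MiB

    def fetch_in_chunks(url, dest, chunk_size=CHUNK_SIZE):
        # Pull the file in Range slices no larger than chunk_size, so no single
        # request is big enough to trigger the 403 described above
        pos = 0
        with open(dest, 'wb') as f:
            while True:
                req = urllib.request.Request(url, headers={
                    'Range': f'bytes={pos}-{pos + chunk_size - 1}',
                    'User-Agent': 'facebookexternalhit/1.1',
                })
                with urllib.request.urlopen(req) as resp:
                    data = resp.read()
                f.write(data)
                pos += len(data)
                if len(data) < chunk_size:
                    break  # short or empty read: the file is exhausted
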
@@ -573,8 +584,8 @@ def extract_relay_data(_filter):
         def extract_relay_prefetched_data(_filter):
             return traverse_obj(extract_relay_data(_filter), (
                 'require', (None, (..., ..., ..., '__bbox', 'require')),
-                lambda _, v: 'RelayPrefetchedStreamCache' in v, ..., ...,
-                '__bbox', 'result', 'data', {dict}), get_all=False) or {}
+                lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
+                ..., ..., '__bbox', 'result', 'data', {dict}), get_all=False) or {}
 
         if not video_data:
             server_js_data = self._parse_json(self._search_regex([
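
A toy run of the loosened cache-module check (sample require entry assumed; the exact suffix Facebook appends is illustrative): the module label can no longer be matched exactly, so the membership test is replaced with a startswith() scan.

    require_entry = ['RelayPrefetchedStreamCache_deferred', 0, None, {'__bbox': {}}]
    print('RelayPrefetchedStreamCache' in require_entry)  # False: exact match fails
    # str() keeps this standalone demo safe on non-string entries; inside
    # traverse_obj, exceptions raised by the filter are swallowed instead
    print(any(str(key).startswith('RelayPrefetchedStreamCache')
              for key in require_entry))  # True
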
@@ -615,6 +626,29 @@ def parse_graphql_video(video):
                                 'url': playable_url,
                             })
                     extract_dash_manifest(video, formats)
+
+                    automatic_captions, subtitles = {}, {}
+                    is_broadcast = traverse_obj(video, ('is_video_broadcast', {bool}))
+                    for caption in traverse_obj(video, (
+                        'video_available_captions_locales',
+                        {lambda x: sorted(x, key=lambda c: c['locale'])},
+                        lambda _, v: url_or_none(v['captions_url'])
+                    )):
+                        lang = caption.get('localized_language') or 'und'
+                        subs = {
+                            'url': caption['captions_url'],
+                            'name': format_field(caption, 'localized_country', f'{lang} (%s)', default=lang),
+                        }
+                        if caption.get('localized_creation_method') or is_broadcast:
+                            automatic_captions.setdefault(caption['locale'], []).append(subs)
+                        else:
+                            subtitles.setdefault(caption['locale'], []).append(subs)
+                    captions_url = traverse_obj(video, ('captions_url', {url_or_none}))
+                    if captions_url and not automatic_captions and not subtitles:
+                        locale = self._html_search_meta(
+                            ['og:locale', 'twitter:locale'], webpage, 'locale', default='en_US')
+                        (automatic_captions if is_broadcast else subtitles)[locale] = [{'url': captions_url}]
+
                     info = {
                         'id': v_id,
                         'formats': formats,
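
A toy run of the caption classification above (sample caption data assumed): tracks carrying a localized_creation_method, or any track on a live broadcast, are filed under automatic_captions; the rest are proper subtitles.

    video = {
        'is_video_broadcast': False,
        'video_available_captions_locales': [
            {'locale': 'en_US', 'captions_url': 'https://example.com/en.srt'},
            {'locale': 'de_DE', 'captions_url': 'https://example.com/de.srt',
             'localized_creation_method': 'auto_generated'},
        ],
    }
    automatic_captions, subtitles = {}, {}
    for caption in sorted(video['video_available_captions_locales'],
                          key=lambda c: c['locale']):
        is_auto = caption.get('localized_creation_method') or video['is_video_broadcast']
        target = automatic_captions if is_auto else subtitles
        target.setdefault(caption['locale'], []).append({'url': caption['captions_url']})
    print(list(subtitles), list(automatic_captions))  # ['en_US'] ['de_DE']
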
@@ -624,6 +658,8 @@ def parse_graphql_video(video):
                         'timestamp': traverse_obj(video, 'publish_time', 'creation_time', expected_type=int_or_none),
                         'duration': (float_or_none(video.get('playable_duration_in_ms'), 1000)
                                      or float_or_none(video.get('length_in_second'))),
+                        'automatic_captions': automatic_captions,
+                        'subtitles': subtitles,
                     }
                     process_formats(info)
                     description = try_get(video, lambda x: x['savable_description']['text'])
@@ -658,7 +694,8 @@ def parse_attachment(attachment, key='media'):
                 for edge in edges:
                     parse_attachment(edge, key='node')
 
-                video = data.get('video') or {}
+                video = traverse_obj(data, (
+                    'event', 'cover_media_renderer', 'cover_video'), 'video', expected_type=dict) or {}
                 if video:
                     attachments = try_get(video, [
                         lambda x: x['story']['attachments'],
@@ -677,6 +714,9 @@ def parse_attachment(attachment, key='media'):
                 # honor precise duration in video info
                 if video_info.get('duration'):
                     webpage_info['duration'] = video_info['duration']
+                # preserve preferred_thumbnail in video info
+                if video_info.get('thumbnail'):
+                    webpage_info['thumbnail'] = video_info['thumbnail']
                 return merge_dicts(webpage_info, video_info)
 
         if not video_data:
@@ -907,3 +947,114 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         return self.url_result(
             f'https://m.facebook.com/watch/?v={video_id}&_rdr', FacebookIE, video_id)
+
+
+class FacebookAdsIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/ads/library/?\?(?:[^#]+&)?id=(?P<id>\d+)'
+    IE_NAME = 'facebook:ads'
+
+    _TESTS = [{
+        'url': 'https://www.facebook.com/ads/library/?id=899206155126718',
+        'info_dict': {
+            'id': '899206155126718',
+            'ext': 'mp4',
+            'title': 'video by Kandao',
+            'uploader': 'Kandao',
+            'uploader_id': '774114102743284',
+            'uploader_url': r're:^https?://.*',
+            'timestamp': 1702548330,
+            'thumbnail': r're:^https?://.*',
+            'upload_date': '20231214',
+            'like_count': int,
+        },
+    }, {
+        'url': 'https://www.facebook.com/ads/library/?id=893637265423481',
+        'info_dict': {
+            'id': '893637265423481',
+            'title': 'Jusqu\u2019\u00e0 -25% sur une s\u00e9lection de vins p\u00e9tillants italiens ',
+            'uploader': 'Eataly Paris Marais',
+            'uploader_id': '2086668958314152',
+            'uploader_url': r're:^https?://.*',
+            'timestamp': 1703571529,
+            'upload_date': '20231226',
+            'like_count': int,
+        },
+        'playlist_count': 3,
+    }, {
+        'url': 'https://es-la.facebook.com/ads/library/?id=901230958115569',
+        'only_matching': True,
+    }, {
+        'url': 'https://m.facebook.com/ads/library/?id=901230958115569',
+        'only_matching': True,
+    }]
+
+    _FORMATS_MAP = {
+        'watermarked_video_sd_url': ('sd-wmk', 'SD, watermarked'),
+        'video_sd_url': ('sd', None),
+        'watermarked_video_hd_url': ('hd-wmk', 'HD, watermarked'),
+        'video_hd_url': ('hd', None),
+    }
+
+    def _extract_formats(self, video_dict):
+        formats = []
+        for format_key, format_url in traverse_obj(video_dict, (
+            {dict.items}, lambda _, v: v[0] in self._FORMATS_MAP and url_or_none(v[1])
+        )):
+            formats.append({
+                'format_id': self._FORMATS_MAP[format_key][0],
+                'format_note': self._FORMATS_MAP[format_key][1],
+                'url': format_url,
+                'ext': 'mp4',
+                'quality': qualities(tuple(self._FORMATS_MAP))(format_key),
+            })
+        return formats
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        post_data = [self._parse_json(j, video_id, fatal=False)
+                     for j in re.findall(r's\.handle\(({.*})\);requireLazy\(', webpage)]
+        data = traverse_obj(post_data, (
+            ..., 'require', ..., ..., ..., 'props', 'deeplinkAdCard', 'snapshot', {dict}), get_all=False)
+        if not data:
+            raise ExtractorError('Unable to extract ad data')
+
+        title = data.get('title')
+        if not title or title == '{{product.name}}':
+            title = join_nonempty('display_format', 'page_name', delim=' by ', from_dict=data)
+
+        info_dict = traverse_obj(data, {
+            'description': ('link_description', {str}, {lambda x: x if x != '{{product.description}}' else None}),
+            'uploader': ('page_name', {str}),
+            'uploader_id': ('page_id', {str_or_none}),
+            'uploader_url': ('page_profile_uri', {url_or_none}),
+            'timestamp': ('creation_time', {int_or_none}),
+            'like_count': ('page_like_count', {int_or_none}),
+        })
+
+        entries = []
+        for idx, entry in enumerate(traverse_obj(
+            data, (('videos', 'cards'), lambda _, v: any([url_or_none(v[f]) for f in self._FORMATS_MAP]))), 1
+        ):
+            entries.append({
+                'id': f'{video_id}_{idx}',
+                'title': entry.get('title') or title,
+                'description': entry.get('link_description') or info_dict.get('description'),
+                'thumbnail': url_or_none(entry.get('video_preview_image_url')),
+                'formats': self._extract_formats(entry),
+            })
+
+        if len(entries) == 1:
+            info_dict.update(entries[0])
+
+        elif len(entries) > 1:
+            info_dict.update({
+                'title': entries[0]['title'],
+                'entries': entries,
+                '_type': 'playlist',
+            })
+
+        info_dict['id'] = video_id
+
+        return info_dict
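
For reference, the two helpers FacebookAdsIE leans on behave like this (illustrative values, mirroring the 'video by Kandao' title in the first test):

    from yt_dlp.utils import join_nonempty, qualities

    FORMATS_MAP = {
        'watermarked_video_sd_url': ('sd-wmk', 'SD, watermarked'),
        'video_sd_url': ('sd', None),
        'watermarked_video_hd_url': ('hd-wmk', 'HD, watermarked'),
        'video_hd_url': ('hd', None),
    }
    # qualities() maps each key to its index in the tuple (-1 if unknown), so
    # later entries rank higher and plain HD outranks everything watermarked
    prefer = qualities(tuple(FORMATS_MAP))
    print(prefer('video_hd_url'), prefer('watermarked_video_sd_url'))  # 3 0

    # join_nonempty(..., from_dict=...) looks each field name up in the dict
    # and joins the non-empty values, producing the fallback ad title
    print(join_nonempty('display_format', 'page_name', delim=' by ',
                        from_dict={'display_format': 'video', 'page_name': 'Kandao'}))
    # -> video by Kandao
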