determine_ext,
error_to_compat_str,
float_or_none,
+ format_field,
get_element_by_id,
get_first,
int_or_none,
+ join_nonempty,
js_to_json,
merge_dicts,
parse_count,
(?:[^#]*?\#!/)?
(?:
(?:
+ permalink\.php|
video/video\.php|
photo\.php|
video\.php|
)\?(?:.*?)(?:v|video_id|story_fbid)=|
[^/]+/videos/(?:[^/]+/)?|
[^/]+/posts/|
- groups/[^/]+/permalink/|
+ events/(?:[^/]+/)?|
+ groups/[^/]+/(?:permalink|posts)/|
watchparty/
)|
facebook:
)
- (?P<id>[0-9]+)
+ (?P<id>pfbid[A-Za-z0-9]+|\d+)
'''
_EMBED_REGEX = [
r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
_VIDEO_PAGE_TAHOE_TEMPLATE = 'https://www.facebook.com/video/tahoe/async/%s/?chain=true&isvideo=true&payloadtype=primary'
_TESTS = [{
+ 'url': 'https://www.facebook.com/radiokicksfm/videos/3676516585958356/',
+ 'info_dict': {
+ 'id': '3676516585958356',
+ 'ext': 'mp4',
+ 'title': 'dr Adam Przygoda',
+ 'description': 'md5:34675bda53336b1d16400265c2bb9b3b',
+ 'uploader': 'RADIO KICKS FM',
+ 'upload_date': '20230818',
+ 'timestamp': 1692346159,
+ 'thumbnail': r're:^https?://.*',
+ 'uploader_id': '100063551323670',
+ 'duration': 3132.184,
+ 'view_count': int,
+ 'concurrent_view_count': 0,
+ },
+ }, {
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'upload_date': '20140506',
'timestamp': 1399398998,
'thumbnail': r're:^https?://.*',
- 'uploader_id': 'pfbid04scW44U4P9iTyLZAGy8y8W3pR3i2VugvHCimiRudUAVbN3MPp9eXBaYFcgVworZwl',
+ 'uploader_id': 'pfbid028wxorhX2ErLFJ578N6P3crHD3PHmXTCqCvfBpsnbSLmbokwSY75p5hWBjHGkG4zxl',
'duration': 131.03,
'concurrent_view_count': int,
},
'timestamp': 1486648217,
'upload_date': '20170209',
'uploader': 'Yaroslav Korpan',
- 'uploader_id': 'pfbid029y8j22EwH3ikeqgH3SEP9G3CAi9kmWKgXJJG9s5geV7mo3J2bvURqHCdgucRgAyhl',
+ 'uploader_id': 'pfbid06AScABAWcW91qpiuGrLt99Ef9tvwHoXP6t8KeFYEqkSfreMtfa9nTveh8b2ZEVSWl',
'concurrent_view_count': int,
'thumbnail': r're:^https?://.*',
'view_count': int,
'uploader_id': '100013949973717',
},
'skip': 'Requires logging in',
+ }, {
+ # data.node.comet_sections.content.story.attachments[].throwbackStyles.attachment_target_renderer.attachment.target.attachments[].styles.attachment.media
+ 'url': 'https://www.facebook.com/groups/1645456212344334/posts/3737828833107051/',
+ 'info_dict': {
+ 'id': '1569199726448814',
+ 'ext': 'mp4',
+ 'title': 'Pence MUST GO!',
+ 'description': 'Vickie Gentry shared a memory.',
+ 'timestamp': 1511548260,
+ 'upload_date': '20171124',
+ 'uploader': 'Vickie Gentry',
+ 'uploader_id': 'pfbid0FuZhHCeWDAxWxEbr3yKPFaRstXvRxgsp9uCPG6GjD4J2AitB35NUAuJ4Q75KcjiDl',
+ 'thumbnail': r're:^https?://.*',
+ 'duration': 148.435,
+ },
+ }, {
+ # data.node.comet_sections.content.story.attachments[].styles.attachment.media
+ 'url': 'https://www.facebook.com/attn/posts/pfbid0j1Czf2gGDVqeQ8KiMLFm3pWN8GxsQmeRrVhimWDzMuKQoR8r4b1knNsejELmUgyhl',
+ 'info_dict': {
+ 'id': '6968553779868435',
+ 'ext': 'mp4',
+ 'description': 'md5:2f2fcf93e97ac00244fe64521bbdb0cb',
+ 'uploader': 'ATTN:',
+ 'upload_date': '20231207',
+ 'title': 'ATTN:',
+ 'duration': 132.675,
+ 'uploader_id': '100064451419378',
+ 'view_count': int,
+ 'thumbnail': r're:^https?://.*',
+ 'timestamp': 1701975646,
+ },
+ }, {
+ # data.node.comet_sections.content.story.attachments[].styles.attachment.media
+ 'url': 'https://www.facebook.com/permalink.php?story_fbid=pfbid0fqQuVEQyXRa9Dp4RcaTR14KHU3uULHV1EK7eckNXSH63JMuoALsAvVCJ97zAGitil&id=100068861234290',
+ 'info_dict': {
+ 'id': '270103405756416',
+ 'ext': 'mp4',
+ 'title': 'Lela Evans',
+ 'description': 'Today Makkovik\'s own Pilot Mandy Smith made her inaugural landing on the airstrip in her hometown. What a proud moment as we all cheered and...',
+ 'thumbnail': r're:^https?://.*',
+ 'uploader': 'Lela Evans',
+ 'uploader_id': 'pfbid0shZJipuigyy5mqrUJn9ub5LJFWNHvan5prtyi3LrDuuuJ4NwrURgnQHYR9fywBepl',
+ 'upload_date': '20231228',
+ 'timestamp': 1703804085,
+ 'duration': 394.347,
+ 'view_count': int,
+ },
+ }, {
+ 'url': 'https://www.facebook.com/story.php?story_fbid=pfbid0Fnzhm8UuzjBYpPMNFzaSpFE9UmLdU4fJN8qTANi1Dmtj5q7DNrL5NERXfsAzDEV7l&id=100073071055552',
+ 'only_matching': True,
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
'title': 'Josef',
'thumbnail': r're:^https?://.*',
'concurrent_view_count': int,
- 'uploader_id': 'pfbid02gXHbDwxumkaKJQaTGUf3znYfYzTuidGEWawiramNx4YamSj2afwYSRkpcjtHtMRJl',
+ 'uploader_id': 'pfbid0cibUN6tV7DYgdbJdsUFN46wc4jKpVSPAvJQhFofGqBGmVn3V3JtAs2tfUwziw2hUl',
'timestamp': 1549275572,
'duration': 3.413,
'uploader': 'Josef Novak',
},
'playlist_count': 1,
'skip': 'Requires logging in',
+ }, {
+ # data.event.cover_media_renderer.cover_video
+ 'url': 'https://m.facebook.com/events/1509582499515440',
+ 'info_dict': {
+ 'id': '637246984455045',
+ 'ext': 'mp4',
+ 'title': 'ANALISI IN CAMPO OSCURO " Coaguli nel sangue dei vaccinati"',
+ 'description': 'Other event by Comitato Liberi Pensatori on Tuesday, October 18 2022',
+ 'thumbnail': r're:^https?://.*',
+ 'uploader': 'Comitato Liberi Pensatori',
+ 'uploader_id': '100065709540881',
+ },
}]
_SUPPORTED_PAGLETS_REGEX = r'(?:pagelet_group_mall|permalink_video_pagelet|hyperfeed_story_id_[0-9a-f]+)'
_api_config = {
def extract_metadata(webpage):
post_data = [self._parse_json(j, video_id, fatal=False) for j in re.findall(
- r'handleWithCustomApplyEach\(\s*ScheduledApplyEach\s*,\s*(\{.+?\})\s*\);', webpage)]
+ r'data-sjs>({.*?ScheduledServerJS.*?})</script>', webpage)]
post = traverse_obj(post_data, (
- ..., 'require', ..., ..., ..., '__bbox', 'result', 'data'), expected_type=dict) or []
+ ..., 'require', ..., ..., ..., '__bbox', 'require', ..., ..., ..., '__bbox', 'result', 'data'), expected_type=dict) or []
media = traverse_obj(post, (..., 'attachments', ..., lambda k, v: (
k == 'media' and str(v['id']) == video_id and v['__typename'] == 'Video')), expected_type=dict)
title = get_first(media, ('title', 'text'))
description = get_first(media, ('creation_story', 'comet_sections', 'message', 'story', 'message', 'text'))
- uploader_data = (
- get_first(media, ('owner', {dict}))
- or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
- or get_first(post, ('node', 'actors', ..., {dict})) or {})
-
page_title = title or self._html_search_regex((
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>(?P<content>[^<]*)</h2>',
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(?P<content>.*?)</span>',
description = description or self._html_search_meta(
['description', 'og:description', 'twitter:description'],
webpage, 'description', default=None)
+ uploader_data = (
+ get_first(media, ('owner', {dict}))
+ or get_first(post, ('video', 'creation_story', 'attachments', ..., 'media', lambda k, v: k == 'owner' and v['name']))
+ or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
+ or get_first(post, ('node', 'actors', ..., {dict}))
+ or get_first(post, ('event', 'event_creator', {dict})) or {})
uploader = uploader_data.get('name') or (
clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
or self._search_regex(
(r'ownerName\s*:\s*"([^"]+)"', *self._og_regexes('title')), webpage, 'uploader', fatal=False))
-
timestamp = int_or_none(self._search_regex(
r'<abbr[^>]+data-utime=["\'](\d+)', webpage,
'timestamp', default=None))
js_data, lambda x: x['jsmods']['instances'], list) or [])
def extract_dash_manifest(video, formats):
- dash_manifest = video.get('dash_manifest')
+ dash_manifest = traverse_obj(video, 'dash_manifest', 'playlist', expected_type=str)
if dash_manifest:
formats.extend(self._parse_mpd_formats(
- compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
+ compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
+ mpd_url=video.get('dash_manifest_url')))
def process_formats(info):
# Downloads with browser's User-Agent are rate limited. Working around
# with non-browser User-Agent.
for f in info['formats']:
+ # Downloads with browser's User-Agent are rate limited. Working around
+ # with non-browser User-Agent.
f.setdefault('http_headers', {})['User-Agent'] = 'facebookexternalhit/1.1'
- info['_format_sort_fields'] = ('res', 'quality')
+ # Formats larger than ~500MB will return error 403 unless chunk size is regulated
+ f.setdefault('downloader_options', {})['http_chunk_size'] = 250 << 20
def extract_relay_data(_filter):
return self._parse_json(self._search_regex(
- r'handleWithCustomApplyEach\([^,]+,\s*({.*?%s.*?})\);' % _filter,
+ r'data-sjs>({.*?%s.*?})</script>' % _filter,
webpage, 'replay data', default='{}'), video_id, fatal=False) or {}
def extract_relay_prefetched_data(_filter):
- replay_data = extract_relay_data(_filter)
- for require in (replay_data.get('require') or []):
- if require[0] == 'RelayPrefetchedStreamCache':
- return try_get(require, lambda x: x[3][1]['__bbox']['result']['data'], dict) or {}
+ return traverse_obj(extract_relay_data(_filter), (
+ 'require', (None, (..., ..., ..., '__bbox', 'require')),
+ lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
+ ..., ..., '__bbox', 'result', 'data', {dict}), get_all=False) or {}
if not video_data:
server_js_data = self._parse_json(self._search_regex([
if not video_data:
data = extract_relay_prefetched_data(
- r'"(?:dash_manifest|playable_url(?:_quality_hd)?)"\s*:\s*"[^"]+"')
+ r'"(?:dash_manifest|playable_url(?:_quality_hd)?)')
if data:
entries = []
formats = []
q = qualities(['sd', 'hd'])
for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'),
- ('playable_url_dash', '')):
+ ('playable_url_dash', ''), ('browser_native_hd_url', 'hd'),
+ ('browser_native_sd_url', 'sd')):
playable_url = video.get(key)
if not playable_url:
continue
else:
formats.append({
'format_id': format_id,
- 'quality': q(format_id),
+ # sd, hd formats w/o resolution info should be deprioritized below DASH
+ 'quality': q(format_id) - 3,
'url': playable_url,
})
extract_dash_manifest(video, formats)
+
+ automatic_captions, subtitles = {}, {}
+ is_broadcast = traverse_obj(video, ('is_video_broadcast', {bool}))
+ for caption in traverse_obj(video, (
+ 'video_available_captions_locales',
+ {lambda x: sorted(x, key=lambda c: c['locale'])},
+ lambda _, v: url_or_none(v['captions_url'])
+ )):
+ lang = caption.get('localized_language') or 'und'
+ subs = {
+ 'url': caption['captions_url'],
+ 'name': format_field(caption, 'localized_country', f'{lang} (%s)', default=lang),
+ }
+ if caption.get('localized_creation_method') or is_broadcast:
+ automatic_captions.setdefault(caption['locale'], []).append(subs)
+ else:
+ subtitles.setdefault(caption['locale'], []).append(subs)
+ captions_url = traverse_obj(video, ('captions_url', {url_or_none}))
+ if captions_url and not automatic_captions and not subtitles:
+ locale = self._html_search_meta(
+ ['og:locale', 'twitter:locale'], webpage, 'locale', default='en_US')
+ (automatic_captions if is_broadcast else subtitles)[locale] = [{'url': captions_url}]
+
info = {
'id': v_id,
'formats': formats,
'timestamp': traverse_obj(video, 'publish_time', 'creation_time', expected_type=int_or_none),
'duration': (float_or_none(video.get('playable_duration_in_ms'), 1000)
or float_or_none(video.get('length_in_second'))),
+ 'automatic_captions': automatic_captions,
+ 'subtitles': subtitles,
}
process_formats(info)
description = try_get(video, lambda x: x['savable_description']['text'])
nodes = variadic(traverse_obj(data, 'nodes', 'node') or [])
attachments = traverse_obj(nodes, (
..., 'comet_sections', 'content', 'story', (None, 'attached_story'), 'attachments',
- ..., ('styles', 'style_type_renderer'), 'attachment'), expected_type=dict) or []
+ ..., ('styles', 'style_type_renderer', ('throwbackStyles', 'attachment_target_renderer')),
+ 'attachment', {dict}))
for attachment in attachments:
- ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
+ ns = traverse_obj(attachment, ('all_subattachments', 'nodes', ..., {dict}),
+ ('target', 'attachments', ..., 'styles', 'attachment', {dict}))
for n in ns:
parse_attachment(n)
parse_attachment(attachment)
for edge in edges:
parse_attachment(edge, key='node')
- video = data.get('video') or {}
+ video = traverse_obj(data, (
+ 'event', 'cover_media_renderer', 'cover_video'), 'video', expected_type=dict) or {}
if video:
attachments = try_get(video, [
lambda x: x['story']['attachments'],
if len(entries) > 1:
return self.playlist_result(entries, video_id)
- video_info = entries[0]
+ video_info = entries[0] if entries else {'id': video_id}
webpage_info = extract_metadata(webpage)
# honor precise duration in video info
if video_info.get('duration'):
webpage_info['duration'] = video_info['duration']
+ # preserve preferred_thumbnail in video info
+ if video_info.get('thumbnail'):
+ webpage_info['thumbnail'] = video_info['thumbnail']
return merge_dicts(webpage_info, video_info)
if not video_data:
for src_type in ('src', 'src_no_ratelimit'):
src = f[0].get('%s_%s' % (quality, src_type))
if src:
- preference = -10 if format_id == 'progressive' else -1
+ # sd, hd formats w/o resolution info should be deprioritized below DASH
+ # TODO: investigate if progressive or src formats still exist
+ preference = -10 if format_id == 'progressive' else -3
if quality == 'hd':
- preference += 5
+ preference += 1
formats.append({
'format_id': '%s_%s_%s' % (format_id, quality, src_type),
'url': src,
video_id = self._match_id(url)
return self.url_result(
f'https://m.facebook.com/watch/?v={video_id}&_rdr', FacebookIE, video_id)
+
+
class FacebookAdsIE(InfoExtractor):
    """Extract video ads from the Facebook Ad Library (facebook.com/ads/library)."""
    _VALID_URL = r'https?://(?:[\w-]+\.)?facebook\.com/ads/library/?\?(?:[^#]+&)?id=(?P<id>\d+)'
    IE_NAME = 'facebook:ads'

    _TESTS = [{
        'url': 'https://www.facebook.com/ads/library/?id=899206155126718',
        'info_dict': {
            'id': '899206155126718',
            'ext': 'mp4',
            'title': 'video by Kandao',
            'uploader': 'Kandao',
            'uploader_id': '774114102743284',
            'uploader_url': r're:^https?://.*',
            'timestamp': 1702548330,
            'thumbnail': r're:^https?://.*',
            'upload_date': '20231214',
            'like_count': int,
        }
    }, {
        'url': 'https://www.facebook.com/ads/library/?id=893637265423481',
        'info_dict': {
            'id': '893637265423481',
            'title': 'Jusqu\u2019\u00e0 -25% sur une s\u00e9lection de vins p\u00e9tillants italiens ',
            'uploader': 'Eataly Paris Marais',
            'uploader_id': '2086668958314152',
            'uploader_url': r're:^https?://.*',
            'timestamp': 1703571529,
            'upload_date': '20231226',
            'like_count': int,
        },
        'playlist_count': 3,
    }, {
        'url': 'https://es-la.facebook.com/ads/library/?id=901230958115569',
        'only_matching': True,
    }, {
        'url': 'https://m.facebook.com/ads/library/?id=901230958115569',
        'only_matching': True,
    }]

    # snapshot URL key -> (format_id, format_note); dict insertion order doubles
    # as the quality ranking (later keys are preferred by `qualities`).
    _FORMATS_MAP = {
        'watermarked_video_sd_url': ('sd-wmk', 'SD, watermarked'),
        'video_sd_url': ('sd', None),
        'watermarked_video_hd_url': ('hd-wmk', 'HD, watermarked'),
        'video_hd_url': ('hd', None),
    }

    def _extract_formats(self, video_dict):
        """Return a formats list built from every known URL key in *video_dict*
        that actually holds a valid URL."""
        quality_of = qualities(tuple(self._FORMATS_MAP))
        fmts = []
        for key, video_url in traverse_obj(video_dict, (
            {dict.items}, lambda _, v: v[0] in self._FORMATS_MAP and url_or_none(v[1])
        )):
            fmt_id, note = self._FORMATS_MAP[key]
            fmts.append({
                'format_id': fmt_id,
                'format_note': note,
                'url': video_url,
                'ext': 'mp4',
                'quality': quality_of(key),
            })
        return fmts

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The ad metadata lives in inline JS blobs passed to s.handle(...);
        # the relevant one carries a 'deeplinkAdCard' snapshot dict.
        script_blobs = [self._parse_json(blob, video_id, fatal=False)
                        for blob in re.findall(r's\.handle\(({.*})\);requireLazy\(', webpage)]
        snapshot = traverse_obj(script_blobs, (
            ..., 'require', ..., ..., ..., 'props', 'deeplinkAdCard', 'snapshot', {dict}), get_all=False)
        if not snapshot:
            raise ExtractorError('Unable to extract ad data')

        title = snapshot.get('title')
        if not title or title == '{{product.name}}':
            # Absent or templated title: synthesize one from the ad metadata
            title = join_nonempty('display_format', 'page_name', delim=' by ', from_dict=snapshot)

        info_dict = traverse_obj(snapshot, {
            'description': ('link_description', {str}, {lambda x: x if x != '{{product.description}}' else None}),
            'uploader': ('page_name', {str}),
            'uploader_id': ('page_id', {str_or_none}),
            'uploader_url': ('page_profile_uri', {url_or_none}),
            'timestamp': ('creation_time', {int_or_none}),
            'like_count': ('page_like_count', {int_or_none}),
        })

        # One entry per card/video that exposes at least one playable URL
        entries = []
        for idx, card in enumerate(traverse_obj(snapshot, (
            ('videos', 'cards'), lambda _, v: any([url_or_none(v[f]) for f in self._FORMATS_MAP]))), 1,
        ):
            entries.append({
                'id': f'{video_id}_{idx}',
                'title': card.get('title') or title,
                'description': card.get('link_description') or info_dict.get('description'),
                'thumbnail': url_or_none(card.get('video_preview_image_url')),
                'formats': self._extract_formats(card),
            })

        # Single video: merge directly; several: wrap into a playlist result
        if len(entries) == 1:
            info_dict.update(entries[0])
        elif len(entries) > 1:
            info_dict.update({
                'title': entries[0]['title'],
                'entries': entries,
                '_type': 'playlist',
            })
        info_dict['id'] = video_id

        return info_dict