]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/instagram.py
04afacb9049cc9e4dd2f957b3895a2bad091406f
[yt-dlp.git] / yt_dlp / extractor / instagram.py
1 import hashlib
2 import itertools
3 import json
4 import re
5 import time
6 import urllib.error
7
8 from .common import InfoExtractor
9 from ..utils import (
10 ExtractorError,
11 decode_base_n,
12 encode_base_n,
13 float_or_none,
14 format_field,
15 get_element_by_attribute,
16 int_or_none,
17 lowercase_escape,
18 str_or_none,
19 str_to_int,
20 traverse_obj,
21 url_or_none,
22 urlencode_postdata,
23 )
24
# Alphabet used by Instagram shortcodes: a URL-safe base-64 variant
# ('-' and '_' instead of '+' and '/'); consumed by _pk_to_id/_id_to_pk below.
_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
26
27
def _pk_to_id(id):
    """Convert a numeric media id ("pk", possibly suffixed with '_<user>') to its web shortcode.

    Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id
    """
    # Media ids may look like '<pk>_<user_pk>'; only the leading pk encodes the post.
    media_pk = int(id.split('_')[0])
    return encode_base_n(media_pk, table=_ENCODING_CHARS)
31
32
def _id_to_pk(shortcode):
    """Convert a shortcode to a numeric value"""
    # Only the first 11 characters are significant; longer codes carry extra data.
    return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
36
37
class InstagramBaseIE(InfoExtractor):
    """Shared base for the Instagram extractors: handles login and provides
    helpers that normalize Instagram GraphQL/private-API media objects into
    yt-dlp info dicts."""

    _NETRC_MACHINE = 'instagram'
    # Class-level flag so login is attempted at most once per process
    _IS_LOGGED_IN = False

    def _perform_login(self, username, password):
        """Log in through Instagram's AJAX login endpoint.

        Raises ExtractorError with a user-facing message on failure; on
        success sets the class-wide _IS_LOGGED_IN flag.
        """
        if self._IS_LOGGED_IN:
            return

        login_webpage = self._download_webpage(
            'https://www.instagram.com/accounts/login/', None,
            note='Downloading login webpage', errnote='Failed to download login webpage')

        # window._sharedData carries the CSRF token and rollout hash required
        # by the AJAX endpoint below.
        # NOTE(review): the regex falls back to '{}', in which case the
        # shared_data['config']['csrf_token'] lookup raises a bare KeyError —
        # confirm whether a clearer error message is wanted here.
        shared_data = self._parse_json(
            self._search_regex(
                r'window\._sharedData\s*=\s*({.+?});',
                login_webpage, 'shared data', default='{}'),
            None)

        login = self._download_json('https://www.instagram.com/accounts/login/ajax/', None, note='Logging in', headers={
            'Accept': '*/*',
            'X-IG-App-ID': '936619743392459',
            'X-ASBD-ID': '198387',
            'X-IG-WWW-Claim': '0',
            'X-Requested-With': 'XMLHttpRequest',
            'X-CSRFToken': shared_data['config']['csrf_token'],
            'X-Instagram-AJAX': shared_data['rollout_hash'],
            'Referer': 'https://www.instagram.com/',
        }, data=urlencode_postdata({
            # Browser clients wrap the password in this
            # '#PWD_INSTAGRAM_BROWSER:<version>:<timestamp>:<password>' envelope
            'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
            'username': username,
            'queryParams': '{}',
            'optIntoOneTap': 'false',
            'stopDeletionNonce': '',
            'trustedDeviceRecords': '{}',
        }))

        if not login.get('authenticated'):
            if login.get('message'):
                raise ExtractorError(f'Unable to login: {login["message"]}')
            elif login.get('user'):
                # truthy 'user': account exists, so the password was wrong
                raise ExtractorError('Unable to login: Sorry, your password was incorrect. Please double-check your password.', expected=True)
            elif login.get('user') is False:
                # explicit False: no account with that username
                raise ExtractorError('Unable to login: The username you entered doesn\'t belong to an account. Please check your username and try again.', expected=True)
            raise ExtractorError('Unable to login')
        InstagramBaseIE._IS_LOGGED_IN = True

    def _get_count(self, media, kind, *keys):
        """Return the first available integer count from media[kind]['count']
        or any media['edge_media_<key>']['count'] fallback path."""
        return traverse_obj(
            media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys),
            expected_type=int_or_none)

    def _get_dimension(self, name, media, webpage=None):
        """Return the 'width'/'height' dimension from the media dict, falling
        back to the og:video/video meta tags of the given webpage."""
        return (
            traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
            or int_or_none(self._html_search_meta(
                (f'og:video:{name}', f'video:{name}'), webpage or '', default=None)))

    def _extract_nodes(self, nodes, is_direct=False):
        """Yield info dicts for the video nodes of a GraphQL edge list.

        With is_direct=True the nodes are expected to carry a playable
        'video_url'; otherwise each node is deferred to InstagramIE via a
        url-type result built from its shortcode.
        """
        for idx, node in enumerate(nodes, start=1):
            # Skip anything that is not a video node
            if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                continue

            video_id = node.get('shortcode')

            if is_direct:
                info = {
                    'id': video_id or node['id'],
                    'url': node.get('video_url'),
                    'width': self._get_dimension('width', node),
                    'height': self._get_dimension('height', node),
                    'http_headers': {
                        'Referer': 'https://www.instagram.com/',
                    }
                }
            elif not video_id:
                # Cannot build a post URL without the shortcode
                continue
            else:
                info = {
                    '_type': 'url',
                    'ie_key': 'Instagram',
                    'id': video_id,
                    'url': f'https://instagram.com/p/{video_id}',
                }

            yield {
                **info,
                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
                'description': traverse_obj(
                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
                'thumbnail': traverse_obj(
                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
                'duration': float_or_none(node.get('video_duration')),
                'timestamp': int_or_none(node.get('taken_at_timestamp')),
                'view_count': int_or_none(node.get('video_view_count')),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
                'like_count': self._get_count(node, 'likes', 'preview_like'),
            }

    def _extract_product_media(self, product_media):
        """Extract formats and thumbnails from a private-API media object.

        Returns {} when the object carries neither progressive video versions
        nor a DASH manifest (i.e. it is not a video).
        """
        media_id = product_media.get('code') or product_media.get('id')
        vcodec = product_media.get('video_codec')
        dash_manifest_raw = product_media.get('video_dash_manifest')
        videos_list = product_media.get('video_versions')
        if not (dash_manifest_raw or videos_list):
            return {}

        formats = [{
            'format_id': format.get('id'),
            'url': format.get('url'),
            'width': format.get('width'),
            'height': format.get('height'),
            'vcodec': vcodec,
        } for format in videos_list or []]
        if dash_manifest_raw:
            # The manifest arrives inline as an XML string, not via URL
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))
        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail.get('url'),
            'width': thumbnail.get('width'),
            'height': thumbnail.get('height')
        } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]
        return {
            'id': media_id,
            'duration': float_or_none(product_media.get('video_duration')),
            'formats': formats,
            'thumbnails': thumbnails
        }

    def _extract_product(self, product_info):
        """Build a full info dict from a private-API product object.

        Carousel (multi-media) posts are returned as a playlist whose entries
        merge the post-level metadata with each carousel item's media info.
        """
        if isinstance(product_info, list):
            product_info = product_info[0]

        comment_data = traverse_obj(product_info, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        user_info = product_info.get('user') or {}
        info_dict = {
            'id': product_info.get('code') or product_info.get('id'),
            'title': product_info.get('title') or f'Video by {user_info.get("username")}',
            'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none),
            'timestamp': int_or_none(product_info.get('taken_at')),
            'channel': user_info.get('username'),
            'uploader': user_info.get('full_name'),
            'uploader_id': str_or_none(user_info.get('pk')),
            'view_count': int_or_none(product_info.get('view_count')),
            'like_count': int_or_none(product_info.get('like_count')),
            'comment_count': int_or_none(product_info.get('comment_count')),
            'comments': comments,
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            }
        }
        carousel_media = product_info.get('carousel_media')
        if carousel_media:
            # Multi-media post: one playlist entry per carousel item, each
            # inheriting the post-level metadata
            return {
                '_type': 'playlist',
                **info_dict,
                'title': f'Post by {user_info.get("username")}',
                'entries': [{
                    **info_dict,
                    **self._extract_product_media(product_media),
                } for product_media in carousel_media],
            }

        return {
            **info_dict,
            **self._extract_product_media(product_info)
        }
213
214
class InstagramIOSIE(InfoExtractor):
    """Handles instagram:// deep links produced by the iOS app by converting
    the numeric media id to a web shortcode and delegating to InstagramIE."""

    IE_DESC = 'IOS instagram:// URL'
    _VALID_URL = r'instagram://media\?id=(?P<id>[\d_]+)'
    _TESTS = [{
        'url': 'instagram://media?id=482584233761418119',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'B E A U T Y F O R A S H E S',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'add_ie': ['Instagram']
    }]

    def _real_extract(self, url):
        # Translate the numeric pk to the shortcode used in web URLs, then
        # hand off to the main extractor
        video_id = _pk_to_id(self._match_id(url))
        return self.url_result(f'http://instagram.com/tv/{video_id}', InstagramIE, video_id)
242
243
class InstagramIE(InstagramBaseIE):
    """Extracts single Instagram posts (p/tv/reel), trying in order: the web
    GraphQL endpoint, the private i.instagram.com API, and finally the public
    embed page."""

    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reel)/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': '2815873',
            'uploader': 'B E A U T Y F O R A S H E S',
            'channel': 'naomipq',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': '12246775',
            'uploader': 'Britney Spears',
            'channel': 'britneyspears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        # IGTV
        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
        'info_dict': {
            'id': 'BkfuX9UB-eK',
            'ext': 'mp4',
            'title': 'Fingerboarding Tricks with @cass.fb',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 53.83,
            'timestamp': 1530032919,
            'upload_date': '20180626',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
        }
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/tv/aye83DjauH/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_embed_url(webpage):
        """Return the URL of an Instagram embed found in webpage, if any."""
        # Preferred: an explicit <iframe> embed
        mobj = re.search(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
            webpage)
        if mobj:
            return mobj.group('url')

        # Fallback: the <blockquote class="instagram-media"> embed markup
        blockquote_el = get_element_by_attribute(
            'class', 'instagram-media', webpage)
        if blockquote_el is None:
            return

        mobj = re.search(
            r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
        if mobj:
            return mobj.group('link')

    def _real_extract(self, url):
        video_id, url = self._match_valid_url(url).group('id', 'url')
        # Step 1: best-effort general metadata from the web GraphQL endpoint.
        # NOTE(review): the opening brace of 'variables' is percent-encoded
        # (%7B) while the closing one is a literal '}' — confirm the endpoint
        # tolerates this asymmetry.
        general_info = self._download_json(
            f'https://www.instagram.com/graphql/query/?query_hash=9f8827793ef34641b2fb195d4d41151c'
            f'&variables=%7B"shortcode":"{video_id}",'
            '"parent_comment_count":10,"has_threaded_comments":true}', video_id, fatal=False, errnote=False,
            headers={
                'Accept': '*',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
                'Authority': 'www.instagram.com',
                'Referer': 'https://www.instagram.com',
                'x-ig-app-id': '936619743392459',
            })
        media = traverse_obj(general_info, ('data', 'shortcode_media')) or {}
        if not media:
            self.report_warning('General metadata extraction failed', video_id)

        # Step 2: the private API's media info; preferred when reachable
        info = self._download_json(
            f'https://i.instagram.com/api/v1/media/{_id_to_pk(video_id)}/info/', video_id,
            fatal=False, note='Downloading video info', errnote=False, headers={
                'Accept': '*',
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
                'Authority': 'www.instagram.com',
                'Referer': 'https://www.instagram.com',
                'x-ig-app-id': '936619743392459',
            })
        if info:
            # NOTE(review): assumes 'items' is present and non-empty; a
            # KeyError/IndexError here would abort extraction — confirm.
            media.update(info['items'][0])
            return self._extract_product(media)

        # Step 3: both APIs failed — fall back to the public embed page
        webpage = self._download_webpage(
            f'https://www.instagram.com/p/{video_id}/embed/', video_id,
            note='Downloading embed webpage', fatal=False)
        if not webpage:
            self.raise_login_required('Requested content was not found, the content might be private')

        additional_data = self._search_json(
            r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*', webpage, 'additional data', video_id, fatal=False)
        product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
        if product_item:
            media.update(product_item)
            return self._extract_product(media)

        media.update(traverse_obj(
            additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})

        username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
            r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)

        description = (
            traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str)
            or media.get('caption'))
        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                # The caption is scraped from raw JSON, so unescape \uXXXX
                description = lowercase_escape(description)

        video_url = media.get('video_url')
        if not video_url:
            # Multi-video (sidecar) post → return a playlist of child nodes
            nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or []
            if nodes:
                return self.playlist_result(
                    self._extract_nodes(nodes, True), video_id,
                    format_field(username, None, 'Post by %s'), description)

            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': self._get_dimension('width', media, webpage),
            'height': self._get_dimension('height', media, webpage),
        }]
        dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
        if dash:
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))
        self._sort_formats(formats)

        comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        # NOTE(review): the middle alternative is a non-empty list even when
        # both media keys are missing (entries with 'src': None, filtered out
        # below), so the og:image fallback on the last line is unreachable —
        # confirm whether that is intended.
        display_resources = (
            media.get('display_resources')
            or [{'src': media.get(key)} for key in ('display_src', 'display_url')]
            or [{'src': self._og_search_thumbnail(webpage)}])
        thumbnails = [{
            'url': thumbnail['src'],
            'width': thumbnail.get('config_width'),
            'height': thumbnail.get('config_height'),
        } for thumbnail in display_resources if thumbnail.get('src')]

        return {
            'id': video_id,
            'formats': formats,
            'title': media.get('title') or 'Video by %s' % username,
            'description': description,
            'duration': float_or_none(media.get('video_duration')),
            'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none),
            'uploader_id': traverse_obj(media, ('owner', 'id')),
            'uploader': traverse_obj(media, ('owner', 'full_name')),
            'channel': username,
            'like_count': self._get_count(media, 'likes', 'preview_like') or str_to_int(self._search_regex(
                r'data-log-event="likeCountClick"[^>]*>[^\d]*([\d,\.]+)', webpage, 'like count', fatal=False)),
            'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
            'comments': comments,
            'thumbnails': thumbnails,
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            }
        }
483
484
class InstagramPlaylistBaseIE(InstagramBaseIE):
    """Base for paginated GraphQL timelines (user profiles, hashtag pages).

    Subclasses must define _QUERY_HASH plus the _parse_timeline_from and
    _query_vars_for static methods.
    """

    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
        # Hard-coded fallback for pages that no longer expose rhx_gis
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                # Known X-Instagram-GIS signing recipes, tried in order
                gis_tmpls = [
                    '%s' % rhx_gis,
                    '',
                    '%s:%s' % (rhx_gis, csrf_token),
                    '%s:%s:%s' % (rhx_gis, csrf_token, self.get_param('http_headers')['User-Agent']),
                ]

            # try all of the ways to generate a GIS query, and not only use the
            # first one that works, but cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        'Downloading JSON page %d' % page_num, headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            # The GIS header is the MD5 of '<template>:<variables>'
                            'X-Instagram-GIS': hashlib.md5(
                                ('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or []
            if not nodes:
                break
            yield from self._extract_nodes(nodes)

            has_next_page = traverse_obj(media, ('page_info', 'has_next_page'))
            cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str)
            if not has_next_page or not cursor:
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)
564
565
class InstagramUserIE(InstagramPlaylistBaseIE):
    """Extracts all videos of a user profile as a paginated playlist."""

    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TESTS = [{
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        }
    }]

    # Fix: a stray trailing comma previously made this attribute a 1-tuple
    # ('42323d64886122307be10013ad2dcc44',) — the GraphQL endpoint expects the
    # plain hash string as the 'query_hash' query parameter.
    _QUERY_HASH = '42323d64886122307be10013ad2dcc44'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
        }
598
599
class InstagramTagIE(InstagramPlaylistBaseIE):
    """Extracts all videos of a hashtag page as a paginated playlist."""

    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search URLs'
    IE_NAME = 'instagram:tag'
    _TESTS = [{
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        }
    }]

    # Fix: a stray trailing comma previously made this attribute a 1-tuple
    # ('f92f56d47dc7a55b606908374b43a314',) — the GraphQL endpoint expects the
    # plain hash string as the 'query_hash' query parameter.
    _QUERY_HASH = 'f92f56d47dc7a55b606908374b43a314'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'tag_name':
                data['entry_data']['TagPage'][0]['graphql']['hashtag']['name']
        }
633
634
class InstagramStoryIE(InstagramBaseIE):
    """Extracts a story or highlight reel as a playlist of its video items."""

    _VALID_URL = r'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/]+)/(?P<id>\d+)'
    IE_NAME = 'instagram:story'

    _TESTS = [{
        'url': 'https://www.instagram.com/stories/highlights/18090946048123978/',
        'info_dict': {
            'id': '18090946048123978',
            'title': 'Rare',
        },
        'playlist_mincount': 50
    }]

    def _real_extract(self, url):
        username, story_id = self._match_valid_url(url).groups()
        story_info = self._download_webpage(url, story_id)
        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
        if not user_info:
            self.raise_login_required('This content is unreachable')
        user_id = user_info.get('id')

        # Highlight URLs use the path /stories/highlights/<id>/, so the reel
        # id becomes 'highlight:<id>' instead of the numeric user id
        story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
        videos = traverse_obj(self._download_json(
            f'https://i.instagram.com/api/v1/feed/reels_media/?reel_ids={story_info_url}',
            story_id, errnote=False, fatal=False, headers={
                'X-IG-App-ID': 936619743392459,
                'X-ASBD-ID': 198387,
                'X-IG-WWW-Claim': 0,
            }), 'reels')
        if not videos:
            self.raise_login_required('You need to log in to access this content')

        # Look up the reel under both possible keys (highlight vs. user id)
        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (str(user_id), 'user', 'full_name'))
        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
        if not story_title:
            story_title = f'Story by {username}'

        # NOTE(review): if neither key yields items this is None and the loop
        # below raises TypeError — confirm whether an explicit error is wanted.
        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
        info_data = []
        for highlight in highlights:
            highlight_data = self._extract_product(highlight)
            # Keep only items that resolved to playable video formats
            if highlight_data.get('formats'):
                info_data.append({
                    **highlight_data,
                    'uploader': full_name,
                    'uploader_id': user_id,
                })
        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)