]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/instagram.py
[extractor/instagram] Fix bugs in 7d3b98be4c4567b985ba7d7b17057e930457edc9 (#4701)
[yt-dlp.git] / yt_dlp / extractor / instagram.py
1 import hashlib
2 import itertools
3 import json
4 import re
5 import time
6 import urllib.error
7
8 from .common import InfoExtractor
9 from ..utils import (
10 ExtractorError,
11 decode_base_n,
12 encode_base_n,
13 float_or_none,
14 format_field,
15 get_element_by_attribute,
16 int_or_none,
17 lowercase_escape,
18 str_or_none,
19 str_to_int,
20 traverse_obj,
21 url_or_none,
22 urlencode_postdata,
23 )
24
# Alphabet used by Instagram shortcodes: URL-safe base64 character ordering
# (A-Z, a-z, 0-9, '-', '_'), used by _pk_to_id/_id_to_pk below.
_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
26
27
def _pk_to_id(id):
    """Convert a numeric media PK to its shortcode.

    Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id
    """
    # PKs may carry a '_<user-id>' suffix; only the leading numeric part matters.
    numeric_part, _, _ = id.partition('_')
    return encode_base_n(int(numeric_part), table=_ENCODING_CHARS)
31
32
def _id_to_pk(shortcode):
    """Convert a shortcode to its numeric media PK."""
    # Only the first 11 characters encode the PK; anything after is ignored.
    code = shortcode[:11]
    return decode_base_n(code, table=_ENCODING_CHARS)
36
37
class InstagramBaseIE(InfoExtractor):
    """Shared login, counting and media-extraction helpers for all Instagram extractors."""

    _NETRC_MACHINE = 'instagram'
    # Process-wide flag so login is performed at most once; cookies are shared.
    _IS_LOGGED_IN = False

    _API_BASE_URL = 'https://i.instagram.com/api/v1'
    _LOGIN_URL = 'https://www.instagram.com/accounts/login'
    # Headers mimicking Instagram's own web client; sent with every API request.
    _API_HEADERS = {
        'X-IG-App-ID': '936619743392459',
        'X-ASBD-ID': '198387',
        'X-IG-WWW-Claim': '0',
        'Origin': 'https://www.instagram.com',
        'Accept': '*/*',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
    }

    def _perform_login(self, username, password):
        """Log in via the AJAX endpoint, classifying server responses into user-facing errors."""
        if self._IS_LOGGED_IN:
            return

        login_webpage = self._download_webpage(
            self._LOGIN_URL, None, note='Downloading login webpage', errnote='Failed to download login webpage')

        # The login page embeds its config (CSRF token, rollout hash) in window._sharedData.
        shared_data = self._parse_json(self._search_regex(
            r'window\._sharedData\s*=\s*({.+?});', login_webpage, 'shared data', default='{}'), None)

        login = self._download_json(
            f'{self._LOGIN_URL}/ajax/', None, note='Logging in', headers={
                **self._API_HEADERS,
                'X-Requested-With': 'XMLHttpRequest',
                # NOTE(review): these raise KeyError when _sharedData was not found
                # (default '{}' above) — presumably intended as a hard failure; confirm.
                'X-CSRFToken': shared_data['config']['csrf_token'],
                'X-Instagram-AJAX': shared_data['rollout_hash'],
                'Referer': 'https://www.instagram.com/',
            }, data=urlencode_postdata({
                # Envelope format Instagram's browser client uses for the password.
                'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
                'username': username,
                'queryParams': '{}',
                'optIntoOneTap': 'false',
                'stopDeletionNonce': '',
                'trustedDeviceRecords': '{}',
            }))

        if not login.get('authenticated'):
            # Distinguish "wrong password" (user truthy) from "unknown user" (user is False).
            if login.get('message'):
                raise ExtractorError(f'Unable to login: {login["message"]}')
            elif login.get('user'):
                raise ExtractorError('Unable to login: Sorry, your password was incorrect. Please double-check your password.', expected=True)
            elif login.get('user') is False:
                raise ExtractorError('Unable to login: The username you entered doesn\'t belong to an account. Please check your username and try again.', expected=True)
            raise ExtractorError('Unable to login')
        # Set on the class so every extractor instance sees the logged-in state.
        InstagramBaseIE._IS_LOGGED_IN = True

    def _get_count(self, media, kind, *keys):
        """Return the first available count from media[kind]['count'] or any edge_media_<key>['count']."""
        return traverse_obj(
            media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys),
            expected_type=int_or_none)

    def _get_dimension(self, name, media, webpage=None):
        """Return 'width'/'height' from the media dict, falling back to og:/video: meta tags."""
        return (
            traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
            or int_or_none(self._html_search_meta(
                (f'og:video:{name}', f'video:{name}'), webpage or '', default=None)))

    def _extract_nodes(self, nodes, is_direct=False):
        """Yield info dicts for the video nodes of a GraphQL edge list.

        When is_direct is True, yields playable entries with a direct video URL;
        otherwise yields url-type entries that re-enter the Instagram extractor.
        """
        for idx, node in enumerate(nodes, start=1):
            # Skip photo nodes; only GraphVideo / is_video entries are wanted.
            if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                continue

            video_id = node.get('shortcode')

            if is_direct:
                info = {
                    'id': video_id or node['id'],
                    'url': node.get('video_url'),
                    'width': self._get_dimension('width', node),
                    'height': self._get_dimension('height', node),
                    'http_headers': {
                        # Instagram CDN requires a Referer to serve the media.
                        'Referer': 'https://www.instagram.com/',
                    }
                }
            elif not video_id:
                # Without a shortcode there is no URL to delegate to.
                continue
            else:
                info = {
                    '_type': 'url',
                    'ie_key': 'Instagram',
                    'id': video_id,
                    'url': f'https://instagram.com/p/{video_id}',
                }

            yield {
                **info,
                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
                'description': traverse_obj(
                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
                'thumbnail': traverse_obj(
                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
                'duration': float_or_none(node.get('video_duration')),
                'timestamp': int_or_none(node.get('taken_at_timestamp')),
                'view_count': int_or_none(node.get('video_view_count')),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
                'like_count': self._get_count(node, 'likes', 'preview_like'),
            }

    def _extract_product_media(self, product_media):
        """Extract formats/thumbnails from one API 'media' item; returns {} if it has no video."""
        # NOTE(review): _pk_to_id(None) would raise if both 'code' and 'pk' are
        # missing — presumably one of them is always present; confirm.
        media_id = product_media.get('code') or _pk_to_id(product_media.get('pk'))
        vcodec = product_media.get('video_codec')
        dash_manifest_raw = product_media.get('video_dash_manifest')
        videos_list = product_media.get('video_versions')
        if not (dash_manifest_raw or videos_list):
            return {}

        formats = [{
            'format_id': format.get('id'),
            'url': format.get('url'),
            'width': format.get('width'),
            'height': format.get('height'),
            'vcodec': vcodec,
        } for format in videos_list or []]
        # The DASH manifest is embedded as an XML string, not a URL.
        if dash_manifest_raw:
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))
        self._sort_formats(formats)

        thumbnails = [{
            'url': thumbnail.get('url'),
            'width': thumbnail.get('width'),
            'height': thumbnail.get('height')
        } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]
        return {
            'id': media_id,
            'duration': float_or_none(product_media.get('video_duration')),
            'formats': formats,
            'thumbnails': thumbnails
        }

    def _extract_product(self, product_info):
        """Build a full info dict (or playlist for carousel posts) from an API 'items' entry."""
        if isinstance(product_info, list):
            product_info = product_info[0]

        comment_data = traverse_obj(product_info, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        user_info = product_info.get('user') or {}
        info_dict = {
            'id': product_info.get('code') or _pk_to_id(product_info.get('pk')),
            'title': product_info.get('title') or f'Video by {user_info.get("username")}',
            'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none),
            'timestamp': int_or_none(product_info.get('taken_at')),
            'channel': user_info.get('username'),
            'uploader': user_info.get('full_name'),
            'uploader_id': str_or_none(user_info.get('pk')),
            'view_count': int_or_none(product_info.get('view_count')),
            'like_count': int_or_none(product_info.get('like_count')),
            'comment_count': int_or_none(product_info.get('comment_count')),
            'comments': comments,
            'http_headers': {
                # Required by Instagram's CDN.
                'Referer': 'https://www.instagram.com/',
            }
        }
        # Carousel posts (multiple media in one post) become a playlist; every
        # entry inherits the post-level metadata.
        carousel_media = product_info.get('carousel_media')
        if carousel_media:
            return {
                '_type': 'playlist',
                **info_dict,
                'title': f'Post by {user_info.get("username")}',
                'entries': [{
                    **info_dict,
                    **self._extract_product_media(product_media),
                } for product_media in carousel_media],
            }

        return {
            **info_dict,
            **self._extract_product_media(product_info)
        }
218
219
class InstagramIOSIE(InfoExtractor):
    """Handles iOS app deep links of the form instagram://media?id=<numeric pk>."""
    IE_DESC = 'IOS instagram:// URL'
    _VALID_URL = r'instagram://media\?id=(?P<id>[\d_]+)'
    _TESTS = [{
        'url': 'instagram://media?id=482584233761418119',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'B E A U T Y F O R A S H E S',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'add_ie': ['Instagram']
    }]

    def _real_extract(self, url):
        # Translate the numeric media PK into a web shortcode, then delegate
        # the actual extraction to InstagramIE via a regular web URL.
        media_pk = self._match_id(url)
        shortcode = _pk_to_id(media_pk)
        return self.url_result(f'http://instagram.com/tv/{shortcode}', InstagramIE, shortcode)
247
248
class InstagramIE(InstagramBaseIE):
    """Single post / reel / IGTV extractor.

    Extraction strategy, in order of preference:
      1. the private API (requires a logged-in session cookie),
      2. the public GraphQL query endpoint,
      3. the post webpage / its /embed/ variant as a last resort.
    """
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reel)/(?P<id>[^/?#&]+))'
    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1']
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': '2815873',
            'uploader': 'B E A U T Y F O R A S H E S',
            'channel': 'naomipq',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
    }, {
        # missing description
        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
        'info_dict': {
            'id': 'BA-pQFBG8HZ',
            'ext': 'mp4',
            'title': 'Video by britneyspears',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1453760977,
            'upload_date': '20160125',
            'uploader_id': '12246775',
            'uploader': 'Britney Spears',
            'channel': 'britneyspears',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
    }, {
        # IGTV
        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
        'info_dict': {
            'id': 'BkfuX9UB-eK',
            'ext': 'mp4',
            'title': 'Fingerboarding Tricks with @cass.fb',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 53.83,
            'timestamp': 1530032919,
            'upload_date': '20180626',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
        }
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/tv/aye83DjauH/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/',
        'only_matching': True,
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Find embedded Instagram URLs: iframe embeds first, then blockquote embeds."""
        res = tuple(super()._extract_embed_urls(url, webpage))
        if res:
            return res

        # Blockquote-style embeds carry class="instagram-media" with a plain link inside.
        mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1',
                         get_element_by_attribute('class', 'instagram-media', webpage) or '')
        if mobj:
            return [mobj.group('link')]

    def _real_extract(self, url):
        video_id, url = self._match_valid_url(url).group('id', 'url')
        media, webpage = {}, ''

        # This request primes the session (sets cookies, including csrftoken)
        # and tells us whether the API will grant access at all.
        api_check = self._download_json(
            f'{self._API_BASE_URL}/web/get_ruling_for_content/?content_type=MEDIA&target_id={_id_to_pk(video_id)}',
            video_id, headers=self._API_HEADERS, fatal=False, note='Setting up session', errnote=False) or {}
        csrf_token = self._get_cookies('https://www.instagram.com').get('csrftoken')

        if not csrf_token:
            self.report_warning('No csrf token set by Instagram API', video_id)
        elif api_check.get('status') != 'ok':
            self.report_warning('Instagram API is not granting access', video_id)
        else:
            # Logged-in sessions can use the richer private media-info API.
            if self._get_cookies(url).get('sessionid'):
                media.update(traverse_obj(self._download_json(
                    f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/info/', video_id,
                    fatal=False, note='Downloading video info', headers={
                        **self._API_HEADERS,
                        'X-CSRFToken': csrf_token.value,
                    }), ('items', 0)) or {})
            if media:
                return self._extract_product(media)

        variables = {
            'shortcode': video_id,
            'child_comment_count': 3,
            'fetch_comment_count': 40,
            'parent_comment_count': 24,
            'has_threaded_comments': True,
        }
        general_info = self._download_json(
            'https://www.instagram.com/graphql/query/', video_id, fatal=False,
            headers={
                **self._API_HEADERS,
                # BUGFIX: csrf_token may be None here (the branches above only
                # warn); dereferencing .value unconditionally crashed extraction
                # with AttributeError. Send the header only when the cookie exists.
                **({'X-CSRFToken': csrf_token.value} if csrf_token else {}),
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': url,
            }, query={
                'query_hash': '9f8827793ef34641b2fb195d4d41151c',
                'variables': json.dumps(variables, separators=(',', ':')),
            })
        media.update(traverse_obj(general_info, ('data', 'shortcode_media')) or {})

        if not media:
            # Fall back to scraping the post webpage (or its embed page when
            # the main page redirects to the login wall).
            self.report_warning('General metadata extraction failed (some metadata might be missing).', video_id)
            webpage, urlh = self._download_webpage_handle(url, video_id)
            shared_data = self._search_json(
                r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {}

            if shared_data and self._LOGIN_URL not in urlh.geturl():
                media.update(traverse_obj(
                    shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
                    ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {})
            else:
                self.report_warning('Main webpage is locked behind the login page. Retrying with embed webpage')
                webpage = self._download_webpage(
                    f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False)
                additional_data = self._search_json(
                    r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*', webpage, 'additional data', video_id, fatal=False)
                if not additional_data:
                    self.raise_login_required('Requested content is not available, rate-limit reached or login required')

                # The embed payload may be either API-shaped ('items') or
                # GraphQL-shaped ('shortcode_media').
                product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
                if product_item:
                    media.update(product_item)
                    return self._extract_product(media)

                media.update(traverse_obj(
                    additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})

        username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
            r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)

        description = (
            traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str)
            or media.get('caption'))
        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                # Raw JSON string from the page may contain \uXXXX escapes.
                description = lowercase_escape(description)

        video_url = media.get('video_url')
        if not video_url:
            # A sidecar (multi-media) post becomes a playlist of its video children.
            nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or []
            if nodes:
                return self.playlist_result(
                    self._extract_nodes(nodes, True), video_id,
                    format_field(username, None, 'Post by %s'), description)

            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': self._get_dimension('width', media, webpage),
            'height': self._get_dimension('height', media, webpage),
        }]
        # The DASH manifest, when present, is an inline XML string.
        dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
        if dash:
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))
        self._sort_formats(formats)

        comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        display_resources = (
            media.get('display_resources')
            or [{'src': media.get(key)} for key in ('display_src', 'display_url')]
            or [{'src': self._og_search_thumbnail(webpage)}])
        thumbnails = [{
            'url': thumbnail['src'],
            'width': thumbnail.get('config_width'),
            'height': thumbnail.get('config_height'),
        } for thumbnail in display_resources if thumbnail.get('src')]

        return {
            'id': video_id,
            'formats': formats,
            'title': media.get('title') or 'Video by %s' % username,
            'description': description,
            'duration': float_or_none(media.get('video_duration')),
            'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none),
            'uploader_id': traverse_obj(media, ('owner', 'id')),
            'uploader': traverse_obj(media, ('owner', 'full_name')),
            'channel': username,
            'like_count': self._get_count(media, 'likes', 'preview_like') or str_to_int(self._search_regex(
                r'data-log-event="likeCountClick"[^>]*>[^\d]*([\d,\.]+)', webpage, 'like count', fatal=False)),
            'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
            'comments': comments,
            'thumbnails': thumbnails,
            'http_headers': {
                # Required by Instagram's CDN.
                'Referer': 'https://www.instagram.com/',
            }
        }
507
508
class InstagramPlaylistBaseIE(InstagramBaseIE):
    """Base for paginated GraphQL playlists (user profiles, hashtags).

    Subclasses must provide _QUERY_HASH, _parse_timeline_from() and _query_vars_for().
    """
    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
        # rhx_gis disappeared from newer pages; fall back to a known constant.
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

            # The X-Instagram-GIS signature format has changed over time;
            # try each known template until one is accepted.
            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                gis_tmpls = [
                    '%s' % rhx_gis,
                    '',
                    '%s:%s' % (rhx_gis, csrf_token),
                    '%s:%s:%s' % (rhx_gis, csrf_token, self.get_param('http_headers')['User-Agent']),
                ]

            # try all of the ways to generate a GIS query, and not only use the
            # first one that works, but cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        'Downloading JSON page %d' % page_num, headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            # Signature is md5("<template>:<variables json>").
                            'X-Instagram-GIS': hashlib.md5(
                                ('%s:%s' % (gis_tmpl, variables)).encode('utf-8')).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or []
            if not nodes:
                break
            yield from self._extract_nodes(nodes)

            # Continue only while the API reports another page and gives a cursor.
            has_next_page = traverse_obj(media, ('page_info', 'has_next_page'))
            cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str)
            if not has_next_page or not cursor:
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)
588
589
class InstagramUserIE(InstagramPlaylistBaseIE):
    """Playlist of all videos on a user profile, via the paginated GraphQL timeline."""
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TESTS = [{
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        }
    }]

    # BUGFIX: a stray trailing comma previously made this a 1-tuple
    # ('42323d...',) instead of the query_hash string; it only worked by
    # accident of urlencode's sequence handling.
    _QUERY_HASH = '42323d64886122307be10013ad2dcc44'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id']
        }
622
623
class InstagramTagIE(InstagramPlaylistBaseIE):
    """Playlist of videos for a hashtag, via the paginated GraphQL hashtag feed."""
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search URLs'
    IE_NAME = 'instagram:tag'
    _TESTS = [{
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        }
    }]

    # BUGFIX: a stray trailing comma previously made this a 1-tuple
    # ('f92f56...',) instead of the query_hash string; it only worked by
    # accident of urlencode's sequence handling.
    _QUERY_HASH = 'f92f56d47dc7a55b606908374b43a314'

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'tag_name':
                data['entry_data']['TagPage'][0]['graphql']['hashtag']['name']
        }
657
658
class InstagramStoryIE(InstagramBaseIE):
    """Stories and story highlights; returns a playlist of the reel's video items."""
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/]+)/(?P<id>\d+)'
    IE_NAME = 'instagram:story'

    _TESTS = [{
        'url': 'https://www.instagram.com/stories/highlights/18090946048123978/',
        'info_dict': {
            'id': '18090946048123978',
            'title': 'Rare',
        },
        'playlist_mincount': 50
    }]

    def _real_extract(self, url):
        username, story_id = self._match_valid_url(url).groups()
        story_info = self._download_webpage(url, story_id)
        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
        if not user_info:
            # raise_login_required raises, so user_info is valid below this point.
            self.raise_login_required('This content is unreachable')
        user_id = user_info.get('id')

        # Highlights are keyed as 'highlight:<id>'; regular stories by the numeric user id.
        story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
        videos = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
            story_id, errnote=False, fatal=False, headers=self._API_HEADERS), 'reels')
        if not videos:
            self.raise_login_required('You need to log in to access this content')

        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (str(user_id), 'user', 'full_name'))
        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
        if not story_title:
            story_title = f'Story by {username}'

        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
        info_data = []
        # BUGFIX: highlights may be None when the reel carries no 'items' list
        # (e.g. expired or empty story); iterating None raised TypeError.
        for highlight in highlights or []:
            highlight_data = self._extract_product(highlight)
            # Only keep items that actually resolved to playable video formats.
            if highlight_data.get('formats'):
                info_data.append({
                    **highlight_data,
                    'uploader': full_name,
                    'uploader_id': user_id,
                })
        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)