# yt_dlp/extractor/instagram.py
1 import hashlib
2 import itertools
3 import json
4 import re
5 import time
6
7 from .common import InfoExtractor
8 from ..networking.exceptions import HTTPError
9 from ..utils import (
10 ExtractorError,
11 decode_base_n,
12 encode_base_n,
13 filter_dict,
14 float_or_none,
15 format_field,
16 get_element_by_attribute,
17 int_or_none,
18 lowercase_escape,
19 str_or_none,
20 str_to_int,
21 traverse_obj,
22 url_or_none,
23 urlencode_postdata,
24 )
25
# 64-character alphabet used by Instagram shortcodes (URL-safe base64 ordering: A-Z, a-z, 0-9, '-', '_')
_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
27
28
def _pk_to_id(media_id):
    """Convert a numeric media PK into its shortcode.

    Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id

    Accepts either a string or an int: callers pass API values such as
    ``product_media.get('pk')``, which the API may return as an int, so the
    value is stringified before splitting off any ``_<user id>`` suffix.
    """
    pk = int(str(media_id).split('_')[0])  # str(): pk may arrive as an int from the API
    return encode_base_n(pk, table=_ENCODING_CHARS)
32
33
def _id_to_pk(shortcode):
    """Convert a shortcode to its numeric media PK.

    Only the first 11 characters of the shortcode are significant; any extra
    characters are ignored.
    """
    return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
37
38
class InstagramBaseIE(InfoExtractor):
    """Shared login handling and media-extraction helpers for all Instagram extractors."""

    _NETRC_MACHINE = 'instagram'
    # Class-level flag so the login is performed only once across all Instagram extractors
    _IS_LOGGED_IN = False

    _API_BASE_URL = 'https://i.instagram.com/api/v1'
    _LOGIN_URL = 'https://www.instagram.com/accounts/login'
    # Headers mimicking the official web client; X-IG-App-ID identifies the web app
    _API_HEADERS = {
        'X-IG-App-ID': '936619743392459',
        'X-ASBD-ID': '198387',
        'X-IG-WWW-Claim': '0',
        'Origin': 'https://www.instagram.com',
        'Accept': '*/*',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
    }

    def _perform_login(self, username, password):
        """Log in via the AJAX login endpoint; raises ExtractorError on failure."""
        if self._IS_LOGGED_IN:
            return

        login_webpage = self._download_webpage(
            self._LOGIN_URL, None, note='Downloading login webpage', errnote='Failed to download login webpage')

        # window._sharedData carries the CSRF token and rollout hash required by the AJAX endpoint
        shared_data = self._parse_json(self._search_regex(
            r'window\._sharedData\s*=\s*({.+?});', login_webpage, 'shared data', default='{}'), None)

        login = self._download_json(
            f'{self._LOGIN_URL}/ajax/', None, note='Logging in', headers={
                **self._API_HEADERS,
                'X-Requested-With': 'XMLHttpRequest',
                'X-CSRFToken': shared_data['config']['csrf_token'],
                'X-Instagram-AJAX': shared_data['rollout_hash'],
                'Referer': 'https://www.instagram.com/',
            }, data=urlencode_postdata({
                # Password is sent in Instagram's versioned plaintext envelope with a timestamp
                'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
                'username': username,
                'queryParams': '{}',
                'optIntoOneTap': 'false',
                'stopDeletionNonce': '',
                'trustedDeviceRecords': '{}',
            }))

        if not login.get('authenticated'):
            if login.get('message'):
                raise ExtractorError(f'Unable to login: {login["message"]}')
            elif login.get('user'):
                # A truthy 'user' means the account exists but the password did not match
                raise ExtractorError('Unable to login: Sorry, your password was incorrect. Please double-check your password.', expected=True)
            elif login.get('user') is False:
                # An explicit False means the username does not exist
                raise ExtractorError('Unable to login: The username you entered doesn\'t belong to an account. Please check your username and try again.', expected=True)
            raise ExtractorError('Unable to login')
        InstagramBaseIE._IS_LOGGED_IN = True

    def _get_count(self, media, kind, *keys):
        # Try the direct '<kind>.count' path first, then each 'edge_media_<key>.count' fallback
        return traverse_obj(
            media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys),
            expected_type=int_or_none)

    def _get_dimension(self, name, media, webpage=None):
        # Prefer the media dict's own 'dimensions'; fall back to og:/video: meta tags in the webpage
        return (
            traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
            or int_or_none(self._html_search_meta(
                (f'og:video:{name}', f'video:{name}'), webpage or '', default=None)))

    def _extract_nodes(self, nodes, is_direct=False):
        """Yield entries for the video nodes of a GraphQL edge list.

        When is_direct is true, yields full info dicts with a direct video URL;
        otherwise yields url-type results that re-enter InstagramIE per shortcode.
        Non-video nodes are skipped.
        """
        for idx, node in enumerate(nodes, start=1):
            # Two node schemas are seen in the wild: '__typename' or an 'is_video' flag
            if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                continue

            video_id = node.get('shortcode')

            if is_direct:
                info = {
                    'id': video_id or node['id'],
                    'url': node.get('video_url'),
                    'width': self._get_dimension('width', node),
                    'height': self._get_dimension('height', node),
                    'http_headers': {
                        'Referer': 'https://www.instagram.com/',
                    },
                }
            elif not video_id:
                # Without a shortcode there is no URL to defer to
                continue
            else:
                info = {
                    '_type': 'url',
                    'ie_key': 'Instagram',
                    'id': video_id,
                    'url': f'https://instagram.com/p/{video_id}',
                }

            yield {
                **info,
                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
                'description': traverse_obj(
                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
                'thumbnail': traverse_obj(
                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
                'duration': float_or_none(node.get('video_duration')),
                'timestamp': int_or_none(node.get('taken_at_timestamp')),
                'view_count': int_or_none(node.get('video_view_count')),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
                'like_count': self._get_count(node, 'likes', 'preview_like'),
            }

    def _extract_product_media(self, product_media):
        """Build formats/thumbnails for a single media item from the private API.

        Returns an empty dict when the item has neither a DASH manifest nor
        progressive video versions (e.g. a photo).
        """
        media_id = product_media.get('code') or _pk_to_id(product_media.get('pk'))
        vcodec = product_media.get('video_codec')
        dash_manifest_raw = product_media.get('video_dash_manifest')
        videos_list = product_media.get('video_versions')
        if not (dash_manifest_raw or videos_list):
            return {}

        formats = [{
            'format_id': fmt.get('id'),
            'url': fmt.get('url'),
            'width': fmt.get('width'),
            'height': fmt.get('height'),
            'vcodec': vcodec,
        } for fmt in videos_list or []]
        if dash_manifest_raw:
            # The API embeds the raw MPD XML inline rather than linking to it
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))

        thumbnails = [{
            'url': thumbnail.get('url'),
            'width': thumbnail.get('width'),
            'height': thumbnail.get('height'),
        } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]
        return {
            'id': media_id,
            'duration': float_or_none(product_media.get('video_duration')),
            'formats': formats,
            'thumbnails': thumbnails,
        }

    def _extract_product(self, product_info):
        """Build the final info dict (or a playlist for carousel posts) from private-API data."""
        if isinstance(product_info, list):
            product_info = product_info[0]

        user_info = product_info.get('user') or {}
        info_dict = {
            # [:19] trims the pk to the media-id portion before encoding —
            # presumably stripping a trailing user-id suffix (TODO confirm)
            'id': _pk_to_id(traverse_obj(product_info, 'pk', 'id', expected_type=str_or_none)[:19]),
            'title': product_info.get('title') or f'Video by {user_info.get("username")}',
            'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none),
            'timestamp': int_or_none(product_info.get('taken_at')),
            'channel': user_info.get('username'),
            'uploader': user_info.get('full_name'),
            'uploader_id': str_or_none(user_info.get('pk')),
            'view_count': int_or_none(product_info.get('view_count')),
            'like_count': int_or_none(product_info.get('like_count')),
            'comment_count': int_or_none(product_info.get('comment_count')),
            '__post_extractor': self.extract_comments(_pk_to_id(product_info.get('pk'))),
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            },
        }
        carousel_media = product_info.get('carousel_media')
        if carousel_media:
            # Multi-item post: one playlist entry per carousel item, sharing the post metadata
            return {
                '_type': 'playlist',
                **info_dict,
                'title': f'Post by {user_info.get("username")}',
                'entries': [{
                    **info_dict,
                    **self._extract_product_media(product_media),
                } for product_media in carousel_media],
            }

        return {
            **info_dict,
            **self._extract_product_media(product_info),
        }

    def _get_comments(self, video_id):
        """Yield comment dicts for a video, fetched from the private comments API.

        Handles both the GraphQL edge schema ('edge_media_to_parent_comment')
        and the flat 'comments' list schema via alternate traverse paths.
        """
        comments_info = self._download_json(
            f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/comments/?can_support_threading=true&permalink_enabled=false', video_id,
            fatal=False, errnote='Comments extraction failed', note='Downloading comments info', headers=self._API_HEADERS) or {}

        comment_data = traverse_obj(comments_info, ('edge_media_to_parent_comment', 'edges'), 'comments')
        for comment_dict in comment_data or []:
            yield {
                'author': traverse_obj(comment_dict, ('node', 'owner', 'username'), ('user', 'username')),
                'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id'), ('user', 'pk')),
                'author_thumbnail': traverse_obj(comment_dict, ('node', 'owner', 'profile_pic_url'), ('user', 'profile_pic_url'), expected_type=url_or_none),
                'id': traverse_obj(comment_dict, ('node', 'id'), 'pk'),
                'text': traverse_obj(comment_dict, ('node', 'text'), 'text'),
                'like_count': traverse_obj(comment_dict, ('node', 'edge_liked_by', 'count'), 'comment_like_count', expected_type=int_or_none),
                'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), 'created_at', expected_type=int_or_none),
            }
226
227
class InstagramIOSIE(InfoExtractor):
    """Resolve instagram:// app-scheme URLs by delegating to InstagramIE."""

    IE_DESC = 'IOS instagram:// URL'
    _VALID_URL = r'instagram://media\?id=(?P<id>[\d_]+)'
    _TESTS = [{
        'url': 'instagram://media?id=482584233761418119',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'B E A U T Y F O R A S H E S',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'add_ie': ['Instagram'],
    }]

    def _real_extract(self, url):
        # The app URL carries the numeric media pk; convert it to a shortcode
        # and hand the equivalent web URL over to InstagramIE.
        shortcode = _pk_to_id(self._match_id(url))
        return self.url_result(f'http://instagram.com/tv/{shortcode}', InstagramIE, shortcode)
255
256
class InstagramIE(InstagramBaseIE):
    """Extractor for single Instagram posts: /p/, /tv/ (IGTV) and /reel(s)/ URLs."""

    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))'
    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1']
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 8.747,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': '2815873',
            'uploader': 'B E A U T Y F O R A S H E S',
            'channel': 'naomipq',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # reel
        'url': 'https://www.instagram.com/reel/Chunk8-jurw/',
        'md5': 'f6d8277f74515fa3ff9f5791426e42b1',
        'info_dict': {
            'id': 'Chunk8-jurw',
            'ext': 'mp4',
            'title': 'Video by instagram',
            'description': 'md5:c9cde483606ed6f80fbe9283a6a2b290',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 5.016,
            'timestamp': 1661529231,
            'upload_date': '20220826',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # IGTV
        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
        'info_dict': {
            'id': 'BkfuX9UB-eK',
            'ext': 'mp4',
            'title': 'Fingerboarding Tricks with @cass.fb',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 53.83,
            'timestamp': 1530032919,
            'upload_date': '20180626',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/tv/aye83DjauH/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reels/Cop84x6u7CP/',
        'only_matching': True,
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Find embedded Instagram post URLs: iframe embeds first, then the
        blockquote-style 'instagram-media' embed's anchor link."""
        res = tuple(super()._extract_embed_urls(url, webpage))
        if res:
            return res

        mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1',
                         get_element_by_attribute('class', 'instagram-media', webpage) or '')
        if mobj:
            return [mobj.group('link')]

    def _real_extract(self, url):
        # Extraction strategy, in order of preference:
        #   1. logged-in private API (/media/<pk>/info/) when a sessionid cookie exists
        #   2. anonymous GraphQL query (after priming a session via the ruling endpoint)
        #   3. the post's webpage (window._sharedData)
        #   4. the /embed/ page (window.__additionalDataLoaded)
        # Partial results from each step are merged into `media`.
        video_id, url = self._match_valid_url(url).group('id', 'url')
        media, webpage = {}, ''

        if self._get_cookies(url).get('sessionid'):
            info = traverse_obj(self._download_json(
                f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/info/', video_id,
                fatal=False, errnote='Video info extraction failed',
                note='Downloading video info', headers=self._API_HEADERS), ('items', 0))
            if info:
                media.update(info)
                return self._extract_product(media)

        # Anonymous path: this request makes Instagram set a csrftoken cookie
        # and tells us whether anonymous access is granted at all
        api_check = self._download_json(
            f'{self._API_BASE_URL}/web/get_ruling_for_content/?content_type=MEDIA&target_id={_id_to_pk(video_id)}',
            video_id, headers=self._API_HEADERS, fatal=False, note='Setting up session', errnote=False) or {}
        csrf_token = self._get_cookies('https://www.instagram.com').get('csrftoken')

        if not csrf_token:
            self.report_warning('No csrf token set by Instagram API', video_id)
        else:
            csrf_token = csrf_token.value if api_check.get('status') == 'ok' else None
            if not csrf_token:
                self.report_warning('Instagram API is not granting access', video_id)

        variables = {
            'shortcode': video_id,
            'child_comment_count': 3,
            'fetch_comment_count': 40,
            'parent_comment_count': 24,
            'has_threaded_comments': True,
        }
        general_info = self._download_json(
            'https://www.instagram.com/graphql/query/', video_id, fatal=False, errnote=False,
            headers={
                **self._API_HEADERS,
                'X-CSRFToken': csrf_token or '',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': url,
            }, query={
                # query_hash identifies the server-side persisted GraphQL query
                'query_hash': '9f8827793ef34641b2fb195d4d41151c',
                'variables': json.dumps(variables, separators=(',', ':')),
            })
        media.update(traverse_obj(general_info, ('data', 'shortcode_media')) or {})

        if not general_info:
            # GraphQL failed: fall back to scraping the post's own webpage
            self.report_warning('General metadata extraction failed (some metadata might be missing).', video_id)
            webpage, urlh = self._download_webpage_handle(url, video_id)
            shared_data = self._search_json(
                r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {}

            # A redirect to the login URL means the main webpage is inaccessible
            if shared_data and self._LOGIN_URL not in urlh.url:
                media.update(traverse_obj(
                    shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
                    ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {})
            else:
                # Last resort: the embed page, which is reachable without login
                self.report_warning('Main webpage is locked behind the login page. Retrying with embed webpage (some metadata might be missing).')
                webpage = self._download_webpage(
                    f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False)
                additional_data = self._search_json(
                    r'window\.__additionalDataLoaded\s*\(\s*[^,]+,', webpage, 'additional data', video_id, fatal=False)
                if not additional_data and not media:
                    self.raise_login_required('Requested content is not available, rate-limit reached or login required')

                # Private-API-shaped payload embedded in the page takes priority
                product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
                if product_item:
                    media.update(product_item)
                    return self._extract_product(media)

                media.update(traverse_obj(
                    additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})

        username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
            r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)

        description = (
            traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str)
            or media.get('caption'))
        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                # The regex captures a JSON-escaped string; unescape it
                description = lowercase_escape(description)

        video_url = media.get('video_url')
        if not video_url:
            # No direct video: this may be a sidecar (multi-video) post
            nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or []
            if nodes:
                return self.playlist_result(
                    self._extract_nodes(nodes, True), video_id,
                    format_field(username, None, 'Post by %s'), description)

            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': self._get_dimension('width', media, webpage),
            'height': self._get_dimension('height', media, webpage),
        }]
        dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
        if dash:
            # Inline MPD XML, same as in the private API responses
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))

        comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        # Thumbnail candidates: sized display_resources, then single display URLs, then og:image
        display_resources = (
            media.get('display_resources')
            or [{'src': media.get(key)} for key in ('display_src', 'display_url')]
            or [{'src': self._og_search_thumbnail(webpage)}])
        thumbnails = [{
            'url': thumbnail['src'],
            'width': thumbnail.get('config_width'),
            'height': thumbnail.get('config_height'),
        } for thumbnail in display_resources if thumbnail.get('src')]

        return {
            'id': video_id,
            'formats': formats,
            'title': media.get('title') or f'Video by {username}',
            'description': description,
            'duration': float_or_none(media.get('video_duration')),
            'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none),
            'uploader_id': traverse_obj(media, ('owner', 'id')),
            'uploader': traverse_obj(media, ('owner', 'full_name')),
            'channel': username,
            'like_count': self._get_count(media, 'likes', 'preview_like') or str_to_int(self._search_regex(
                r'data-log-event="likeCountClick"[^>]*>[^\d]*([\d,\.]+)', webpage, 'like count', fatal=False)),
            'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
            'comments': comments,
            'thumbnails': thumbnails,
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            },
        }
539
540
class InstagramPlaylistBaseIE(InstagramBaseIE):
    """Base for paginated GraphQL timelines (user profiles, hashtags).

    Subclasses provide _QUERY_HASH plus the static hooks _parse_timeline_from
    and _query_vars_for.
    """

    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
        # Fallback value observed to work when the page does not provide rhx_gis
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

            # The X-Instagram-GIS signature is an MD5 over '<template>:<variables>';
            # the exact template varies, so several candidates are tried
            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                gis_tmpls = [
                    f'{rhx_gis}',
                    '',
                    f'{rhx_gis}:{csrf_token}',
                    '{}:{}:{}'.format(rhx_gis, csrf_token, self.get_param('http_headers')['User-Agent']),
                ]

            # try all of the ways to generate a GIS query, and not only use the
            # first one that works, but cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        f'Downloading JSON page {page_num}', headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-Instagram-GIS': hashlib.md5(
                                (f'{gis_tmpl}:{variables}').encode()).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or []
            if not nodes:
                break
            yield from self._extract_nodes(nodes)

            # Cursor-based pagination: stop when the API reports no further page
            has_next_page = traverse_obj(media, ('page_info', 'has_next_page'))
            cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str)
            if not has_next_page or not cursor:
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)
620
621
class InstagramUserIE(InstagramPlaylistBaseIE):
    """All videos posted by a single user, paginated through the GraphQL timeline."""

    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TESTS = [{
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        },
    }]

    _QUERY_HASH = ('42323d64886122307be10013ad2dcc44',)

    @staticmethod
    def _parse_timeline_from(data):
        """Pull the owner's media timeline out of a GraphQL query result."""
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        """Extra timeline-query variables derived from the profile page's GraphQL blob."""
        profile_page = data['entry_data']['ProfilePage'][0]
        return {'id': profile_page['graphql']['user']['id']}
655
656
class InstagramTagIE(InstagramPlaylistBaseIE):
    """All videos under a hashtag, paginated through the GraphQL timeline."""

    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search URLs'
    IE_NAME = 'instagram:tag'
    _TESTS = [{
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        },
    }]

    _QUERY_HASH = ('f92f56d47dc7a55b606908374b43a314',)

    @staticmethod
    def _parse_timeline_from(data):
        """Pull the hashtag's media timeline out of a GraphQL query result."""
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        """Extra timeline-query variables derived from the tag page's GraphQL blob."""
        tag_page = data['entry_data']['TagPage'][0]
        return {'tag_name': tag_page['graphql']['hashtag']['name']}
690
691
class InstagramStoryIE(InstagramBaseIE):
    """Extractor for story/highlight playlists (/stories/<user>/<id>)."""

    _VALID_URL = r'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/]+)/(?P<id>\d+)'
    IE_NAME = 'instagram:story'

    _TESTS = [{
        'url': 'https://www.instagram.com/stories/highlights/18090946048123978/',
        'info_dict': {
            'id': '18090946048123978',
            'title': 'Rare',
        },
        'playlist_mincount': 50,
    }]

    def _real_extract(self, url):
        username, story_id = self._match_valid_url(url).groups()
        story_info = self._download_webpage(url, story_id)
        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
        if not user_info:
            self.raise_login_required('This content is unreachable')

        # Highlights are keyed as 'highlight:<id>'; regular stories by the owner's user id
        user_id = traverse_obj(user_info, 'pk', 'id', expected_type=str)
        story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
        if not story_info_url:  # user id is only mandatory for non-highlights
            raise ExtractorError('Unable to extract user id')

        videos = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
            story_id, errnote=False, fatal=False, headers=self._API_HEADERS), 'reels')
        if not videos:
            self.raise_login_required('You need to log in to access this content')

        # Metadata may live under either the highlight key or the user-id key
        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (user_id, 'user', 'full_name'))
        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
        if not story_title:
            story_title = f'Story by {username}'

        # NOTE(review): highlights is None when neither key holds 'items', which
        # would make the loop below raise TypeError — confirm the API guarantees it
        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (user_id, 'items'))
        info_data = []
        for highlight in highlights:
            highlight_data = self._extract_product(highlight)
            # Items without formats (e.g. photos) are skipped
            if highlight_data.get('formats'):
                info_data.append({
                    'uploader': full_name,
                    'uploader_id': user_id,
                    **filter_dict(highlight_data),
                })
        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)