diff --git a/yt_dlp/extractor/instagram.py b/yt_dlp/extractor/instagram.py
index 94db756403add923bcad616caf5bfb06795491be..f7f21505ea9e44c1e6d49254a220ecf21162bae2 100644
--- a/yt_dlp/extractor/instagram.py
+++ b/yt_dlp/extractor/instagram.py
@@ -3,13 +3,14 @@
 import json
 import re
 import time
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     decode_base_n,
     encode_base_n,
+    filter_dict,
     float_or_none,
     format_field,
     get_element_by_attribute,
@@ -39,37 +40,42 @@ class InstagramBaseIE(InfoExtractor):
     _NETRC_MACHINE = 'instagram'
     _IS_LOGGED_IN = False
 
+    _API_BASE_URL = 'https://i.instagram.com/api/v1'
+    _LOGIN_URL = 'https://www.instagram.com/accounts/login'
+    _API_HEADERS = {
+        'X-IG-App-ID': '936619743392459',
+        'X-ASBD-ID': '198387',
+        'X-IG-WWW-Claim': '0',
+        'Origin': 'https://www.instagram.com',
+        'Accept': '*/*',
+        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
+    }
+
     def _perform_login(self, username, password):
         if self._IS_LOGGED_IN:
             return
 
         login_webpage = self._download_webpage(
-            'https://www.instagram.com/accounts/login/', None,
-            note='Downloading login webpage', errnote='Failed to download login webpage')
+            self._LOGIN_URL, None, note='Downloading login webpage', errnote='Failed to download login webpage')
 
-        shared_data = self._parse_json(
-            self._search_regex(
-                r'window\._sharedData\s*=\s*({.+?});',
-                login_webpage, 'shared data', default='{}'),
-            None)
-
-        login = self._download_json('https://www.instagram.com/accounts/login/ajax/', None, note='Logging in', headers={
-            'Accept': '*/*',
-            'X-IG-App-ID': '936619743392459',
-            'X-ASBD-ID': '198387',
-            'X-IG-WWW-Claim': '0',
-            'X-Requested-With': 'XMLHttpRequest',
-            'X-CSRFToken': shared_data['config']['csrf_token'],
-            'X-Instagram-AJAX': shared_data['rollout_hash'],
-            'Referer': 'https://www.instagram.com/',
-        }, data=urlencode_postdata({
-            'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
-            'username': username,
-            'queryParams': '{}',
-            'optIntoOneTap': 'false',
-            'stopDeletionNonce': '',
-            'trustedDeviceRecords': '{}',
-        }))
+        shared_data = self._parse_json(self._search_regex(
+            r'window\._sharedData\s*=\s*({.+?});', login_webpage, 'shared data', default='{}'), None)
+
+        login = self._download_json(
+            f'{self._LOGIN_URL}/ajax/', None, note='Logging in', headers={
+                **self._API_HEADERS,
+                'X-Requested-With': 'XMLHttpRequest',
+                'X-CSRFToken': shared_data['config']['csrf_token'],
+                'X-Instagram-AJAX': shared_data['rollout_hash'],
+                'Referer': 'https://www.instagram.com/',
+            }, data=urlencode_postdata({
+                'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
+                'username': username,
+                'queryParams': '{}',
+                'optIntoOneTap': 'false',
+                'stopDeletionNonce': '',
+                'trustedDeviceRecords': '{}',
+            }))
 
         if not login.get('authenticated'):
             if login.get('message'):
@@ -134,7 +140,7 @@ def _extract_nodes(self, nodes, is_direct=False):
             }
 
     def _extract_product_media(self, product_media):
-        media_id = product_media.get('code') or product_media.get('id')
+        media_id = product_media.get('code') or _pk_to_id(product_media.get('pk'))
         vcodec = product_media.get('video_codec')
         dash_manifest_raw = product_media.get('video_dash_manifest')
         videos_list = product_media.get('video_versions')
@@ -150,7 +156,6 @@ def _extract_product_media(self, product_media):
         } for format in videos_list or []]
         if dash_manifest_raw:
             formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))
-        self._sort_formats(formats)
 
         thumbnails = [{
             'url': thumbnail.get('url'),
@@ -168,18 +173,9 @@ def _extract_product(self, product_info):
         if isinstance(product_info, list):
             product_info = product_info[0]
 
-        comment_data = traverse_obj(product_info, ('edge_media_to_parent_comment', 'edges'))
-        comments = [{
-            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
-            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
-            'id': traverse_obj(comment_dict, ('node', 'id')),
-            'text': traverse_obj(comment_dict, ('node', 'text')),
-            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
-        } for comment_dict in comment_data] if comment_data else None
-
         user_info = product_info.get('user') or {}
         info_dict = {
-            'id': product_info.get('code') or product_info.get('id'),
+            'id': _pk_to_id(traverse_obj(product_info, 'pk', 'id', expected_type=str_or_none)[:19]),
             'title': product_info.get('title') or f'Video by {user_info.get("username")}',
             'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none),
             'timestamp': int_or_none(product_info.get('taken_at')),
@@ -189,7 +185,7 @@ def _extract_product(self, product_info):
             'view_count': int_or_none(product_info.get('view_count')),
             'like_count': int_or_none(product_info.get('like_count')),
             'comment_count': int_or_none(product_info.get('comment_count')),
-            'comments': comments,
+            '__post_extractor': self.extract_comments(_pk_to_id(product_info.get('pk'))),
             'http_headers': {
                 'Referer': 'https://www.instagram.com/',
             }
@@ -211,6 +207,23 @@ def _extract_product(self, product_info):
             **self._extract_product_media(product_info)
         }
 
+    def _get_comments(self, video_id):
+        comments_info = self._download_json(
+            f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/comments/?can_support_threading=true&permalink_enabled=false', video_id,
+            fatal=False, errnote='Comments extraction failed', note='Downloading comments info', headers=self._API_HEADERS) or {}
+
+        comment_data = traverse_obj(comments_info, ('edge_media_to_parent_comment', 'edges'), 'comments')
+        for comment_dict in comment_data or []:
+            yield {
+                'author': traverse_obj(comment_dict, ('node', 'owner', 'username'), ('user', 'username')),
+                'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id'), ('user', 'pk')),
+                'author_thumbnail': traverse_obj(comment_dict, ('node', 'owner', 'profile_pic_url'), ('user', 'profile_pic_url'), expected_type=url_or_none),
+                'id': traverse_obj(comment_dict, ('node', 'id'), 'pk'),
+                'text': traverse_obj(comment_dict, ('node', 'text'), 'text'),
+                'like_count': traverse_obj(comment_dict, ('node', 'edge_liked_by', 'count'), 'comment_like_count', expected_type=int_or_none),
+                'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), 'created_at', expected_type=int_or_none),
+            }
+
 
 class InstagramIOSIE(InfoExtractor):
     IE_DESC = 'IOS instagram:// URL'
@@ -253,7 +266,7 @@ class InstagramIE(InstagramBaseIE):
             'title': 'Video by naomipq',
             'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
             'thumbnail': r're:^https?://.*\.jpg',
-            'duration': 0,
+            'duration': 8.747,
             'timestamp': 1371748545,
             'upload_date': '20130620',
             'uploader_id': '2815873',
@@ -263,27 +276,34 @@ class InstagramIE(InstagramBaseIE):
             'comment_count': int,
             'comments': list,
         },
+        'expected_warnings': [
+            'General metadata extraction failed',
+            'Main webpage is locked behind the login page',
+        ],
     }, {
-        # missing description
-        'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
+        # reel
+        'url': 'https://www.instagram.com/reel/Chunk8-jurw/',
+        'md5': 'f6d8277f74515fa3ff9f5791426e42b1',
         'info_dict': {
-            'id': 'BA-pQFBG8HZ',
+            'id': 'Chunk8-jurw',
             'ext': 'mp4',
-            'title': 'Video by britneyspears',
+            'title': 'Video by instagram',
+            'description': 'md5:c9cde483606ed6f80fbe9283a6a2b290',
             'thumbnail': r're:^https?://.*\.jpg',
-            'duration': 0,
-            'timestamp': 1453760977,
-            'upload_date': '20160125',
-            'uploader_id': '12246775',
-            'uploader': 'Britney Spears',
-            'channel': 'britneyspears',
+            'duration': 5.016,
+            'timestamp': 1661529231,
+            'upload_date': '20220826',
+            'uploader_id': '25025320',
+            'uploader': 'Instagram',
+            'channel': 'instagram',
             'like_count': int,
             'comment_count': int,
             'comments': list,
         },
-        'params': {
-            'skip_download': True,
-        },
+        'expected_warnings': [
+            'General metadata extraction failed',
+            'Main webpage is locked behind the login page',
+        ],
     }, {
         # multi video post
         'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
@@ -292,18 +312,24 @@ class InstagramIE(InstagramBaseIE):
                 'id': 'BQ0dSaohpPW',
                 'ext': 'mp4',
                 'title': 'Video 1',
+                'thumbnail': r're:^https?://.*\.jpg',
+                'view_count': int,
             },
         }, {
             'info_dict': {
                 'id': 'BQ0dTpOhuHT',
                 'ext': 'mp4',
                 'title': 'Video 2',
+                'thumbnail': r're:^https?://.*\.jpg',
+                'view_count': int,
             },
         }, {
             'info_dict': {
                 'id': 'BQ0dT7RBFeF',
                 'ext': 'mp4',
                 'title': 'Video 3',
+                'thumbnail': r're:^https?://.*\.jpg',
+                'view_count': int,
             },
         }],
         'info_dict': {
@@ -311,6 +337,10 @@ class InstagramIE(InstagramBaseIE):
             'title': 'Post by instagram',
             'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
         },
+        'expected_warnings': [
+            'General metadata extraction failed',
+            'Main webpage is locked behind the login page',
+        ],
     }, {
         # IGTV
         'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
@@ -329,7 +359,11 @@ class InstagramIE(InstagramBaseIE):
             'comment_count': int,
             'comments': list,
             'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
-        }
+        },
+        'expected_warnings': [
+            'General metadata extraction failed',
+            'Main webpage is locked behind the login page',
+        ],
     }, {
         'url': 'https://instagram.com/p/-Cmh1cukG2/',
         'only_matching': True,
@@ -360,49 +394,75 @@ def _extract_embed_urls(cls, url, webpage):
 
     def _real_extract(self, url):
         video_id, url = self._match_valid_url(url).group('id', 'url')
+        media, webpage = {}, ''
+
+        if self._get_cookies(url).get('sessionid'):
+            info = traverse_obj(self._download_json(
+                f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/info/', video_id,
+                fatal=False, errnote='Video info extraction failed',
+                note='Downloading video info', headers=self._API_HEADERS), ('items', 0))
+            if info:
+                media.update(info)
+                return self._extract_product(media)
+
+        api_check = self._download_json(
+            f'{self._API_BASE_URL}/web/get_ruling_for_content/?content_type=MEDIA&target_id={_id_to_pk(video_id)}',
+            video_id, headers=self._API_HEADERS, fatal=False, note='Setting up session', errnote=False) or {}
+        csrf_token = self._get_cookies('https://www.instagram.com').get('csrftoken')
+
+        if not csrf_token:
+            self.report_warning('No csrf token set by Instagram API', video_id)
+        else:
+            csrf_token = csrf_token.value if api_check.get('status') == 'ok' else None
+            if not csrf_token:
+                self.report_warning('Instagram API is not granting access', video_id)
+
+        variables = {
+            'shortcode': video_id,
+            'child_comment_count': 3,
+            'fetch_comment_count': 40,
+            'parent_comment_count': 24,
+            'has_threaded_comments': True,
+        }
         general_info = self._download_json(
-            f'https://www.instagram.com/graphql/query/?query_hash=9f8827793ef34641b2fb195d4d41151c'
-            f'&variables=%7B"shortcode":"{video_id}",'
-            '"parent_comment_count":10,"has_threaded_comments":true}', video_id, fatal=False, errnote=False,
+            'https://www.instagram.com/graphql/query/', video_id, fatal=False, errnote=False,
             headers={
-                'Accept': '*',
-                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
-                'Authority': 'www.instagram.com',
-                'Referer': 'https://www.instagram.com',
-                'x-ig-app-id': '936619743392459',
+                **self._API_HEADERS,
+                'X-CSRFToken': csrf_token or '',
+                'X-Requested-With': 'XMLHttpRequest',
+                'Referer': url,
+            }, query={
+                'query_hash': '9f8827793ef34641b2fb195d4d41151c',
+                'variables': json.dumps(variables, separators=(',', ':')),
             })
-        media = traverse_obj(general_info, ('data', 'shortcode_media')) or {}
-        if not media:
-            self.report_warning('General metadata extraction failed', video_id)
-
-        info = self._download_json(
-            f'https://i.instagram.com/api/v1/media/{_id_to_pk(video_id)}/info/', video_id,
-            fatal=False, note='Downloading video info', errnote=False, headers={
-                'Accept': '*',
-                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
-                'Authority': 'www.instagram.com',
-                'Referer': 'https://www.instagram.com',
-                'x-ig-app-id': '936619743392459',
-            })
-        if info:
-            media.update(info['items'][0])
-            return self._extract_product(media)
-
-        webpage = self._download_webpage(
-            f'https://www.instagram.com/p/{video_id}/embed/', video_id,
-            note='Downloading embed webpage', fatal=False)
-        if not webpage:
-            self.raise_login_required('Requested content was not found, the content might be private')
-
-        additional_data = self._search_json(
-            r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*', webpage, 'additional data', video_id, fatal=False)
-        product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
-        if product_item:
-            media.update(product_item)
-            return self._extract_product(media)
-
-        media.update(traverse_obj(
-            additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})
+        media.update(traverse_obj(general_info, ('data', 'shortcode_media')) or {})
+
+        if not general_info:
+            self.report_warning('General metadata extraction failed (some metadata might be missing).', video_id)
+            webpage, urlh = self._download_webpage_handle(url, video_id)
+            shared_data = self._search_json(
+                r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {}
+
+            if shared_data and self._LOGIN_URL not in urlh.url:
+                media.update(traverse_obj(
+                    shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
+                    ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {})
+            else:
+                self.report_warning('Main webpage is locked behind the login page. Retrying with embed webpage (some metadata might be missing).')
+                webpage = self._download_webpage(
+                    f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False)
+                additional_data = self._search_json(
+                    r'window\.__additionalDataLoaded\s*\(\s*[^,]+,', webpage, 'additional data', video_id, fatal=False)
+                if not additional_data and not media:
+                    self.raise_login_required('Requested content is not available, rate-limit reached or login required')
+
+                product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
+                if product_item:
+                    media.update(product_item)
+                    return self._extract_product(media)
+
+                media.update(traverse_obj(
+                    additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})
 
         username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
             r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)
@@ -434,7 +494,6 @@ def _real_extract(self, url):
         dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
         if dash:
             formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))
-        self._sort_formats(formats)
 
         comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))
         comments = [{
@@ -531,7 +590,7 @@ def _extract_graphql(self, data, url):
                 except ExtractorError as e:
                     # if it's an error caused by a bad query, and there are
                     # more GIS templates to try, ignore it and keep trying
-                    if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
+                    if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                         if gis_tmpl != gis_tmpls[-1]:
                             continue
                     raise
@@ -558,6 +617,7 @@ def _real_extract(self, url):
 
 
 class InstagramUserIE(InstagramPlaylistBaseIE):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
     IE_DESC = 'Instagram user profile'
     IE_NAME = 'instagram:user'
@@ -645,32 +705,31 @@ def _real_extract(self, url):
         user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
         if not user_info:
             self.raise_login_required('This content is unreachable')
-        user_id = user_info.get('id')
 
+        user_id = traverse_obj(user_info, 'pk', 'id', expected_type=str)
         story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
+        if not story_info_url:  # user id is only mandatory for non-highlights
+            raise ExtractorError('Unable to extract user id')
+
         videos = traverse_obj(self._download_json(
-            f'https://i.instagram.com/api/v1/feed/reels_media/?reel_ids={story_info_url}',
-            story_id, errnote=False, fatal=False, headers={
-                'X-IG-App-ID': 936619743392459,
-                'X-ASBD-ID': 198387,
-                'X-IG-WWW-Claim': 0,
-            }), 'reels')
+            f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
+            story_id, errnote=False, fatal=False, headers=self._API_HEADERS), 'reels')
         if not videos:
             self.raise_login_required('You need to log in to access this content')
 
-        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (str(user_id), 'user', 'full_name'))
+        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (user_id, 'user', 'full_name'))
         story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
         if not story_title:
             story_title = f'Story by {username}'
 
-        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
+        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (user_id, 'items'))
         info_data = []
         for highlight in highlights:
             highlight_data = self._extract_product(highlight)
             if highlight_data.get('formats'):
                 info_data.append({
-                    **highlight_data,
                     'uploader': full_name,
                     'uploader_id': user_id,
+                    **filter_dict(highlight_data),
                 })
         return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)
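Usage note (not part of the diff above): with '__post_extractor' wired into _extract_product(), comments are no longer scraped eagerly from the GraphQL payload but fetched lazily by _get_comments() only when comment extraction is requested. The following is a minimal sketch of exercising this through yt-dlp's Python API, reusing the reel URL from the updated test case; the option values are illustrative assumptions, not part of this change.

import yt_dlp

# Illustrative options: metadata only, and ask for comments so the
# post-extractor added in this diff actually runs.
ydl_opts = {
    'skip_download': True,
    'getcomments': True,
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.instagram.com/reel/Chunk8-jurw/', download=False)
    print(info.get('id'), info.get('title'), len(info.get('comments') or []))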