X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/49c258e18deadee9db559aa8df1e947d72ba1557..182b6ae8a6b12ad49f2fa880f8db436f9a79a8ba:/yt_dlp/extractor/youtube.py diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py index e27253e37..1b4f3960b 100644 --- a/yt_dlp/extractor/youtube.py +++ b/yt_dlp/extractor/youtube.py @@ -2,7 +2,9 @@ from __future__ import unicode_literals +import base64 import calendar +import copy import hashlib import itertools import json @@ -26,6 +28,7 @@ from ..jsinterp import JSInterpreter from ..utils import ( bool_or_none, + bytes_to_intlist, clean_html, dict_get, datetime_from_str, @@ -34,6 +37,7 @@ format_field, float_or_none, int_or_none, + intlist_to_bytes, mimetype2ext, parse_codecs, parse_duration, @@ -97,8 +101,9 @@ def warn(message): if username: warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies']) return - # Everything below this is broken! + # Everything below this is broken! + r''' # No authentication to be performed if username is None: if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None: @@ -271,6 +276,7 @@ def req(url, f_req, note, errnote): return False return True + ''' def _initialize_consent(self): cookies = self._get_cookies('https://www.youtube.com/') @@ -294,13 +300,148 @@ def _real_initialize(self): if not self._login(): return - _YT_WEB_CLIENT_VERSION = '2.20210407.08.00' - _YT_INNERTUBE_API_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8' _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;' _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;' _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|= retries: + if fatal: + raise ExtractorError(last_error) + else: + self.report_warning(last_error) + return + return response + @staticmethod def is_music_url(url): return re.match(r'https?://music\.youtube\.com/', url) is not None @@ -667,6 +949,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): } _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt') + _AGE_GATE_REASONS = ( + 'Sign in to confirm your age', + 'This video may be inappropriate for some users.', + 'Sorry, this content is age-restricted.') + _GEO_BYPASS = False IE_NAME = 'youtube' @@ -1346,7 +1633,32 @@ class YoutubeIE(YoutubeBaseInfoExtractor): # multiple subtitles with same lang_code 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug', 'only_matching': True, + }, { + # Force use android client fallback + 'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY', + 'info_dict': { + 'id': 'YOelRv7fMxY', + 'title': 'Digging a Secret Tunnel from my Workshop', + 'ext': '3gp', + 'upload_date': '20210624', + 'channel_id': 'UCp68_FLety0O-n9QU6phsgw', + 'uploader': 'colinfurze', + 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw', + 'description': 'md5:ecb672623246d98c6c562eed6ae798c3' + }, + 'params': { + 'format': '17', # 3gp format available on android + 'extractor_args': {'youtube': {'player_client': ['android']}}, + }, }, + { + # Skip download of additional client configs (remix client config in this case) + 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs', + 'only_matching': True, + 'params': { + 'extractor_args': {'youtube': {'player_skip': ['configs']}}, + }, + } ] @classmethod @@ -1364,6 +1676,19 @@ def __init__(self, *args, **kwargs): self._code_cache = {} self._player_cache = {} + def _extract_player_url(self, ytcfg=None, webpage=None): + player_url = try_get(ytcfg, (lambda x: x['PLAYER_JS_URL']), 
str)
+        if not player_url:
+            player_url = self._search_regex(
+                r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
+                webpage, 'player URL', fatal=False)
+        if not player_url:
+            return None
+        if player_url.startswith('//'):
+            player_url = 'https:' + player_url
+        elif not re.match(r'https?://', player_url):
+            player_url = compat_urlparse.urljoin(
+                'https://www.youtube.com', player_url)
+        return player_url
+
     def _signature_cache_id(self, example_sig):
         """ Return a string representation of a signature """
         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@@ -1378,6 +1703,15 @@ def _extract_player_info(cls, player_url):
             raise ExtractorError('Cannot identify player %r' % player_url)
         return id_m.group('id')
 
+    def _load_player(self, video_id, player_url, fatal=True) -> bool:
+        player_id = self._extract_player_info(player_url)
+        if player_id not in self._code_cache:
+            self._code_cache[player_id] = self._download_webpage(
+                player_url, video_id, fatal=fatal,
+                note='Downloading player ' + player_id,
+                errnote='Download of %s failed' % player_url)
+        return player_id in self._code_cache
+
     def _extract_signature_function(self, video_id, player_url, example_sig):
         player_id = self._extract_player_info(player_url)
 
@@ -1390,20 +1724,16 @@ def _extract_signature_function(self, video_id, player_url, example_sig):
         if cache_spec is not None:
             return lambda s: ''.join(s[i] for i in cache_spec)
 
-        if player_id not in self._code_cache:
-            self._code_cache[player_id] = self._download_webpage(
-                player_url, video_id,
-                note='Downloading player ' + player_id,
-                errnote='Download of %s failed' % player_url)
-        code = self._code_cache[player_id]
-        res = self._parse_sig_js(code)
+        if self._load_player(video_id, player_url):
+            code = self._code_cache[player_id]
+            res = self._parse_sig_js(code)
 
-        test_string = ''.join(map(compat_chr, range(len(example_sig))))
-        cache_res = res(test_string)
-        cache_spec = [ord(c) for c in cache_res]
+            test_string = ''.join(map(compat_chr, range(len(example_sig))))
+            cache_res = res(test_string)
+            cache_spec = [ord(c) for c in cache_res]
 
-        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
-        return res
+            self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
+            return res
 
     def _print_sig_code(self, func, example_sig):
         def gen_sig_code(idxs):
@@ -1474,11 +1804,6 @@ def _decrypt_signature(self, s, video_id, player_url):
         if player_url is None:
             raise ExtractorError('Cannot decrypt signature without player_url')
 
-        if player_url.startswith('//'):
-            player_url = 'https:' + player_url
-        elif not re.match(r'https?://', player_url):
-            player_url = compat_urlparse.urljoin(
-                'https://www.youtube.com', player_url)
         try:
             player_id = (player_url, self._signature_cache_id(s))
             if player_id not in self._player_cache:
@@ -1495,6 +1820,31 @@ def _decrypt_signature(self, s, video_id, player_url):
             raise ExtractorError(
                 'Signature extraction failed: ' + tb, cause=e)
 
+    def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
+        """
+        Extract signatureTimestamp (sts)
+        Required to tell API what sig/player version is in use.
+        """
+        sts = None
+        if isinstance(ytcfg, dict):
+            sts = int_or_none(ytcfg.get('STS'))
+
+        if not sts:
+            # Attempt to extract from player
+            if player_url is None:
+                error_msg = 'Cannot extract signature timestamp without player_url.'
+                if fatal:
+                    raise ExtractorError(error_msg)
+                self.report_warning(error_msg)
+                return
+            if self._load_player(video_id, player_url, fatal=fatal):
+                player_id = self._extract_player_info(player_url)
+                code = self._code_cache[player_id]
+                sts = int_or_none(self._search_regex(
+                    r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
+                    'JS player signature timestamp', group='sts', fatal=fatal))
+        return sts
+
     def _mark_watched(self, video_id, player_response):
         playback_url = url_or_none(try_get(
             player_response,
@@ -1652,14 +2002,15 @@ def _extract_comment(self, comment_renderer, parent=None):
             lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
         author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
-        is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
+        is_favorited = 'creatorHeart' in (try_get(
+            comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
         return {
             'id': comment_id,
             'text': text,
             'timestamp': timestamp,
             'time_text': time_text,
             'like_count': votes,
-            'is_favorited': is_liked,
+            'is_favorited': is_favorited,
             'author': author,
             'author_id': author_id,
             'author_thumbnail': author_thumbnail,
@@ -1668,10 +2019,43 @@ def _extract_comment(self, comment_renderer, parent=None):
         }
 
     def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
-                         ytcfg, session_token_list, parent=None, comment_counts=None):
+                         ytcfg, video_id, parent=None, comment_counts=None):
 
-        def extract_thread(parent_renderer):
-            contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
+        def extract_header(contents):
+            _total_comments = 0
+            _continuation = None
+            for content in contents:
+                comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
+                expected_comment_count = try_get(comments_header_renderer,
+                                                 (lambda x: x['countText']['runs'][0]['text'],
+                                                  lambda x: x['commentsCount']['runs'][0]['text']),
+                                                 compat_str)
+                if expected_comment_count:
+                    comment_counts[1] = str_to_int(expected_comment_count)
+                    self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
+                    _total_comments = comment_counts[1]
+                sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
+                comment_sort_index = int(sort_mode_str != 'top')  # 1 = new, 0 = top
+
+                sort_menu_item = try_get(
+                    comments_header_renderer,
+                    lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
+                sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
+
+                _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
+                if not _continuation:
+                    continue
+
+                sort_text = sort_menu_item.get('title')
+                if isinstance(sort_text, compat_str):
+                    sort_text = sort_text.lower()
+                else:
+                    sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
+                self.to_screen('Sorting comments by %s' % sort_text)
+                break
+            return _total_comments, _continuation
+
+        def extract_thread(contents):
             if not parent:
                 comment_counts[2] = 0
             for content in contents:
@@ -1695,107 +2079,48 @@ def extract_thread(parent_renderer):
                     comment_counts[2] += 1
                     comment_entries_iter = self._comment_entries(
                         comment_replies_renderer, identity_token, account_syncid, ytcfg,
-                        parent=comment.get('id'), session_token_list=session_token_list,
-                        comment_counts=comment_counts)
+                        video_id, parent=comment.get('id'), comment_counts=comment_counts)
 
                     for reply_comment in comment_entries_iter:
                         yield reply_comment
 
+        # YouTube
comments have a max depth of 2 + max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf') + if max_depth == 1 and parent: + return if not comment_counts: # comment so far, est. total comments, current comment thread # comment_counts = [0, 0, 0] - # TODO: Generalize the download code with TabIE - context = self._extract_context(ytcfg) - visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str) - continuation = YoutubeTabIE._extract_continuation(root_continuation_data) # TODO - first_continuation = False - if parent is None: - first_continuation = True + continuation = self._extract_continuation(root_continuation_data) + if continuation and len(continuation['ctoken']) < 27: + self.write_debug('Detected old API continuation token. Generating new API compatible token.') + continuation_token = self._generate_comment_continuation(video_id) + continuation = self._build_continuation_query(continuation_token, None) + + visitor_data = None + is_first_continuation = parent is None for page_num in itertools.count(0): if not continuation: break headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data) - retries = self.get_param('extractor_retries', 3) - count = -1 - last_error = None - - while count < retries: - count += 1 - if last_error: - self.report_warning('%s. Retrying ...' % last_error) - try: - query = { - 'ctoken': continuation['ctoken'], - 'pbj': 1, - 'type': 'next', - } - if parent: - query['action_get_comment_replies'] = 1 - else: - query['action_get_comments'] = 1 - - comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1]) - if page_num == 0: - if first_continuation: - note_prefix = 'Downloading initial comment continuation page' - else: - note_prefix = ' Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str) - else: - note_prefix = '%sDownloading comment%s page %d %s' % ( - ' ' if parent else '', - ' replies' if parent else '', - page_num, - comment_prog_str) - - browse = self._download_json( - 'https://www.youtube.com/comment_service_ajax', None, - '%s %s' % (note_prefix, '(retry #%d)' % count if count else ''), - headers=headers, query=query, - data=urlencode_postdata({ - 'session_token': session_token_list[0] - })) - except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413): - if e.cause.code == 413: - self.report_warning('Assumed end of comments (received HTTP Error 413)') - return - # Downloading page may result in intermittent 5xx HTTP error - # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289 - last_error = 'HTTP Error %s' % e.cause.code - if e.cause.code == 404: - last_error = last_error + ' (this API is probably deprecated)' - if count < retries: - continue - raise + comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1]) + if page_num == 0: + if is_first_continuation: + note_prefix = 'Downloading comment section API JSON' else: - session_token = try_get(browse, lambda x: x['xsrf_token'], compat_str) - if session_token: - session_token_list[0] = session_token - - response = try_get(browse, - (lambda x: x['response'], - lambda x: x[1]['response'])) or {} - - if response.get('continuationContents'): - break - - # YouTube sometimes gives reload: now json if something went wrong (e.g. 
bad auth) - if browse.get('reload'): - raise ExtractorError('Invalid or missing params in continuation request', expected=False) - - # TODO: not tested, merged from old extractor - err_msg = browse.get('externalErrorMessage') - if err_msg: - raise ExtractorError('YouTube said: %s' % err_msg, expected=False) - - # Youtube sometimes sends incomplete data - # See: https://github.com/ytdl-org/youtube-dl/issues/28194 - last_error = 'Incomplete data received' - if count >= retries: - raise ExtractorError(last_error) + note_prefix = ' Downloading comment API JSON reply thread %d %s' % ( + comment_counts[2], comment_prog_str) + else: + note_prefix = '%sDownloading comment%s API JSON page %d %s' % ( + ' ' if parent else '', ' replies' if parent else '', + page_num, comment_prog_str) + response = self._extract_response( + item_id=None, query=self._continuation_query_ajax_to_api(continuation), + ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, + check_get_keys=('onResponseReceivedEndpoints', 'continuationContents')) if not response: break visitor_data = try_get( @@ -1803,80 +2128,107 @@ def extract_thread(parent_renderer): lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'], compat_str) or visitor_data - known_continuation_renderers = { - 'itemSectionContinuation': extract_thread, - 'commentRepliesContinuation': extract_thread - } + continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents')) - # extract next root continuation from the results - continuation_contents = try_get( - response, lambda x: x['continuationContents'], dict) or {} - - for key, value in continuation_contents.items(): - if key not in known_continuation_renderers: - continue - continuation_renderer = value - - if first_continuation: - first_continuation = False - expected_comment_count = try_get( - continuation_renderer, - (lambda x: x['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'], - lambda x: x['header']['commentsHeaderRenderer']['commentsCount']['runs'][0]['text']), - compat_str) - - if expected_comment_count: - comment_counts[1] = str_to_int(expected_comment_count) - self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count)) - yield comment_counts[1] - - # TODO: cli arg. - # 1/True for newest, 0/False for popular (default) - comment_sort_index = int(True) - sort_continuation_renderer = try_get( - continuation_renderer, - lambda x: x['header']['commentsHeaderRenderer']['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'] - [comment_sort_index]['continuation']['reloadContinuationData'], dict) - # If this fails, the initial continuation page - # starts off with popular anyways. 
- if sort_continuation_renderer: - continuation = YoutubeTabIE._build_continuation_query( - continuation=sort_continuation_renderer.get('continuation'), - ctp=sort_continuation_renderer.get('clickTrackingParams')) - self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest')) + continuation = None + if isinstance(continuation_contents, list): + for continuation_section in continuation_contents: + if not isinstance(continuation_section, dict): + continue + continuation_items = try_get( + continuation_section, + (lambda x: x['reloadContinuationItemsCommand']['continuationItems'], + lambda x: x['appendContinuationItemsAction']['continuationItems']), + list) or [] + if is_first_continuation: + total_comments, continuation = extract_header(continuation_items) + if total_comments: + yield total_comments + is_first_continuation = False + if continuation: + break + continue + count = 0 + for count, entry in enumerate(extract_thread(continuation_items)): + yield entry + continuation = self._extract_continuation({'contents': continuation_items}) + if continuation: + # Sometimes YouTube provides a continuation without any comments + # In most cases we end up just downloading these with very little comments to come. + if count == 0: + if not parent: + self.report_warning('No comments received - assuming end of comments') + continuation = None break - for entry in known_continuation_renderers[key](continuation_renderer): - yield entry + # Deprecated response structure + elif isinstance(continuation_contents, dict): + known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation') + for key, continuation_renderer in continuation_contents.items(): + if key not in known_continuation_renderers: + continue + if not isinstance(continuation_renderer, dict): + continue + if is_first_continuation: + header_continuation_items = [continuation_renderer.get('header') or {}] + total_comments, continuation = extract_header(header_continuation_items) + if total_comments: + yield total_comments + is_first_continuation = False + if continuation: + break - continuation = YoutubeTabIE._extract_continuation(continuation_renderer) # TODO - break + # Sometimes YouTube provides a continuation without any comments + # In most cases we end up just downloading these with very little comments to come. 
+ count = 0 + for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {})): + yield entry + continuation = self._extract_continuation(continuation_renderer) + if count == 0: + if not parent: + self.report_warning('No comments received - assuming end of comments') + continuation = None + break + + @staticmethod + def _generate_comment_continuation(video_id): + """ + Generates initial comment section continuation token from given video id + """ + b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8'))) + parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u') + new_continuation_intlist = list(itertools.chain.from_iterable( + [bytes_to_intlist(base64.b64decode(part)) for part in parts])) + return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8') - def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token): + def _extract_comments(self, ytcfg, video_id, contents, webpage): """Entry for comment extraction""" + def _real_comment_extract(contents): + if isinstance(contents, list): + for entry in contents: + for key, renderer in entry.items(): + if key not in known_entry_comment_renderers: + continue + yield from self._comment_entries( + renderer, video_id=video_id, ytcfg=ytcfg, + identity_token=self._extract_identity_token(webpage, item_id=video_id), + account_syncid=self._extract_account_syncid(ytcfg)) + break comments = [] - known_entry_comment_renderers = ( - 'itemSectionRenderer', - ) + known_entry_comment_renderers = ('itemSectionRenderer',) estimated_total = 0 - for entry in contents: - for key, renderer in entry.items(): - if key not in known_entry_comment_renderers: - continue + max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0]) or float('inf') - comment_iter = self._comment_entries( - renderer, - identity_token=self._extract_identity_token(webpage, item_id=video_id), - account_syncid=self._extract_account_syncid(ytcfg), - ytcfg=ytcfg, - session_token_list=[xsrf_token]) - - for comment in comment_iter: - if isinstance(comment, int): - estimated_total = comment - continue - comments.append(comment) - break + try: + for comment in _real_comment_extract(contents): + if len(comments) >= max_comments: + break + if isinstance(comment, int): + estimated_total = comment + continue + comments.append(comment) + except KeyboardInterrupt: + self.to_screen('Interrupted by user') self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total)) return { 'comments': comments, @@ -1884,14 +2236,37 @@ def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token): } @staticmethod - def _get_video_info_params(video_id): + def _generate_player_context(sts=None): + context = { + 'html5Preference': 'HTML5_PREF_WANTS', + } + if sts is not None: + context['signatureTimestamp'] = sts return { + 'playbackContext': { + 'contentPlaybackContext': context + } + } + + @staticmethod + def _get_video_info_params(video_id, client='TVHTML5'): + GVI_CLIENTS = { + 'ANDROID': { + 'c': 'ANDROID', + 'cver': '16.20', + }, + 'TVHTML5': { + 'c': 'TVHTML5', + 'cver': '6.20180913', + } + } + query = { 'video_id': video_id, 'eurl': 'https://youtube.googleapis.com/v/' + video_id, - 'html5': '1', - 'c': 'TVHTML5', - 'cver': '6.20180913', + 'html5': '1' } + query.update(GVI_CLIENTS.get(client)) + return query def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) @@ -1904,6 +2279,19 @@ def _real_extract(self, url): webpage = 
self._download_webpage( webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False) + ytcfg = self._extract_ytcfg(video_id, webpage) or self._get_default_ytcfg() + identity_token = self._extract_identity_token(webpage, video_id) + syncid = self._extract_account_syncid(ytcfg) + headers = self._generate_api_headers(ytcfg, identity_token, syncid) + + player_url = self._extract_player_url(ytcfg, webpage) + + player_client = self._configuration_arg('player_client', [''])[0] + if player_client not in ('web', 'android', ''): + self.report_warning(f'Invalid player_client {player_client} given. Falling back to android client.') + force_mobile_client = player_client != 'web' + player_skip = self._configuration_arg('player_skip') + def get_text(x): if not x: return @@ -1917,23 +2305,35 @@ def get_text(x): ytm_streaming_data = {} if is_music_url: - # we are forcing to use parse_json because 141 only appeared in get_video_info. - # el, c, cver, cplayer field required for 141(aac 256kbps) codec - # maybe paramter of youtube music player? - ytm_player_response = self._parse_json(try_get(compat_parse_qs( - self._download_webpage( - base_url + 'get_video_info', video_id, - 'Fetching youtube music info webpage', - 'unable to download youtube music info webpage', query={ - **self._get_video_info_params(video_id), - 'el': 'detailpage', - 'c': 'WEB_REMIX', - 'cver': '0.1', - 'cplayer': 'UNIPLAYER', - }, fatal=False) or ''), - lambda x: x['player_response'][0], - compat_str) or '{}', video_id, fatal=False) - ytm_streaming_data = ytm_player_response.get('streamingData') or {} + ytm_webpage = None + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + if sts and not force_mobile_client and 'configs' not in player_skip: + ytm_webpage = self._download_webpage( + 'https://music.youtube.com', + video_id, fatal=False, note='Downloading remix client config') + + ytm_cfg = self._extract_ytcfg(video_id, ytm_webpage) or {} + ytm_client = 'WEB_REMIX' + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning('Falling back to android remix client for player API.') + ytm_client = 'ANDROID_MUSIC' + ytm_cfg = {} + + ytm_headers = self._generate_api_headers( + ytm_cfg, identity_token, syncid, + client=ytm_client) + ytm_query = {'videoId': video_id} + ytm_query.update(self._generate_player_context(sts)) + + ytm_player_response = self._extract_response( + item_id=video_id, ep='player', query=ytm_query, + ytcfg=ytm_cfg, headers=ytm_headers, fatal=False, + default_client=ytm_client, + note='Downloading %sremix player API JSON' % ('android ' if force_mobile_client else '')) + ytm_streaming_data = try_get(ytm_player_response, lambda x: x['streamingData'], dict) or {} player_response = None if webpage: @@ -1941,20 +2341,81 @@ def get_text(x): webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') - ytcfg = self._extract_ytcfg(video_id, webpage) - if not player_response: - player_response = self._call_api( - 'player', {'videoId': video_id}, video_id, api_key=self._extract_api_key(ytcfg)) - + if not player_response or force_mobile_client: + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + yt_client = 'WEB' + ytpcfg = ytcfg + ytp_headers = headers + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: 
https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning('Falling back to android client for player API.') + yt_client = 'ANDROID' + ytpcfg = {} + ytp_headers = self._generate_api_headers(ytpcfg, identity_token, syncid, yt_client) + + yt_query = {'videoId': video_id} + yt_query.update(self._generate_player_context(sts)) + player_response = self._extract_response( + item_id=video_id, ep='player', query=yt_query, + ytcfg=ytpcfg, headers=ytp_headers, fatal=False, + default_client=yt_client, + note='Downloading %splayer API JSON' % ('android ' if force_mobile_client else '') + ) or player_response + + # Age-gate workarounds playability_status = player_response.get('playabilityStatus') or {} - if playability_status.get('reason') == 'Sign in to confirm your age': - pr = self._parse_json(try_get(compat_parse_qs( - self._download_webpage( - base_url + 'get_video_info', video_id, - 'Refetching age-gated info webpage', 'unable to download video info webpage', - query=self._get_video_info_params(video_id), fatal=False)), - lambda x: x['player_response'][0], - compat_str) or '{}', video_id) + if playability_status.get('reason') in self._AGE_GATE_REASONS: + gvi_clients = ('ANDROID', 'TVHTML5') if force_mobile_client else ('TVHTML5', 'ANDROID') + for gvi_client in gvi_clients: + pr = self._parse_json(try_get(compat_parse_qs( + self._download_webpage( + base_url + 'get_video_info', video_id, + 'Refetching age-gated %s info webpage' % gvi_client.lower(), + 'unable to download video info webpage', fatal=False, + query=self._get_video_info_params(video_id, client=gvi_client))), + lambda x: x['player_response'][0], + compat_str) or '{}', video_id) + if pr: + break + if not pr: + self.report_warning('Falling back to embedded-only age-gate workaround.') + embed_webpage = None + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + if sts and not force_mobile_client and 'configs' not in player_skip: + embed_webpage = self._download_webpage( + 'https://www.youtube.com/embed/%s?html5=1' % video_id, + video_id=video_id, note='Downloading age-gated embed config') + + ytcfg_age = self._extract_ytcfg(video_id, embed_webpage) or {} + # If we extracted the embed webpage, it'll tell us if we can view the video + embedded_pr = self._parse_json( + try_get(ytcfg_age, lambda x: x['PLAYER_VARS']['embedded_player_response'], str) or '{}', + video_id=video_id) + embedded_ps_reason = try_get(embedded_pr, lambda x: x['playabilityStatus']['reason'], str) or '' + if embedded_ps_reason not in self._AGE_GATE_REASONS: + yt_client = 'WEB_EMBEDDED_PLAYER' + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning( + 'Falling back to android embedded client for player API (note: some formats may be missing).') + yt_client = 'ANDROID_EMBEDDED_PLAYER' + ytcfg_age = {} + + ytage_headers = self._generate_api_headers( + ytcfg_age, identity_token, syncid, client=yt_client) + yt_age_query = {'videoId': video_id} + yt_age_query.update(self._generate_player_context(sts)) + pr = self._extract_response( + item_id=video_id, ep='player', query=yt_age_query, + ytcfg=ytcfg_age, headers=ytage_headers, fatal=False, + default_client=yt_client, + note='Downloading %sage-gated player API JSON' % ('android ' if force_mobile_client else '') + ) or {} + if pr: player_response = pr @@ -2026,8 +2487,9 @@ def feed_entry(name): formats, itags, stream_ids = [], [], 
[] itag_qualities = {} - player_url = None q = qualities([ + # "tiny" is the smallest video-only format. But some audio-only formats + # was also labeled "tiny". It is not clear if such formats still exist 'tiny', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres' ]) @@ -2066,12 +2528,6 @@ def feed_entry(name): encrypted_sig = try_get(sc, lambda x: x['s'][0]) if not (sc and fmt_url and encrypted_sig): continue - if not player_url: - if not webpage: - continue - player_url = self._search_regex( - r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"', - webpage, 'player URL', fatal=False) if not player_url: continue signature = self._decrypt_signature(sc['s'][0], video_id, player_url) @@ -2097,13 +2553,15 @@ def feed_entry(name): 'width': fmt.get('width'), 'language': audio_track.get('id', '').split('.')[0], } - mimetype = fmt.get('mimeType') - if mimetype: - mobj = re.match( - r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype) - if mobj: - dct['ext'] = mimetype2ext(mobj.group(1)) - dct.update(parse_codecs(mobj.group(2))) + mime_mobj = re.match( + r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '') + if mime_mobj: + dct['ext'] = mimetype2ext(mime_mobj.group(1)) + dct.update(parse_codecs(mime_mobj.group(2))) + # The 3gp format in android client has a quality of "small", + # but is actually worse than all other formats + if dct['ext'] == '3gp': + dct['quality'] = q('tiny') no_audio = dct.get('acodec') == 'none' no_video = dct.get('vcodec') == 'none' if no_audio: @@ -2119,7 +2577,7 @@ def feed_entry(name): dct['container'] = dct['ext'] + '_dash' formats.append(dct) - skip_manifests = self._configuration_arg('skip') or [] + skip_manifests = self._configuration_arg('skip') get_dash = 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True) get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True) @@ -2234,6 +2692,7 @@ def feed_entry(name): or microformat.get('lengthSeconds')) \ or parse_duration(search_meta('duration')) is_live = video_details.get('isLive') + is_upcoming = video_details.get('isUpcoming') owner_profile_url = microformat.get('ownerProfileUrl') info = { @@ -2297,7 +2756,7 @@ def process_language(container, base_url, lang_code, sub_name, query): continue process_language( subtitles, base_url, lang_code, - try_get(caption_track, lambda x: x.get('name').get('simpleText')), + try_get(caption_track, lambda x: x['name']['simpleText']), {}) continue automatic_captions = {} @@ -2347,8 +2806,10 @@ def process_language(container, base_url, lang_code, sub_name, query): webpage, self._YT_INITIAL_DATA_RE, video_id, 'yt initial data') if not initial_data: - initial_data = self._call_api( - 'next', {'videoId': video_id}, video_id, fatal=False, api_key=self._extract_api_key(ytcfg)) + initial_data = self._extract_response( + item_id=video_id, ep='next', fatal=False, + ytcfg=ytcfg, headers=headers, query={'videoId': video_id}, + note='Downloading initial data API JSON') try: # This will error if there is no livechat @@ -2357,7 +2818,7 @@ def process_language(container, base_url, lang_code, sub_name, query): 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies 'video_id': video_id, 'ext': 'json', - 'protocol': 'youtube_live_chat' if is_live else 'youtube_live_chat_replay', + 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 
'youtube_live_chat_replay', }] except (KeyError, IndexError, TypeError): pass @@ -2551,7 +3012,7 @@ def chapter_time(mmlir): data=urlencode_postdata({xsrf_field_name: xsrf_token})) if get_comments: - info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage, xsrf_token) + info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage) self.mark_watched(video_id, player_response) @@ -3176,52 +3637,6 @@ def _rich_grid_entries(self, contents): if entry: yield entry ''' - - @staticmethod - def _build_continuation_query(continuation, ctp=None): - query = { - 'ctoken': continuation, - 'continuation': continuation, - } - if ctp: - query['itct'] = ctp - return query - - @staticmethod - def _extract_next_continuation_data(renderer): - next_continuation = try_get( - renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict) - if not next_continuation: - return - continuation = next_continuation.get('continuation') - if not continuation: - return - ctp = next_continuation.get('clickTrackingParams') - return YoutubeTabIE._build_continuation_query(continuation, ctp) - - @classmethod - def _extract_continuation(cls, renderer): - next_continuation = cls._extract_next_continuation_data(renderer) - if next_continuation: - return next_continuation - contents = [] - for key in ('contents', 'items'): - contents.extend(try_get(renderer, lambda x: x[key], list) or []) - for content in contents: - if not isinstance(content, dict): - continue - continuation_ep = try_get( - content, lambda x: x['continuationItemRenderer']['continuationEndpoint'], - dict) - if not continuation_ep: - continue - continuation = try_get( - continuation_ep, lambda x: x['continuationCommand']['token'], compat_str) - if not continuation: - continue - ctp = continuation_ep.get('clickTrackingParams') - return YoutubeTabIE._build_continuation_query(continuation, ctp) - def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg): def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds @@ -3504,40 +3919,6 @@ def _extract_from_playlist(self, item_id, url, data, playlist, webpage): self._extract_mix_playlist(playlist, playlist_id, data, webpage), playlist_id=playlist_id, playlist_title=title) - @staticmethod - def _extract_alerts(data): - for alert_dict in try_get(data, lambda x: x['alerts'], list) or []: - if not isinstance(alert_dict, dict): - continue - for alert in alert_dict.values(): - alert_type = alert.get('type') - if not alert_type: - continue - message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or '' - if message: - yield alert_type, message - for run in try_get(alert, lambda x: x['text']['runs'], list) or []: - message += try_get(run, lambda x: x['text'], compat_str) - if message: - yield alert_type, message - - def _report_alerts(self, alerts, expected=True): - errors = [] - warnings = [] - for alert_type, alert_message in alerts: - if alert_type.lower() == 'error': - errors.append([alert_type, alert_message]) - else: - warnings.append([alert_type, alert_message]) - - for alert_type, alert_message in (warnings + errors[:-1]): - self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message)) - if errors: - raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected) - - def _extract_and_report_alerts(self, data, *args, **kwargs): - return self._report_alerts(self._extract_alerts(data), *args, **kwargs) - def _reload_with_unavailable_videos(self, 
item_id, data, webpage): """ Get playlist with unavailable videos if the 'show unavailable videos' button exists. @@ -3582,60 +3963,6 @@ def _reload_with_unavailable_videos(self, item_id, data, webpage): check_get_keys='contents', fatal=False, note='Downloading API JSON with unavailable videos') - def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None, - ytcfg=None, check_get_keys=None, ep='browse', fatal=True): - response = None - last_error = None - count = -1 - retries = self.get_param('extractor_retries', 3) - if check_get_keys is None: - check_get_keys = [] - while count < retries: - count += 1 - if last_error: - self.report_warning('%s. Retrying ...' % last_error) - try: - response = self._call_api( - ep=ep, fatal=True, headers=headers, - video_id=item_id, query=query, - context=self._extract_context(ytcfg), - api_key=self._extract_api_key(ytcfg), - note='%s%s' % (note, ' (retry #%d)' % count if count else '')) - except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404): - # Downloading page may result in intermittent 5xx HTTP error - # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289 - last_error = 'HTTP Error %s' % e.cause.code - if count < retries: - continue - if fatal: - raise - else: - self.report_warning(error_to_compat_str(e)) - return - - else: - # Youtube may send alerts if there was an issue with the continuation page - try: - self._extract_and_report_alerts(response, expected=False) - except ExtractorError as e: - if fatal: - raise - self.report_warning(error_to_compat_str(e)) - return - if not check_get_keys or dict_get(response, check_get_keys): - break - # Youtube sometimes sends incomplete data - # See: https://github.com/ytdl-org/youtube-dl/issues/28194 - last_error = 'Incomplete data received' - if count >= retries: - if fatal: - raise ExtractorError(last_error) - else: - self.report_warning(last_error) - return - return response - def _extract_webpage(self, url, item_id): retries = self.get_param('extractor_retries', 3) count = -1
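
The per-endpoint retry loops removed above (comments, tab browsing) are replaced by calls to a shared _extract_response helper. A minimal sketch of the retry pattern it implements, with a plain callable standing in for the innertube request; the names here are illustrative, not yt-dlp API:

    def fetch_with_retries(fetch, retries=3, fatal=True, warn=print):
        # Retry intermittent failures (the extractor retries HTTP 404/5xx)
        # and incomplete payloads, warning between attempts; at the end,
        # raise or merely warn depending on `fatal`.
        last_error = None
        for count in range(retries + 1):
            if last_error:
                warn('%s. Retrying ...' % last_error)
            try:
                response = fetch()
            except IOError as e:  # stand-in for the HTTP errors caught upstream
                last_error = 'HTTP Error: %s' % e
                continue
            if response:  # stand-in for the check_get_keys validation
                return response
            last_error = 'Incomplete data received'
        if fatal:
            raise RuntimeError(last_error)
        warn(last_error)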
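_extract_player_url now owns the player-URL normalization that _decrypt_signature previously performed inline, so the signature and signature-timestamp paths share one resolved URL. A standalone sketch of that normalization, using the standard library in place of yt-dlp's compat_urlparse shim (the function name is mine):

    import re
    from urllib.parse import urljoin

    def normalize_player_url(player_url):
        # Protocol-relative URLs ('//www.youtube.com/s/player/...') get a
        # scheme; other non-absolute URLs are resolved against youtube.com.
        if player_url.startswith('//'):
            return 'https:' + player_url
        if not re.match(r'https?://', player_url):
            return urljoin('https://www.youtube.com', player_url)
        return player_url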
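_extract_signature_timestamp prefers the STS field of the page ytcfg and only downloads the player JS as a fallback. The fallback itself is a single regex over the player source; a sketch, assuming player_js already holds the downloaded code:

    import re

    def extract_signature_timestamp(player_js):
        # The 5-digit signatureTimestamp tells the player API which
        # signature/player version the client is using.
        m = re.search(r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', player_js)
        return int(m.group('sts')) if m else None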
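Every /youtubei/v1/player request built above threads that timestamp through _generate_player_context; when no timestamp can be extracted, the code falls back to an android client, whose streams arrive with signatures already descrambled. A sketch of the request body (helper name is mine):

    def build_player_query(video_id, sts=None):
        # signatureTimestamp rides in playbackContext.contentPlaybackContext.
        context = {'html5Preference': 'HTML5_PREF_WANTS'}
        if sts is not None:
            context['signatureTimestamp'] = sts
        return {
            'videoId': video_id,
            'playbackContext': {'contentPlaybackContext': context},
        }

The same fallback can be forced through the new extractor arguments exercised by the added tests, e.g. --extractor-args "youtube:player_client=android" (and "youtube:player_skip=configs" to skip the extra remix/embed config downloads).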
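When _extract_continuation returns a legacy ctoken (shorter than 27 characters), _generate_comment_continuation rebuilds a next-API-compatible comment-section token by splicing the base64-encoded video id between fixed fragments and re-encoding the result. An equivalent sketch without the bytes_to_intlist/intlist_to_bytes round-trip:

    import base64

    def generate_comment_continuation(video_id):
        # base64 of the video id, inserted twice between fixed protobuf
        # fragments (the string 'comments-section' is visible in the last
        # fragment), then the assembled bytes are base64-encoded again.
        b64_vid_id = base64.b64encode(video_id.encode('utf-8'))
        parts = (b'Eg0SCw==', b64_vid_id, b'GAYyJyIRIgs=', b64_vid_id,
                 b'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u')
        raw = b''.join(base64.b64decode(part) for part in parts)
        return base64.b64encode(raw).decode('utf-8')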