diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 16cb53add..48fc460ef 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -2,8 +2,10 @@
 from __future__ import unicode_literals
 
+import base64
 import calendar
 import copy
+import datetime
 import hashlib
 import itertools
 import json
@@ -26,23 +28,29 @@
 )
 from ..jsinterp import JSInterpreter
 from ..utils import (
-    bool_or_none,
+    bytes_to_intlist,
     clean_html,
-    dict_get,
     datetime_from_str,
+    dict_get,
     error_to_compat_str,
     ExtractorError,
-    format_field,
     float_or_none,
+    format_field,
     int_or_none,
+    intlist_to_bytes,
     mimetype2ext,
+    network_exceptions,
+    orderedSet,
     parse_codecs,
+    parse_count,
     parse_duration,
+    parse_iso8601,
     qualities,
     remove_start,
     smuggle_url,
     str_or_none,
     str_to_int,
+    traverse_obj,
     try_get,
     unescapeHTML,
     unified_strdate,
@@ -50,7 +58,8 @@
     update_url_query,
     url_or_none,
     urlencode_postdata,
-    urljoin
+    urljoin,
+    variadic,
 )
@@ -91,15 +100,18 @@ def warn(message):
         self.report_warning(message)
 
         # username+password login is broken
-        if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
+        if (self._LOGIN_REQUIRED
+                and self.get_param('cookiefile') is None
+                and self.get_param('cookiesfrombrowser') is None):
             self.raise_login_required(
                 'Login details are needed to download this content', method='cookies')
         username, password = self._get_login_info()
         if username:
             warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies'])
         return
-        # Everything below this is broken!
+        # Everything below this is broken!
+        r'''
         # No authentication to be performed
         if username is None:
             if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
@@ -272,6 +284,7 @@ def req(url, f_req, note, errnote):
             return False
 
         return True
+        '''
 
     def _initialize_consent(self):
         cookies = self._get_cookies('https://www.youtube.com/')
@@ -354,7 +367,7 @@ def _real_initialize(self):
                     'hl': 'en',
                 }
             },
-            'INNERTUBE_CONTEXT_CLIENT_NAME': 'ANDROID'
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 3
         },
         'ANDROID_EMBEDDED_PLAYER': {
             'INNERTUBE_API_VERSION': 'v1',
@@ -368,7 +381,7 @@ def _real_initialize(self):
                     'hl': 'en',
                 }
             },
-            'INNERTUBE_CONTEXT_CLIENT_NAME': 'ANDROID_EMBEDDED_PLAYER'
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 55
         },
         'ANDROID_MUSIC': {
             'INNERTUBE_API_VERSION': 'v1',
@@ -382,7 +395,50 @@ def _real_initialize(self):
                     'hl': 'en',
                 }
             },
-            'INNERTUBE_CONTEXT_CLIENT_NAME': 'ANDROID_MUSIC'
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 21
+        },
+        'IOS': {
+            'INNERTUBE_API_VERSION': 'v1',
+            'INNERTUBE_CLIENT_NAME': 'IOS',
+            'INNERTUBE_CLIENT_VERSION': '16.20',
+            'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
+            'INNERTUBE_CONTEXT': {
+                'client': {
+                    'clientName': 'IOS',
+                    'clientVersion': '16.20',
+                    'hl': 'en',
+                }
+            },
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 5
+
+        },
+        'IOS_MUSIC': {
+            'INNERTUBE_API_VERSION': 'v1',
+            'INNERTUBE_CLIENT_NAME': 'IOS_MUSIC',
+            'INNERTUBE_CLIENT_VERSION': '4.32',
+            'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og',
+            'INNERTUBE_CONTEXT': {
+                'client': {
+                    'clientName': 'IOS_MUSIC',
+                    'clientVersion': '4.32',
+                    'hl': 'en',
+                }
+            },
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 26
+        },
+        'IOS_MESSAGES_EXTENSION': {
+            'INNERTUBE_API_VERSION': 'v1',
+            'INNERTUBE_CLIENT_NAME': 'IOS_MESSAGES_EXTENSION',
+            'INNERTUBE_CLIENT_VERSION': '16.20',
+            'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
+            'INNERTUBE_CONTEXT': {
+                'client': {
+                    'clientName': 'IOS_MESSAGES_EXTENSION',
+                    'clientVersion': '16.20',
+                    'hl': 'en',
+                }
+            },
+            'INNERTUBE_CONTEXT_CLIENT_NAME': 66
         }
     }
@@ -393,6 +449,22 @@ def _real_initialize(self):
         'ANDROID_MUSIC': 'music.youtube.com'
     }
 
+    # clients starting with _ cannot be explicity requested by the user
+    _YT_CLIENTS = {
+        'web': 'WEB',
+        'web_music': 'WEB_REMIX',
+        '_web_embedded': 'WEB_EMBEDDED_PLAYER',
+        '_web_agegate': 'TVHTML5',
+        'android': 'ANDROID',
+        'android_music': 'ANDROID_MUSIC',
+        '_android_embedded': 'ANDROID_EMBEDDED_PLAYER',
+        '_android_agegate': 'ANDROID',
+        'ios': 'IOS',
+        'ios_music': 'IOS_MUSIC',
+        '_ios_embedded': 'IOS_MESSAGES_EXTENSION',
+        '_ios_agegate': 'IOS'
+    }
+
     def _get_default_ytcfg(self, client='WEB'):
         if client in self._YT_DEFAULT_YTCFGS:
             return copy.deepcopy(self._YT_DEFAULT_YTCFGS[client])
@@ -410,6 +482,13 @@ def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='WEB
     def _extract_client_name(self, ytcfg, default_client='WEB'):
         return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CLIENT_NAME'], compat_str, default_client)
 
+    @staticmethod
+    def _extract_session_index(*data):
+        for ytcfg in data:
+            session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
+            if session_index is not None:
+                return session_index
+
     def _extract_client_version(self, ytcfg, default_client='WEB'):
         return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str, default_client)
 
@@ -442,13 +521,15 @@ def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
         yt_cookies = self._get_cookies('https://www.youtube.com')
         sapisid_cookie = dict_get(
             yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
-        if sapisid_cookie is None:
+        if sapisid_cookie is None or not sapisid_cookie.value:
             return
         time_now = round(time.time())
         # SAPISID cookie is required if not already present
         if not yt_cookies.get('SAPISID'):
+            self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie', only_once=True)
             self._set_cookie(
                 '.youtube.com', 'SAPISID', sapisid_cookie.value, secure=True, expire_time=time_now + 3600)
+        self.write_debug('Extracted SAPISID cookie', only_once=True)
         # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
         sapisidhash = hashlib.sha1(
             f'{time_now} {sapisid_cookie.value} {origin}'.encode('utf-8')).hexdigest()
@@ -460,7 +541,7 @@ def _call_api(self, ep, query, video_id, fatal=True, headers=None,
         data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
         data.update(query)
-        real_headers = self._generate_api_headers(client=default_client)
+        real_headers = self.generate_api_headers(default_client=default_client)
         real_headers.update({'content-type': 'application/json'})
         if headers:
             real_headers.update(headers)
@@ -470,7 +551,7 @@ def _call_api(self, ep, query, video_id, fatal=True, headers=None,
             data=json.dumps(data).encode('utf8'), headers=real_headers,
             query={'key': api_key or self._extract_api_key()})
 
-    def _extract_yt_initial_data(self, video_id, webpage):
+    def extract_yt_initial_data(self, video_id, webpage):
         return self._parse_json(
             self._search_regex(
                 (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
@@ -478,7 +559,9 @@ def extract_yt_initial_data(self, video_id, webpage):
             video_id)
 
     def _extract_identity_token(self, webpage, item_id):
-        ytcfg = self._extract_ytcfg(item_id, webpage)
+        if not webpage:
+            return None
+        ytcfg = self.extract_ytcfg(item_id, webpage)
         if ytcfg:
             token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
             if token:
                 return token
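
Aside: a minimal standalone sketch of the SAPISIDHASH scheme implemented by _generate_sapisidhash_header in the hunk above (the function name and example usage here are illustrative, not part of the patch; the construction follows the StackOverflow answer cited in that hunk):

import hashlib
import time

def sapisidhash(sapisid, origin='https://www.youtube.com'):
    # Header value shape: "SAPISIDHASH <ts>_<sha1('<ts> <SAPISID> <origin>')>"
    time_now = round(time.time())
    digest = hashlib.sha1(f'{time_now} {sapisid} {origin}'.encode('utf-8')).hexdigest()
    return f'{time_now}_{digest}'

# e.g. headers['Authorization'] = 'SAPISIDHASH ' + sapisidhash(cookie_value)
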
@@ -488,22 +571,25 @@ def _extract_identity_token(self, webpage, item_id):
             'identity token', default=None)
 
     @staticmethod
-    def _extract_account_syncid(data):
+    def _extract_account_syncid(*args):
         """
         Extract syncId required to download private playlists of secondary channels
-        @param data Either response or ytcfg
+        @params response and/or ytcfg
         """
-        sync_ids = (try_get(
-            data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
-                   lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
-        if len(sync_ids) >= 2 and sync_ids[1]:
-            # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
-            # and just "user_syncid||" for primary channel. We only want the channel_syncid
-            return sync_ids[0]
-        # ytcfg includes channel_syncid if on secondary channel
-        return data.get('DELEGATED_SESSION_ID')
-
-    def _extract_ytcfg(self, video_id, webpage):
+        for data in args:
+            # ytcfg includes channel_syncid if on secondary channel
+            delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
+            if delegated_sid:
+                return delegated_sid
+            sync_ids = (try_get(
+                data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
+                       lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
+            if len(sync_ids) >= 2 and sync_ids[1]:
+                # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
+                # and just "user_syncid||" for primary channel. We only want the channel_syncid
+                return sync_ids[0]
+
+    def extract_ytcfg(self, video_id, webpage):
         if not webpage:
             return {}
         return self._parse_json(
@@ -511,20 +597,27 @@ def extract_ytcfg(self, video_id, webpage):
                 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
                 default='{}'), video_id, fatal=False) or {}
 
-    def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None,
-                              visitor_data=None, api_hostname=None, client='WEB'):
-        origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(client))
+    def generate_api_headers(
+            self, ytcfg=None, identity_token=None, account_syncid=None,
+            visitor_data=None, api_hostname=None, default_client='WEB', session_index=None):
+        origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
         headers = {
             'X-YouTube-Client-Name': compat_str(
-                self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=client)),
-            'X-YouTube-Client-Version': self._extract_client_version(ytcfg, client),
+                self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
+            'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
             'Origin': origin
         }
+        if not visitor_data and ytcfg:
+            visitor_data = try_get(
+                self._extract_context(ytcfg, default_client), lambda x: x['client']['visitorData'], compat_str)
         if identity_token:
             headers['X-Youtube-Identity-Token'] = identity_token
         if account_syncid:
             headers['X-Goog-PageId'] = account_syncid
-            headers['X-Goog-AuthUser'] = 0
+        if session_index is None and ytcfg:
+            session_index = self._extract_session_index(ytcfg)
+        if account_syncid or session_index is not None:
+            headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
         if visitor_data:
             headers['X-Goog-Visitor-Id'] = visitor_data
         auth = self._generate_sapisidhash_header(origin)
@@ -534,7 +627,63 @@ def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=
         return headers
 
     @staticmethod
-    def _extract_alerts(data):
+    def _build_api_continuation_query(continuation, ctp=None):
+        query = {
+            'continuation': continuation
+        }
+        # TODO: Inconsistency with clickTrackingParams.
+        # Currently we have a fixed ctp contained within context (from ytcfg)
+        # and a ctp in root query for continuation.
+        if ctp:
+            query['clickTracking'] = {'clickTrackingParams': ctp}
+        return query
+
+    @classmethod
+    def _extract_next_continuation_data(cls, renderer):
+        next_continuation = try_get(
+            renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
+                       lambda x: x['continuation']['reloadContinuationData']), dict)
+        if not next_continuation:
+            return
+        continuation = next_continuation.get('continuation')
+        if not continuation:
+            return
+        ctp = next_continuation.get('clickTrackingParams')
+        return cls._build_api_continuation_query(continuation, ctp)
+
+    @classmethod
+    def _extract_continuation_ep_data(cls, continuation_ep: dict):
+        if isinstance(continuation_ep, dict):
+            continuation = try_get(
+                continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
+            if not continuation:
+                return
+            ctp = continuation_ep.get('clickTrackingParams')
+            return cls._build_api_continuation_query(continuation, ctp)
+
+    @classmethod
+    def _extract_continuation(cls, renderer):
+        next_continuation = cls._extract_next_continuation_data(renderer)
+        if next_continuation:
+            return next_continuation
+
+        contents = []
+        for key in ('contents', 'items'):
+            contents.extend(try_get(renderer, lambda x: x[key], list) or [])
+
+        for content in contents:
+            if not isinstance(content, dict):
+                continue
+            continuation_ep = try_get(
+                content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
+                          lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
+                dict)
+            continuation = cls._extract_continuation_ep_data(continuation_ep)
+            if continuation:
+                return continuation
+
+    @classmethod
+    def _extract_alerts(cls, data):
         for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
             if not isinstance(alert_dict, dict):
                 continue
@@ -542,11 +691,7 @@ def _extract_alerts(data):
             alert_type = alert.get('type')
             if not alert_type:
                 continue
-            message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or ''
-            if message:
-                yield alert_type, message
-            for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
-                message += try_get(run, lambda x: x['text'], compat_str)
+            message = cls._get_text(alert, 'text')
             if message:
                 yield alert_type, message
 
@@ -567,6 +712,36 @@ def _report_alerts(self, alerts, expected=True):
     def _extract_and_report_alerts(self, data, *args, **kwargs):
         return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
 
+    def _extract_badges(self, renderer: dict):
+        badges = set()
+        for badge in try_get(renderer, lambda x: x['badges'], list) or []:
+            label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
+            if label:
+                badges.add(label.lower())
+        return badges
+
+    @staticmethod
+    def _get_text(data, *path_list, max_runs=None):
+        for path in path_list or [None]:
+            if path is None:
+                obj = [data]
+            else:
+                obj = traverse_obj(data, path, default=[])
+                if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
+                    obj = [obj]
+            for item in obj:
+                text = try_get(item, lambda x: x['simpleText'], compat_str)
+                if text:
+                    return text
+                runs = try_get(item, lambda x: x['runs'], list) or []
+                if not runs and isinstance(item, list):
+                    runs = item
+
+                runs = runs[:min(len(runs), max_runs or len(runs))]
+                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
+                if text:
+                    return text
+
     def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
                           ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
                           default_client='WEB'):
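
Aside: the new _get_text helper above normalizes the two shapes YouTube uses for text renderers. A simplified sketch of just that normalization (sample data invented for illustration; the real helper additionally supports traverse_obj paths and a max_runs cap):

def get_text(renderer):
    # YouTube text is either {'simpleText': '...'} or {'runs': [{'text': '...'}, ...]}
    if not isinstance(renderer, dict):
        return None
    if renderer.get('simpleText'):
        return renderer['simpleText']
    return ''.join(run.get('text', '') for run in renderer.get('runs', []) if isinstance(run, dict))

assert get_text({'simpleText': 'Dark Walk'}) == 'Dark Walk'
assert get_text({'runs': [{'text': 'Dark '}, {'text': 'Walk'}]}) == 'Dark Walk'
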
@@ -589,12 +764,15 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers
                     api_hostname=api_hostname, default_client=default_client,
                     note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
             except ExtractorError as e:
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
+                if isinstance(e.cause, network_exceptions):
                     # Downloading page may result in intermittent 5xx HTTP error
                     # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
-                    last_error = 'HTTP Error %s' % e.cause.code
-                    if count < retries:
-                        continue
+                    # We also want to catch all other network exceptions since errors in later pages can be troublesome
+                    # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
+                    if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
+                        last_error = error_to_compat_str(e.cause or e)
+                        if count < retries:
+                            continue
                 if fatal:
                     raise
                 else:
@@ -629,24 +807,16 @@ def is_music_url(url):
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
-        title = try_get(
-            renderer,
-            (lambda x: x['title']['runs'][0]['text'],
-             lambda x: x['title']['simpleText']), compat_str)
-        description = try_get(
-            renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
-            compat_str)
-        duration = parse_duration(try_get(
-            renderer, lambda x: x['lengthText']['simpleText'], compat_str))
-        view_count_text = try_get(
-            renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
+        title = self._get_text(renderer, 'title')
+        description = self._get_text(renderer, 'descriptionSnippet')
+        duration = parse_duration(self._get_text(renderer, 'lengthText'))
+        view_count_text = self._get_text(renderer, 'viewCountText') or ''
         view_count = str_to_int(self._search_regex(
             r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
             'view count', default=None))
-        uploader = try_get(
-            renderer,
-            (lambda x: x['ownerText']['runs'][0]['text'],
-             lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
+
+        uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
+
         return {
             '_type': 'url',
             'ie_key': YoutubeIE.ie_key(),
@@ -981,21 +1151,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'format': '141/bestaudio[ext=m4a]',
             },
         },
-        # Controversy video
-        {
-            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
-            'info_dict': {
-                'id': 'T4XJQO3qol8',
-                'ext': 'mp4',
-                'duration': 219,
-                'upload_date': '20100909',
-                'uploader': 'Amazing Atheist',
-                'uploader_id': 'TheAmazingAtheist',
-                'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
-                'title': 'Burning Everyone\'s Koran',
-                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
-            }
-        },
         # Normal age-gate video (embed allowed)
         {
             'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
@@ -1044,7 +1199,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_id': 'olympic',
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
                 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
-                'uploader': 'Olympic',
+                'uploader': 'Olympics',
                 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
             },
             'params': {
@@ -1209,16 +1364,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'id': 'lsguqyKfVQg',
                 'ext': 'mp4',
                 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
-                'alt_title': 'Dark Walk - Position Music',
+                'alt_title': 'Dark Walk',
                 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
                 'duration': 133,
                 'upload_date': '20151119',
                 'uploader_id': 'IronSoulElf',
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
                 'uploader': 'IronSoulElf',
-                'creator': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
-                'track': 'Dark Walk - Position Music',
-                'artist': 'Todd Haberman, Daniel Law Heath and Aaron Kaplan',
+                'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
+                'track': 'Dark Walk',
+                'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
                 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
             },
             'params': {
@@ -1528,6 +1683,20 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
             'only_matching': True,
         },
+        {
+            # controversial video, requires bpctr/contentCheckOk
+            'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
+            'info_dict': {
+                'id': 'SZJvDhaSDnc',
+                'ext': 'mp4',
+                'title': 'San Diego teen commits suicide after bullying over embarrassing video',
+                'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
+                'uploader': 'CBS This Morning',
+                'uploader_id': 'CBSThisMorning',
+                'upload_date': '20140716',
+                'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7'
+            }
+        },
         {
             # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
             'url': 'cBvYw8_A0vQ',
@@ -1561,13 +1730,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
             'info_dict': {
                 'id': 'YOelRv7fMxY',
-                'title': 'Digging a Secret Tunnel from my Workshop',
+                'title': 'DIGGING A SECRET TUNNEL Part 1',
                 'ext': '3gp',
                 'upload_date': '20210624',
                 'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
                 'uploader': 'colinfurze',
+                'uploader_id': 'colinfurze',
                 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
-                'description': 'md5:ecb672623246d98c6c562eed6ae798c3'
+                'description': 'md5:b5096f56af7ccd7a555c84db81738b22'
             },
             'params': {
                 'format': '17',  # 3gp format available on android
@@ -1601,10 +1771,12 @@ def __init__(self, *args, **kwargs):
 
     def _extract_player_url(self, ytcfg=None, webpage=None):
         player_url = try_get(ytcfg, (lambda x: x['PLAYER_JS_URL']), str)
-        if not player_url:
+        if not player_url and webpage:
             player_url = self._search_regex(
                 r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
                 webpage, 'player URL', fatal=False)
+        if not player_url:
+            return None
         if player_url.startswith('//'):
             player_url = 'https:' + player_url
         elif not re.match(r'https?://', player_url):
@@ -1768,11 +1940,12 @@ def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=F
             'JS player signature timestamp', group='sts', fatal=fatal))
         return sts
 
-    def _mark_watched(self, video_id, player_response):
-        playback_url = url_or_none(try_get(
-            player_response,
-            lambda x: x['playbackTracking']['videostatsPlaybackUrl']['baseUrl']))
+    def _mark_watched(self, video_id, player_responses):
+        playback_url = traverse_obj(
+            player_responses, (..., 'playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
+            expected_type=url_or_none, get_all=False)
         if not playback_url:
+            self.report_warning('Unable to mark watched')
             return
         parsed_playback_url = compat_urlparse.urlparse(playback_url)
         qs = compat_urlparse.parse_qs(parsed_playback_url.query)
@@ -1838,44 +2011,56 @@ def extract_id(cls, url):
         video_id = mobj.group(2)
         return video_id
 
-    def _extract_chapters_from_json(self, data, video_id, duration):
-        chapters_list = try_get(
+    def _extract_chapters_from_json(self, data, duration):
+        chapter_list = traverse_obj(
+            data, (
+                'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
+                'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
+            ), expected_type=list)
+
+        return self._extract_chapters(
+            chapter_list,
+            chapter_time=lambda chapter: float_or_none(
+                traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
+            chapter_title=lambda chapter: traverse_obj(
+                chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
+            duration=duration)
+
+    def _extract_chapters_from_engagement_panel(self, data, duration):
+        content_list = traverse_obj(
             data,
-            lambda x: x['playerOverlays']
-                       ['playerOverlayRenderer']
-                       ['decoratedPlayerBarRenderer']
-                       ['decoratedPlayerBarRenderer']
-                       ['playerBar']
-                       ['chapteredPlayerBarRenderer']
-                       ['chapters'],
-            list)
-        if not chapters_list:
-            return
-
-        def chapter_time(chapter):
-            return float_or_none(
-                try_get(
-                    chapter,
-                    lambda x: x['chapterRenderer']['timeRangeStartMillis'],
-                    int),
-                scale=1000)
+            ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
+            expected_type=list, default=[])
+        chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
+        chapter_title = lambda chapter: self._get_text(chapter, 'title')
+
+        return next((
+            filter(None, (
+                self._extract_chapters(
+                    traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
+                    chapter_time, chapter_title, duration)
+                for contents in content_list
+            ))), [])
+
+    def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
         chapters = []
-        for next_num, chapter in enumerate(chapters_list, start=1):
+        last_chapter = {'start_time': 0}
+        for idx, chapter in enumerate(chapter_list or []):
+            title = chapter_title(chapter)
             start_time = chapter_time(chapter)
             if start_time is None:
                 continue
-            end_time = (chapter_time(chapters_list[next_num])
-                        if next_num < len(chapters_list) else duration)
-            if end_time is None:
-                continue
-            title = try_get(
-                chapter, lambda x: x['chapterRenderer']['title']['simpleText'],
-                compat_str)
-            chapters.append({
-                'start_time': start_time,
-                'end_time': end_time,
-                'title': title,
-            })
+            last_chapter['end_time'] = start_time
+            if start_time < last_chapter['start_time']:
+                if idx == 1:
+                    chapters.pop()
+                    self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
+                else:
+                    self.report_warning(f'Invalid start time for chapter "{title}"')
+                    continue
+            last_chapter = {'start_time': start_time, 'title': title}
+            chapters.append(last_chapter)
+        last_chapter['end_time'] = duration
         return chapters
 
     def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
@@ -1891,48 +2076,42 @@ def parse_time_text(time_text):
         """
         time_text_split = time_text.split(' ')
         if len(time_text_split) >= 3:
-            return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
-
-    @staticmethod
-    def _join_text_entries(runs):
-        text = None
-        for run in runs:
-            if not isinstance(run, dict):
-                continue
-            sub_text = try_get(run, lambda x: x['text'], compat_str)
-            if sub_text:
-                if not text:
-                    text = sub_text
-                    continue
-                text += sub_text
-        return text
+            try:
+                return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
+            except ValueError:
+                return None
 
     def _extract_comment(self, comment_renderer, parent=None):
         comment_id = comment_renderer.get('commentId')
         if not comment_id:
             return
-        comment_text_runs = try_get(comment_renderer, lambda x: x['contentText']['runs']) or []
-        text = self._join_text_entries(comment_text_runs) or ''
-        comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
-        time_text = self._join_text_entries(comment_time_text)
-        timestamp = calendar.timegm(self.parse_time_text(time_text).timetuple())
-        author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
+
+        text = self._get_text(comment_renderer, 'contentText')
+
+        # note: timestamp is an estimate calculated from the current time and time_text
+        time_text = self._get_text(comment_renderer, 'publishedTimeText') or ''
+        time_text_dt = self.parse_time_text(time_text)
+        if isinstance(time_text_dt, datetime.datetime):
+            timestamp = calendar.timegm(time_text_dt.timetuple())
+        author = self._get_text(comment_renderer, 'authorText')
         author_id = try_get(comment_renderer,
                             lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
-        votes = str_to_int(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
-                                                      lambda x: x['likeCount']), compat_str)) or 0
+
+        votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
+                                                       lambda x: x['likeCount']), compat_str)) or 0
         author_thumbnail = try_get(comment_renderer,
                                    lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
 
         author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
-        is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
+        is_favorited = 'creatorHeart' in (try_get(
+            comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
         return {
             'id': comment_id,
             'text': text,
             'timestamp': timestamp,
             'time_text': time_text,
             'like_count': votes,
-            'is_favorited': is_liked,
+            'is_favorited': is_favorited,
             'author': author,
             'author_id': author_id,
             'author_thumbnail': author_thumbnail,
@@ -1941,10 +2120,42 @@ def _extract_comment(self, comment_renderer, parent=None):
         }
 
     def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
-                         ytcfg, session_token_list, parent=None, comment_counts=None):
+                         ytcfg, video_id, parent=None, comment_counts=None):
 
-        def extract_thread(parent_renderer):
-            contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
+        def extract_header(contents):
+            _total_comments = 0
+            _continuation = None
+            for content in contents:
+                comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
+                expected_comment_count = parse_count(self._get_text(
+                    comments_header_renderer, 'countText', 'commentsCount', max_runs=1))
+
+                if expected_comment_count:
+                    comment_counts[1] = expected_comment_count
+                    self.to_screen('Downloading ~%d comments' % expected_comment_count)
+                    _total_comments = comment_counts[1]
+                sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
+                comment_sort_index = int(sort_mode_str != 'top')  # 1 = new, 0 = top
+
+                sort_menu_item = try_get(
+                    comments_header_renderer,
+                    lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
+                sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
+
+                _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
+                if not _continuation:
+                    continue
+
+                sort_text = sort_menu_item.get('title')
+                if isinstance(sort_text, compat_str):
+                    sort_text = sort_text.lower()
+                else:
+                    sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
+                self.to_screen('Sorting comments by %s' % sort_text)
+                break
+            return _total_comments, _continuation
+
+        def extract_thread(contents):
             if not parent:
                 comment_counts[2] = 0
             for content in contents:
@@ -1968,117 +2179,48 @@ def extract_thread(parent_renderer):
                     comment_counts[2] += 1
                     comment_entries_iter = self._comment_entries(
                         comment_replies_renderer, identity_token, account_syncid, ytcfg,
-                        parent=comment.get('id'), session_token_list=session_token_list,
-                        comment_counts=comment_counts)
+                        video_id, parent=comment.get('id'), comment_counts=comment_counts)
 
                     for reply_comment in comment_entries_iter:
                         yield reply_comment
 
+        # YouTube comments have a max depth of 2
+        max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf')
+        if max_depth == 1 and parent:
+            return
         if not comment_counts:
             # comment so far, est. total comments, current comment thread #
             comment_counts = [0, 0, 0]
 
-        # TODO: Generalize the download code with TabIE
-        context = self._extract_context(ytcfg)
-        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
-        continuation = YoutubeTabIE._extract_continuation(root_continuation_data)  # TODO
-        first_continuation = False
-        if parent is None:
-            first_continuation = True
+        continuation = self._extract_continuation(root_continuation_data)
+        if continuation and len(continuation['continuation']) < 27:
+            self.write_debug('Detected old API continuation token. Generating new API compatible token.')
+            continuation_token = self._generate_comment_continuation(video_id)
+            continuation = self._build_api_continuation_query(continuation_token, None)
+
+        visitor_data = None
+        is_first_continuation = parent is None
 
         for page_num in itertools.count(0):
             if not continuation:
                 break
-            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
-            retries = self.get_param('extractor_retries', 3)
-            count = -1
-            last_error = None
-
-            while count < retries:
-                count += 1
-                if last_error:
-                    self.report_warning('%s. Retrying ...' % last_error)
-                try:
-                    query = {
-                        'ctoken': continuation['ctoken'],
-                        'pbj': 1,
-                        'type': 'next',
-                    }
-                    if 'itct' in continuation:
-                        query['itct'] = continuation['itct']
-                    if parent:
-                        query['action_get_comment_replies'] = 1
-                    else:
-                        query['action_get_comments'] = 1
-
-                    comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
-                    if page_num == 0:
-                        if first_continuation:
-                            note_prefix = 'Downloading initial comment continuation page'
-                        else:
-                            note_prefix = '    Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str)
-                    else:
-                        note_prefix = '%sDownloading comment%s page %d %s' % (
-                            '       ' if parent else '',
-                            ' replies' if parent else '',
-                            page_num,
-                            comment_prog_str)
-
-                    browse = self._download_json(
-                        'https://www.youtube.com/comment_service_ajax', None,
-                        '%s %s' % (note_prefix, '(retry #%d)' % count if count else ''),
-                        headers=headers, query=query,
-                        data=urlencode_postdata({
-                            'session_token': session_token_list[0]
-                        }))
-                except ExtractorError as e:
-                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
-                        if e.cause.code == 413:
-                            self.report_warning('Assumed end of comments (received HTTP Error 413)')
-                            return
-                        # Downloading page may result in intermittent 5xx HTTP error
-                        # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
-                        last_error = 'HTTP Error %s' % e.cause.code
-                        if e.cause.code == 404:
-                            last_error = last_error + ' (this API is probably deprecated)'
-                        if count < retries:
-                            continue
-                    raise
+            headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
+            if page_num == 0:
+                if is_first_continuation:
+                    note_prefix = 'Downloading comment section API JSON'
                 else:
-                    session_token = try_get(browse, lambda x: x['xsrf_token'], compat_str)
-                    if session_token:
-                        session_token_list[0] = session_token
-
-                    response = try_get(browse,
-                                       (lambda x: x['response'],
-                                        lambda x: x[1]['response']), dict) or {}
-
-                    if response.get('continuationContents'):
-                        break
-
-                    # YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
-                    if isinstance(browse, dict):
-                        if browse.get('reload'):
-                            raise ExtractorError('Invalid or missing params in continuation request', expected=False)
-
-                    # TODO: not tested, merged from old extractor
-                    err_msg = browse.get('externalErrorMessage')
-                    if err_msg:
-                        last_error = err_msg
-                        continue
-
-                    response_error = try_get(response, lambda x: x['responseContext']['errors']['error'][0], dict) or {}
-                    err_msg = response_error.get('externalErrorMessage')
-                    if err_msg:
-                        last_error = err_msg
-                        continue
-
-                    # Youtube sometimes sends incomplete data
-                    # See: https://github.com/ytdl-org/youtube-dl/issues/28194
-                    last_error = 'Incomplete data received'
-                    if count >= retries:
-                        raise ExtractorError(last_error)
+                    note_prefix = '    Downloading comment API JSON reply thread %d %s' % (
+                        comment_counts[2], comment_prog_str)
+            else:
+                note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
+                    '       ' if parent else '', ' replies' if parent else '',
+                    page_num, comment_prog_str)
 
+            response = self._extract_response(
+                item_id=None, query=continuation,
+                ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
+                check_get_keys=('onResponseReceivedEndpoints', 'continuationContents'))
             if not response:
                 break
             visitor_data = try_get(
@@ -2086,80 +2228,107 @@ def extract_thread(parent_renderer):
                 lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
                 compat_str) or visitor_data
 
-            known_continuation_renderers = {
-                'itemSectionContinuation': extract_thread,
-                'commentRepliesContinuation': extract_thread
-            }
-
-            # extract next root continuation from the results
-            continuation_contents = try_get(
-                response, lambda x: x['continuationContents'], dict) or {}
-
-            for key, value in continuation_contents.items():
-                if key not in known_continuation_renderers:
-                    continue
-                continuation_renderer = value
+            continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents'))
 
-                if first_continuation:
-                    first_continuation = False
-                    expected_comment_count = try_get(
-                        continuation_renderer,
-                        (lambda x: x['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'],
-                         lambda x: x['header']['commentsHeaderRenderer']['commentsCount']['runs'][0]['text']),
-                        compat_str)
-
-                    if expected_comment_count:
-                        comment_counts[1] = str_to_int(expected_comment_count)
-                        self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
-                        yield comment_counts[1]
-
-                    # TODO: cli arg.
-                    # 1/True for newest, 0/False for popular (default)
-                    comment_sort_index = int(True)
-                    sort_continuation_renderer = try_get(
-                        continuation_renderer,
-                        lambda x: x['header']['commentsHeaderRenderer']['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems']
-                        [comment_sort_index]['continuation']['reloadContinuationData'], dict)
-                    # If this fails, the initial continuation page
-                    # starts off with popular anyways.
-                    if sort_continuation_renderer:
-                        continuation = YoutubeTabIE._build_continuation_query(
-                            continuation=sort_continuation_renderer.get('continuation'),
-                            ctp=sort_continuation_renderer.get('clickTrackingParams'))
-                        self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest'))
+            continuation = None
+            if isinstance(continuation_contents, list):
+                for continuation_section in continuation_contents:
+                    if not isinstance(continuation_section, dict):
+                        continue
+                    continuation_items = try_get(
+                        continuation_section,
+                        (lambda x: x['reloadContinuationItemsCommand']['continuationItems'],
+                         lambda x: x['appendContinuationItemsAction']['continuationItems']),
+                        list) or []
+                    if is_first_continuation:
+                        total_comments, continuation = extract_header(continuation_items)
+                        if total_comments:
+                            yield total_comments
+                        is_first_continuation = False
+                        if continuation:
+                            break
+                        continue
+                    count = 0
+                    for count, entry in enumerate(extract_thread(continuation_items)):
+                        yield entry
+                    continuation = self._extract_continuation({'contents': continuation_items})
+                    if continuation:
+                        # Sometimes YouTube provides a continuation without any comments
+                        # In most cases we end up just downloading these with very little comments to come.
+                        if count == 0:
+                            if not parent:
+                                self.report_warning('No comments received - assuming end of comments')
+                            continuation = None
+                        break
 
-                for entry in known_continuation_renderers[key](continuation_renderer):
-                    yield entry
+            # Deprecated response structure
+            elif isinstance(continuation_contents, dict):
+                known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation')
+                for key, continuation_renderer in continuation_contents.items():
+                    if key not in known_continuation_renderers:
+                        continue
+                    if not isinstance(continuation_renderer, dict):
+                        continue
+                    if is_first_continuation:
+                        header_continuation_items = [continuation_renderer.get('header') or {}]
+                        total_comments, continuation = extract_header(header_continuation_items)
+                        if total_comments:
+                            yield total_comments
+                        is_first_continuation = False
+                        if continuation:
+                            break
 
-                continuation = YoutubeTabIE._extract_continuation(continuation_renderer)  # TODO
-                break
+                    # Sometimes YouTube provides a continuation without any comments
+                    # In most cases we end up just downloading these with very little comments to come.
+                    count = 0
+                    for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {})):
+                        yield entry
+                    continuation = self._extract_continuation(continuation_renderer)
+                    if count == 0:
+                        if not parent:
+                            self.report_warning('No comments received - assuming end of comments')
+                        continuation = None
+                    break
 
-    def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
+    @staticmethod
+    def _generate_comment_continuation(video_id):
+        """
+        Generates initial comment section continuation token from given video id
+        """
+        b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8')))
+        parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u')
+        new_continuation_intlist = list(itertools.chain.from_iterable(
+            [bytes_to_intlist(base64.b64decode(part)) for part in parts]))
+        return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8')
+
+    def _extract_comments(self, ytcfg, video_id, contents, webpage):
         """Entry for comment extraction"""
+        def _real_comment_extract(contents):
+            if isinstance(contents, list):
+                for entry in contents:
+                    for key, renderer in entry.items():
+                        if key not in known_entry_comment_renderers:
+                            continue
+                        yield from self._comment_entries(
+                            renderer, video_id=video_id, ytcfg=ytcfg,
+                            identity_token=self._extract_identity_token(webpage, item_id=video_id),
+                            account_syncid=self._extract_account_syncid(ytcfg))
+                        break
         comments = []
-        known_entry_comment_renderers = (
-            'itemSectionRenderer',
-        )
+        known_entry_comment_renderers = ('itemSectionRenderer',)
         estimated_total = 0
-        for entry in contents:
-            for key, renderer in entry.items():
-                if key not in known_entry_comment_renderers:
-                    continue
-
-                comment_iter = self._comment_entries(
-                    renderer,
-                    identity_token=self._extract_identity_token(webpage, item_id=video_id),
-                    account_syncid=self._extract_account_syncid(ytcfg),
-                    ytcfg=ytcfg,
-                    session_token_list=[xsrf_token])
+        max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0]) or float('inf')
 
-                for comment in comment_iter:
-                    if isinstance(comment, int):
-                        estimated_total = comment
-                        continue
-                    comments.append(comment)
-                break
+        try:
+            for comment in _real_comment_extract(contents):
+                if len(comments) >= max_comments:
+                    break
+                if isinstance(comment, int):
+                    estimated_total = comment
+                    continue
+                comments.append(comment)
+        except KeyboardInterrupt:
+            self.to_screen('Interrupted by user')
         self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total))
         return {
             'comments': comments,
@@ -2176,232 +2345,137 @@ def _generate_player_context(sts=None):
         return {
             'playbackContext': {
                 'contentPlaybackContext': context
-            }
+            },
+            'contentCheckOk': True,
+            'racyCheckOk': True
         }
 
     @staticmethod
-    def _get_video_info_params(video_id):
-        return {
+    def _get_video_info_params(video_id, client='TVHTML5'):
+        GVI_CLIENTS = {
+            'ANDROID': {
+                'c': 'ANDROID',
+                'cver': '16.20',
+            },
+            'TVHTML5': {
+                'c': 'TVHTML5',
+                'cver': '6.20180913',
+            },
+            'IOS': {
+                'c': 'IOS',
+                'cver': '16.20'
+            }
+        }
+        query = {
             'video_id': video_id,
             'eurl': 'https://youtube.googleapis.com/v/' + video_id,
-            'html5': '1',
-            'c': 'TVHTML5',
-            'cver': '6.20180913',
+            'html5': '1'
         }
+        query.update(GVI_CLIENTS.get(client))
+        return query
 
-    def _real_extract(self, url):
-        url, smuggled_data = unsmuggle_url(url, {})
-        video_id = self._match_id(url)
-
-        is_music_url = smuggled_data.get('is_music_url') or self.is_music_url(url)
-
-        base_url = self.http_scheme() + '//www.youtube.com/'
-        webpage_url = base_url + 'watch?v=' + video_id
-        webpage = self._download_webpage(
-            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
+    def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr):
+
+        session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
+        syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
+        sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False)
+        headers = self.generate_api_headers(
+            player_ytcfg, identity_token, syncid,
+            default_client=self._YT_CLIENTS[client], session_index=session_index)
+
+        yt_query = {'videoId': video_id}
+        yt_query.update(self._generate_player_context(sts))
+        return self._extract_response(
+            item_id=video_id, ep='player', query=yt_query,
+            ytcfg=player_ytcfg, headers=headers, fatal=False,
+            default_client=self._YT_CLIENTS[client],
+            note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
+        ) or None
+
+    def _extract_age_gated_player_response(self, client, video_id, ytcfg, identity_token, player_url, initial_pr):
+        # get_video_info endpoint seems to be completely dead
+        gvi_client = None  # self._YT_CLIENTS.get(f'_{client}_agegate')
+        if gvi_client:
+            pr = self._parse_json(traverse_obj(
+                compat_parse_qs(self._download_webpage(
+                    self.http_scheme() + '//www.youtube.com/get_video_info', video_id,
+                    'Refetching age-gated %s info webpage' % gvi_client.lower(),
+                    'unable to download video info webpage', fatal=False,
+                    query=self._get_video_info_params(video_id, client=gvi_client))),
+                ('player_response', 0), expected_type=str) or '{}', video_id)
+            if pr:
+                return pr
+        self.report_warning('Falling back to embedded-only age-gate workaround')
 
-        ytcfg = self._extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
-        identity_token = self._extract_identity_token(webpage, video_id)
-        syncid = self._extract_account_syncid(ytcfg)
-        headers = self._generate_api_headers(ytcfg, identity_token, syncid)
+        if not self._YT_CLIENTS.get(f'_{client}_embedded'):
+            return
+        embed_webpage = None
+        if client == 'web' and 'configs' not in self._configuration_arg('player_skip'):
+            embed_webpage = self._download_webpage(
+                'https://www.youtube.com/embed/%s?html5=1' % video_id,
+                video_id=video_id, note=f'Downloading age-gated {client} embed config')
+
+        ytcfg_age = self.extract_ytcfg(video_id, embed_webpage) or {}
+        # If we extracted the embed webpage, it'll tell us if we can view the video
+        embedded_pr = self._parse_json(
+            traverse_obj(ytcfg_age, ('PLAYER_VARS', 'embedded_player_response'), expected_type=str) or '{}',
+            video_id=video_id)
+        embedded_ps_reason = traverse_obj(embedded_pr, ('playabilityStatus', 'reason'), expected_type=str) or ''
+        if embedded_ps_reason in self._AGE_GATE_REASONS:
+            return
+        return self._extract_player_response(
+            f'_{client}_embedded', video_id,
+            ytcfg_age or ytcfg, ytcfg_age if client == 'web' else {},
+            identity_token, player_url, initial_pr)
 
-        player_url = self._extract_player_url(ytcfg, webpage)
+    def _get_requested_clients(self, url, smuggled_data):
+        requested_clients = [client for client in self._configuration_arg('player_client')
+                             if client[:0] != '_' and client in self._YT_CLIENTS]
+        if not requested_clients:
+            requested_clients = ['android', 'web']
 
-        player_client = (self._configuration_arg('player_client') or [''])[0]
-        if player_client not in ('web', 'android', ''):
-            self.report_warning(f'Invalid player_client {player_client} given. Falling back to WEB')
-        force_mobile_client = player_client == 'android'
-        player_skip = self._configuration_arg('player_skip')
+        if smuggled_data.get('is_music_url') or self.is_music_url(url):
+            requested_clients.extend(
+                f'{client}_music' for client in requested_clients if not client.endswith('_music'))
 
-        def get_text(x):
-            if not x:
-                return
-            text = x.get('simpleText')
-            if text and isinstance(text, compat_str):
-                return text
-            runs = x.get('runs')
-            if not isinstance(runs, list):
-                return
-            return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
+        return orderedSet(requested_clients)
 
-        ytm_streaming_data = {}
-        if is_music_url:
-            ytm_webpage = None
-            sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False)
-            if sts and not force_mobile_client and 'configs' not in player_skip:
-                ytm_webpage = self._download_webpage(
-                    'https://music.youtube.com',
-                    video_id, fatal=False, note="Downloading remix client config")
-
-            ytm_cfg = self._extract_ytcfg(video_id, ytm_webpage) or {}
-            ytm_client = 'WEB_REMIX'
-            if not sts or force_mobile_client:
-                # Android client already has signature descrambled
-                # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562
-                if not sts:
-                    self.report_warning('Falling back to mobile remix client for player API.')
-                ytm_client = 'ANDROID_MUSIC'
-                ytm_cfg = {}
-
-            ytm_headers = self._generate_api_headers(
-                ytm_cfg, identity_token, syncid,
-                client=ytm_client)
-            ytm_query = {'videoId': video_id}
-            ytm_query.update(self._generate_player_context(sts))
-
-            ytm_player_response = self._extract_response(
-                item_id=video_id, ep='player', query=ytm_query,
-                ytcfg=ytm_cfg, headers=ytm_headers, fatal=False,
-                default_client=ytm_client,
-                note='Downloading %sremix player API JSON' % ('mobile ' if force_mobile_client else ''))
-
-            ytm_streaming_data = try_get(ytm_player_response, lambda x: x['streamingData']) or {}
-        player_response = None
+    def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg, player_url, identity_token):
+        initial_pr = None
         if webpage:
-            player_response = self._extract_yt_initial_variable(
+            initial_pr = self._extract_yt_initial_variable(
                 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
                 video_id, 'initial player response')
-        if not player_response or force_mobile_client:
-            sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False)
-            yt_client = 'WEB'
-            ytpcfg = ytcfg
-            ytp_headers = headers
-            if not sts or force_mobile_client:
-                # Android client already has signature descrambled
-                # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562
-                if not sts:
-                    self.report_warning('Falling back to mobile client for player API.')
-                yt_client = 'ANDROID'
-                ytpcfg = {}
-                ytp_headers = self._generate_api_headers(ytpcfg, identity_token, syncid, yt_client)
-
-            yt_query = {'videoId': video_id}
-            yt_query.update(self._generate_player_context(sts))
-            player_response = self._extract_response(
-                item_id=video_id, ep='player', query=yt_query,
-                ytcfg=ytpcfg, headers=ytp_headers, fatal=False,
-                default_client=yt_client,
-                note='Downloading %splayer API JSON' % ('mobile ' if force_mobile_client else '')
-            )
-
-        # Age-gate workarounds
-        playability_status = player_response.get('playabilityStatus') or {}
-        if playability_status.get('reason') in self._AGE_GATE_REASONS:
-            pr = self._parse_json(try_get(compat_parse_qs(
-                self._download_webpage(
-                    base_url + 'get_video_info', video_id,
-                    'Refetching age-gated info webpage', 'unable to download video info webpage',
-                    query=self._get_video_info_params(video_id), fatal=False)),
-                lambda x: x['player_response'][0],
-                compat_str) or '{}', video_id)
-            if not pr:
-                self.report_warning('Falling back to embedded-only age-gate workaround.')
-                embed_webpage = None
-                sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False)
-                if sts and not force_mobile_client and 'configs' not in player_skip:
-                    embed_webpage = self._download_webpage(
-                        'https://www.youtube.com/embed/%s?html5=1' % video_id,
-                        video_id=video_id, note='Downloading age-gated embed config')
-
-                ytcfg_age = self._extract_ytcfg(video_id, embed_webpage) or {}
-                # If we extracted the embed webpage, it'll tell us if we can view the video
-                embedded_pr = self._parse_json(
-                    try_get(ytcfg_age, lambda x: x['PLAYER_VARS']['embedded_player_response'], str) or '{}',
-                    video_id=video_id)
-                embedded_ps_reason = try_get(embedded_pr, lambda x: x['playabilityStatus']['reason'], str) or ''
-                if embedded_ps_reason not in self._AGE_GATE_REASONS:
-                    yt_client = 'WEB_EMBEDDED_PLAYER'
-                    if not sts or force_mobile_client:
-                        # Android client already has signature descrambled
-                        # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562
-                        if not sts:
-                            self.report_warning(
-                                'Falling back to mobile embedded client for player API (note: some formats may be missing).')
-                        yt_client = 'ANDROID_EMBEDDED_PLAYER'
-                        ytcfg_age = {}
-
-                    ytage_headers = self._generate_api_headers(
-                        ytcfg_age, identity_token, syncid, client=yt_client)
-                    yt_age_query = {'videoId': video_id}
-                    yt_age_query.update(self._generate_player_context(sts))
-                    pr = self._extract_response(
-                        item_id=video_id, ep='player', query=yt_age_query,
-                        ytcfg=ytcfg_age, headers=ytage_headers, fatal=False,
-                        default_client=yt_client,
-                        note='Downloading %sage-gated player API JSON' % ('mobile ' if force_mobile_client else '')
-                    ) or {}
-
-            if pr:
-                player_response = pr
-
-        trailer_video_id = try_get(
-            playability_status,
-            lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'],
-            compat_str)
-        if trailer_video_id:
-            return self.url_result(
-                trailer_video_id, self.ie_key(), trailer_video_id)
-
-        search_meta = (
-            lambda x: self._html_search_meta(x, webpage, default=None)) \
-            if webpage else lambda x: None
-
-        video_details = player_response.get('videoDetails') or {}
-        microformat = try_get(
-            player_response,
-            lambda x: x['microformat']['playerMicroformatRenderer'],
-            dict) or {}
-        video_title = video_details.get('title') \
-            or get_text(microformat.get('title')) \
-            or search_meta(['og:title', 'twitter:title', 'title'])
-        video_description = video_details.get('shortDescription')
-
-        if not smuggled_data.get('force_singlefeed', False):
-            if not self.get_param('noplaylist'):
-                multifeed_metadata_list = try_get(
-                    player_response,
-                    lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
-                    compat_str)
-                if multifeed_metadata_list:
-                    entries = []
-                    feed_ids = []
-                    for feed in multifeed_metadata_list.split(','):
-                        # Unquote should take place before split on comma (,) since textual
-                        # fields may contain comma as well (see
-                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
-                        feed_data = compat_parse_qs(
-                            compat_urllib_parse_unquote_plus(feed))
-
-                        def feed_entry(name):
-                            return try_get(
-                                feed_data, lambda x: x[name][0], compat_str)
-
-                        feed_id = feed_entry('id')
-                        if not feed_id:
-                            continue
-                        feed_title = feed_entry('title')
-                        title = video_title
-                        if feed_title:
-                            title += ' (%s)' % feed_title
-                        entries.append({
-                            '_type': 'url_transparent',
-                            'ie_key': 'Youtube',
-                            'url': smuggle_url(
-                                base_url + 'watch?v=' + feed_data['id'][0],
-                                {'force_singlefeed': True}),
-                            'title': title,
-                        })
-                        feed_ids.append(feed_id)
-                    self.to_screen(
-                        'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
-                        % (', '.join(feed_ids), video_id))
-                    return self.playlist_result(
-                        entries, video_id, video_title, video_description)
+        for client in clients:
+            player_ytcfg = master_ytcfg if client == 'web' else {}
+            if client == 'web' and initial_pr:
+                pr = initial_pr
             else:
-                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
-
-        formats, itags, stream_ids = [], [], []
+                if client == 'web_music' and 'configs' not in self._configuration_arg('player_skip'):
+                    ytm_webpage = self._download_webpage(
+                        'https://music.youtube.com',
+                        video_id, fatal=False, note='Downloading remix client config')
+                    player_ytcfg = self.extract_ytcfg(video_id, ytm_webpage) or {}
+                pr = self._extract_player_response(
+                    client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr)
+            if pr:
+                yield pr
+            if traverse_obj(pr, ('playabilityStatus', 'reason')) in self._AGE_GATE_REASONS:
+                pr = self._extract_age_gated_player_response(
+                    client, video_id, player_ytcfg or master_ytcfg, identity_token, player_url, initial_pr)
+                if pr:
+                    yield pr
+        # Android player_response does not have microFormats which are needed for
+        # extraction of some data. So we return the initial_pr with formats
+        # stripped out even if not requested by the user
+        # See: https://github.com/yt-dlp/yt-dlp/issues/501
+        if initial_pr and 'web' not in clients:
+            initial_pr['streamingData'] = None
+            yield initial_pr
+
+    def _extract_formats(self, streaming_data, video_id, player_url, is_live):
+        itags, stream_ids = [], []
         itag_qualities = {}
         q = qualities([
             # "tiny" is the smallest video-only format. But some audio-only formats
But some audio-only formats @@ -2409,12 +2483,7 @@ def feed_entry(name): 'tiny', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres' ]) - - streaming_data = player_response.get('streamingData') or {} - streaming_formats = streaming_data.get('formats') or [] - streaming_formats.extend(streaming_data.get('adaptiveFormats') or []) - streaming_formats.extend(ytm_streaming_data.get('formats') or []) - streaming_formats.extend(ytm_streaming_data.get('adaptiveFormats') or []) + streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[]) for fmt in streaming_formats: if fmt.get('targetDurationSec') or fmt.get('drmFamilies'): @@ -2460,7 +2529,8 @@ def feed_entry(name): 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), 'format_id': itag, - 'format_note': audio_track.get('displayName') or fmt.get('qualityLabel') or quality, + 'format_note': ', '.join(filter(None, ( + audio_track.get('displayName'), fmt.get('qualityLabel') or quality))), 'fps': int_or_none(fmt.get('fps')), 'height': int_or_none(fmt.get('height')), 'quality': q(quality), @@ -2478,6 +2548,7 @@ def feed_entry(name): # but is actually worse than all other formats if dct['ext'] == '3gp': dct['quality'] = q('tiny') + dct['preference'] = -10 no_audio = dct.get('acodec') == 'none' no_video = dct.get('vcodec') == 'none' if no_audio: @@ -2491,22 +2562,25 @@ def feed_entry(name): } if dct.get('ext'): dct['container'] = dct['ext'] + '_dash' - formats.append(dct) + yield dct skip_manifests = self._configuration_arg('skip') - get_dash = 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True) + get_dash = not is_live and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True) get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True) - for sd in (streaming_data, ytm_streaming_data): + for sd in streaming_data: hls_manifest_url = get_hls and sd.get('hlsManifestUrl') if hls_manifest_url: for f in self._extract_m3u8_formats( hls_manifest_url, video_id, 'mp4', fatal=False): itag = self._search_regex( r'/itag/(\d+)', f['url'], 'itag', default=None) + if itag in itags: + continue if itag: f['format_id'] = itag - formats.append(f) + itags.append(itag) + yield f dash_manifest_url = get_dash and sd.get('dashManifestUrl') if dash_manifest_url: @@ -2515,6 +2589,8 @@ def feed_entry(name): itag = f['format_id'] if itag in itags: continue + if itag: + itags.append(itag) if itag in itag_qualities: f['quality'] = q(itag_qualities[itag]) filesize = int_or_none(self._search_regex( @@ -2522,33 +2598,134 @@ def feed_entry(name): or f['url'], 'file size', default=None)) if filesize: f['filesize'] = filesize - formats.append(f) + yield f + + def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + video_id = self._match_id(url) + + base_url = self.http_scheme() + '//www.youtube.com/' + webpage_url = base_url + 'watch?v=' + video_id + webpage = self._download_webpage( + webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False) + + master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg() + player_url = self._extract_player_url(master_ytcfg, webpage) + identity_token = self._extract_identity_token(webpage, video_id) + + player_responses = list(self._extract_player_responses( + 
@@ -2409,12 +2483,7 @@ def feed_entry(name):
             'tiny', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high',  # Audio only formats
             'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
         ])
-
-        streaming_data = player_response.get('streamingData') or {}
-        streaming_formats = streaming_data.get('formats') or []
-        streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
-        streaming_formats.extend(ytm_streaming_data.get('formats') or [])
-        streaming_formats.extend(ytm_streaming_data.get('adaptiveFormats') or [])
+        streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])

         for fmt in streaming_formats:
             if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
@@ -2460,7 +2529,8 @@ def feed_entry(name):
                 'asr': int_or_none(fmt.get('audioSampleRate')),
                 'filesize': int_or_none(fmt.get('contentLength')),
                 'format_id': itag,
-                'format_note': audio_track.get('displayName') or fmt.get('qualityLabel') or quality,
+                'format_note': ', '.join(filter(None, (
+                    audio_track.get('displayName'), fmt.get('qualityLabel') or quality))),
                 'fps': int_or_none(fmt.get('fps')),
                 'height': int_or_none(fmt.get('height')),
                 'quality': q(quality),
@@ -2478,6 +2548,7 @@ def feed_entry(name):
             # but is actually worse than all other formats
             if dct['ext'] == '3gp':
                 dct['quality'] = q('tiny')
+                dct['preference'] = -10
             no_audio = dct.get('acodec') == 'none'
             no_video = dct.get('vcodec') == 'none'
             if no_audio:
@@ -2491,22 +2562,25 @@ def feed_entry(name):
                 }
             if dct.get('ext'):
                 dct['container'] = dct['ext'] + '_dash'
-            formats.append(dct)
+            yield dct

         skip_manifests = self._configuration_arg('skip')
-        get_dash = 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True)
+        get_dash = not is_live and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True)
         get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True)

-        for sd in (streaming_data, ytm_streaming_data):
+        for sd in streaming_data:
             hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
             if hls_manifest_url:
                 for f in self._extract_m3u8_formats(
                         hls_manifest_url, video_id, 'mp4', fatal=False):
                     itag = self._search_regex(
                         r'/itag/(\d+)', f['url'], 'itag', default=None)
+                    if itag in itags:
+                        continue
                     if itag:
                         f['format_id'] = itag
-                    formats.append(f)
+                        itags.append(itag)
+                    yield f

             dash_manifest_url = get_dash and sd.get('dashManifestUrl')
             if dash_manifest_url:
@@ -2515,6 +2589,8 @@ def feed_entry(name):
                     itag = f['format_id']
                     if itag in itags:
                         continue
+                    if itag:
+                        itags.append(itag)
                     if itag in itag_qualities:
                         f['quality'] = q(itag_qualities[itag])
                     filesize = int_or_none(self._search_regex(
@@ -2522,33 +2598,134 @@ def feed_entry(name):
                             or f['url'], 'file size', default=None))
                     if filesize:
                         f['filesize'] = filesize
-                    formats.append(f)
+                    yield f
+
+    def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
+        video_id = self._match_id(url)
+
+        base_url = self.http_scheme() + '//www.youtube.com/'
+        webpage_url = base_url + 'watch?v=' + video_id
+        webpage = self._download_webpage(
+            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
+
+        master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
+        player_url = self._extract_player_url(master_ytcfg, webpage)
+        identity_token = self._extract_identity_token(webpage, video_id)
+
+        player_responses = list(self._extract_player_responses(
+            self._get_requested_clients(url, smuggled_data),
+            video_id, webpage, master_ytcfg, player_url, identity_token))
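
`get_first` above is a thin wrapper over `traverse_obj` that scans all player responses in order and returns the first non-None hit for a key path. A self-contained approximation, ignoring `expected_type` and the other keyword options:

def get_first(dicts, *keys, default=None):
    for d in dicts:
        cur = d
        for key in keys:
            cur = cur.get(key) if isinstance(cur, dict) else None
            if cur is None:
                break
        if cur is not None:
            return cur
    return default

video_details = [{}, {'title': 'from android client'}, {'title': 'from web client'}]
assert get_first(video_details, 'title') == 'from android client'
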
+
+        get_first = lambda obj, keys, **kwargs: traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
+
+        playability_statuses = traverse_obj(
+            player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
+
+        trailer_video_id = get_first(
+            playability_statuses,
+            ('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
+            expected_type=str)
+        if trailer_video_id:
+            return self.url_result(
+                trailer_video_id, self.ie_key(), trailer_video_id)
+
+        search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
+                       if webpage else (lambda x: None))
+
+        video_details = traverse_obj(
+            player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
+        microformats = traverse_obj(
+            player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
+            expected_type=dict, default=[])
+        video_title = (
+            get_first(video_details, 'title')
+            or self._get_text(microformats, (..., 'title'))
+            or search_meta(['og:title', 'twitter:title', 'title']))
+        video_description = get_first(video_details, 'shortDescription')
+
+        if not smuggled_data.get('force_singlefeed', False):
+            if not self.get_param('noplaylist'):
+                multifeed_metadata_list = get_first(
+                    player_responses,
+                    ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
+                    expected_type=str)
+                if multifeed_metadata_list:
+                    entries = []
+                    feed_ids = []
+                    for feed in multifeed_metadata_list.split(','):
+                        # Unquote should take place before split on comma (,) since textual
+                        # fields may contain comma as well (see
+                        # https://github.com/ytdl-org/youtube-dl/issues/8536)
+                        feed_data = compat_parse_qs(
+                            compat_urllib_parse_unquote_plus(feed))
+
+                        def feed_entry(name):
+                            return try_get(
+                                feed_data, lambda x: x[name][0], compat_str)
+
+                        feed_id = feed_entry('id')
+                        if not feed_id:
+                            continue
+                        feed_title = feed_entry('title')
+                        title = video_title
+                        if feed_title:
+                            title += ' (%s)' % feed_title
+                        entries.append({
+                            '_type': 'url_transparent',
+                            'ie_key': 'Youtube',
+                            'url': smuggle_url(
+                                '%swatch?v=%s' % (base_url, feed_data['id'][0]),
+                                {'force_singlefeed': True}),
+                            'title': title,
+                        })
+                        feed_ids.append(feed_id)
+                    self.to_screen(
+                        'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
+                        % (', '.join(feed_ids), video_id))
+                    return self.playlist_result(
+                        entries, video_id, video_title, video_description)
+            else:
+                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+
+        live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
+        is_live = get_first(video_details, 'isLive')
+        if is_live is None:
+            is_live = get_first(live_broadcast_details, 'isLiveNow')
+
+        streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
+        formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live))
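
The `'&n=' in f['url']` check added above is a heuristic: URLs carrying an `n` parameter may be rate-limited until that parameter is transformed, so such formats are kept but deprioritized rather than dropped. A sketch of the effect (URLs are made-up samples):

def deprioritize_possibly_throttled(formats):
    for f in formats:
        if '&n=' in f['url']:  # possibly throttled
            f['source_preference'] = -10
    return formats

fmts = [{'url': 'https://r1.example/videoplayback?itag=22&n=abc'},
        {'url': 'https://r2.example/videoplayback?itag=22'}]
deprioritize_possibly_throttled(fmts)
assert fmts[0]['source_preference'] == -10 and 'source_preference' not in fmts[1]
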

         if not formats:
-            if not self.get_param('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
+            if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
                 self.raise_no_formats(
                     'This video is DRM protected.', expected=True)
-            pemr = try_get(
-                playability_status,
-                lambda x: x['errorScreen']['playerErrorMessageRenderer'],
-                dict) or {}
-            reason = get_text(pemr.get('reason')) or playability_status.get('reason')
-            subreason = pemr.get('subreason')
+            pemr = get_first(
+                playability_statuses,
+                ('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
+            reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
+            subreason = clean_html(self._get_text(pemr, 'subreason') or '')
             if subreason:
-                subreason = clean_html(get_text(subreason))
                 if subreason == 'The uploader has not made this video available in your country.':
-                    countries = microformat.get('availableCountries')
+                    countries = get_first(microformats, 'availableCountries')
                     if not countries:
                         regions_allowed = search_meta('regionsAllowed')
                         countries = regions_allowed.split(',') if regions_allowed else None
                     self.raise_geo_restricted(subreason, countries, metadata_available=True)
-                reason += '\n' + subreason
+                reason += f'. {subreason}'
             if reason:
                 self.raise_no_formats(reason, expected=True)

+        for f in formats:
+            # TODO: detect if throttled
+            if '&n=' in f['url']:  # possibly throttled
+                f['source_preference'] = -10
+                # note = f.get('format_note')
+                # f['format_note'] = f'{note} (throttled)' if note else '(throttled)'
+
         self._sort_formats(formats)

-        keywords = video_details.get('keywords') or []
+        keywords = get_first(video_details, 'keywords', expected_type=list) or []
         if not keywords and webpage:
             keywords = [
                 unescapeHTML(m.group('content'))
@@ -2567,49 +2744,74 @@ def feed_entry(name):
                 break

         thumbnails = []
-        for container in (video_details, microformat):
-            for thumbnail in (try_get(
-                    container,
-                    lambda x: x['thumbnail']['thumbnails'], list) or []):
-                thumbnail_url = thumbnail.get('url')
-                if not thumbnail_url:
-                    continue
-                # Sometimes youtube gives a wrong thumbnail URL. See:
-                # https://github.com/yt-dlp/yt-dlp/issues/233
-                # https://github.com/ytdl-org/youtube-dl/issues/28023
-                if 'maxresdefault' in thumbnail_url:
-                    thumbnail_url = thumbnail_url.split('?')[0]
-                thumbnails.append({
-                    'url': thumbnail_url,
-                    'height': int_or_none(thumbnail.get('height')),
-                    'width': int_or_none(thumbnail.get('width')),
-                    'preference': 1 if 'maxresdefault' in thumbnail_url else -1
-                })
+        thumbnail_dicts = traverse_obj(
+            (video_details, microformats), (..., ..., 'thumbnail', 'thumbnails', ...),
+            expected_type=dict, default=[])
+        for thumbnail in thumbnail_dicts:
+            thumbnail_url = thumbnail.get('url')
+            if not thumbnail_url:
+                continue
+            # Sometimes youtube gives a wrong thumbnail URL. See:
+            # https://github.com/yt-dlp/yt-dlp/issues/233
+            # https://github.com/ytdl-org/youtube-dl/issues/28023
+            if 'maxresdefault' in thumbnail_url:
+                thumbnail_url = thumbnail_url.split('?')[0]
+            thumbnails.append({
+                'url': thumbnail_url,
+                'height': int_or_none(thumbnail.get('height')),
+                'width': int_or_none(thumbnail.get('width')),
+            })
         thumbnail_url = search_meta(['og:image', 'twitter:image'])
         if thumbnail_url:
             thumbnails.append({
                 'url': thumbnail_url,
-                'preference': 1 if 'maxresdefault' in thumbnail_url else -1
             })
-        # All videos have a maxresdefault thumbnail, but sometimes it does not appear in the webpage
-        # See: https://github.com/ytdl-org/youtube-dl/issues/29049
-        thumbnails.append({
-            'url': 'https://i.ytimg.com/vi/%s/maxresdefault.jpg' % video_id,
-            'preference': 1,
-        })
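
The preference formula above ranks generated thumbnail URLs: earlier names in `thumbnail_names` mean higher quality, and a `.webp` variant beats its `.jpg` twin of the same name. A worked standalone example (matching on `/{name}.` here instead of `/{video_id}/{name}`, and with a shortened name list):

thumbnail_names = ['maxresdefault', 'hq720', 'sddefault', 'hqdefault', 'mqdefault', 'default']

def thumb_preference(url):
    i = next((i for i, t in enumerate(thumbnail_names) if f'/{t}.' in url),
             len(thumbnail_names))
    return (0 if '.webp' in url else -1) - 2 * i

assert thumb_preference('https://i.ytimg.com/vi_webp/xxx/maxresdefault.webp') == 0
assert thumb_preference('https://i.ytimg.com/vi/xxx/maxresdefault.jpg') == -1
assert thumb_preference('https://i.ytimg.com/vi/xxx/hq720.jpg') == -3
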
+        # The best-resolution thumbnails sometimes do not appear in the webpage
+        # See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
+        # List of possible thumbnails - Ref:
+        hq_thumbnail_names = ['maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3']
+        # TODO: Test them also? - For some videos, even these don't exist
+        guaranteed_thumbnail_names = [
+            'hqdefault', 'hq1', 'hq2', 'hq3', '0',
+            'mqdefault', 'mq1', 'mq2', 'mq3',
+            'default', '1', '2', '3'
+        ]
+        thumbnail_names = hq_thumbnail_names + guaranteed_thumbnail_names
+        n_thumbnail_names = len(thumbnail_names)
+
+        thumbnails.extend({
+            'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
+                video_id=video_id, name=name, ext=ext,
+                webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
+            '_test_url': name in hq_thumbnail_names,
+        } for name in thumbnail_names for ext in ('webp', 'jpg'))
+        for thumb in thumbnails:
+            i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
+            thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
         self._remove_duplicate_formats(thumbnails)
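
`was_live` above is derived from three partially-known booleans. Restated as a standalone function with the same truth table as the added code:

def infer_live_state(is_live, is_upcoming, live_content):
    if is_live is None and (is_upcoming or live_content is False):
        is_live = False
    if is_upcoming is None and (live_content or is_live):
        is_upcoming = False
    was_live = (False if is_live or is_upcoming or live_content is False
                else None if is_live is None or is_upcoming is None
                else live_content)
    return is_live, is_upcoming, was_live

assert infer_live_state(None, True, True) == (False, True, False)   # upcoming, not yet live
assert infer_live_state(True, None, True) == (True, False, False)   # currently live
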
-        category = microformat.get('category') or search_meta('genre')
-        channel_id = video_details.get('channelId') \
-            or microformat.get('externalChannelId') \
-            or search_meta('channelId')
+        category = get_first(microformats, 'category') or search_meta('genre')
+        channel_id = str_or_none(
+            get_first(video_details, 'channelId')
+            or get_first(microformats, 'externalChannelId')
+            or search_meta('channelId'))
         duration = int_or_none(
-            video_details.get('lengthSeconds')
-            or microformat.get('lengthSeconds')) \
-            or parse_duration(search_meta('duration'))
-        is_live = video_details.get('isLive')
-        is_upcoming = video_details.get('isUpcoming')
-        owner_profile_url = microformat.get('ownerProfileUrl')
+            get_first(video_details, 'lengthSeconds')
+            or get_first(microformats, 'lengthSeconds')
+            or parse_duration(search_meta('duration'))) or None
+        owner_profile_url = get_first(microformats, 'ownerProfileUrl')
+
+        live_content = get_first(video_details, 'isLiveContent')
+        is_upcoming = get_first(video_details, 'isUpcoming')
+        if is_live is None:
+            if is_upcoming or live_content is False:
+                is_live = False
+        if is_upcoming is None and (live_content or is_live):
+            is_upcoming = False
+        live_starttime = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
+        live_endtime = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
+        if not duration and live_endtime and live_starttime:
+            duration = live_endtime - live_starttime

         info = {
             'id': video_id,
@@ -2618,34 +2820,42 @@ def feed_entry(name):
             'thumbnails': thumbnails,
             'description': video_description,
             'upload_date': unified_strdate(
-                microformat.get('uploadDate')
+                get_first(microformats, 'uploadDate')
                 or search_meta('uploadDate')),
-            'uploader': video_details['author'],
+            'uploader': get_first(video_details, 'author'),
             'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
             'uploader_url': owner_profile_url,
             'channel_id': channel_id,
-            'channel_url': 'https://www.youtube.com/channel/' + channel_id if channel_id else None,
+            'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None,
             'duration': duration,
             'view_count': int_or_none(
-                video_details.get('viewCount')
-                or microformat.get('viewCount')
+                get_first((video_details, microformats), (..., 'viewCount'))
                 or search_meta('interactionCount')),
-            'average_rating': float_or_none(video_details.get('averageRating')),
+            'average_rating': float_or_none(get_first(video_details, 'averageRating')),
             'age_limit': 18 if (
-                microformat.get('isFamilySafe') is False
+                get_first(microformats, 'isFamilySafe') is False
                 or search_meta('isFamilyFriendly') == 'false'
                 or search_meta('og:restrictions:age') == '18+') else 0,
             'webpage_url': webpage_url,
             'categories': [category] if category else None,
             'tags': keywords,
+            'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
             'is_live': is_live,
-            'playable_in_embed': playability_status.get('playableInEmbed'),
-            'was_live': video_details.get('isLiveContent'),
+            'was_live': (False if is_live or is_upcoming or live_content is False
+                         else None if is_live is None or is_upcoming is None
+                         else live_content),
+            'live_status': 'is_upcoming' if is_upcoming else None,  # rest will be set by YoutubeDL
+            'release_timestamp': live_starttime,
         }
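
The removed inline fallback above (now factored into `_extract_chapters_from_engagement_panel`) paired every macro-marker's start time with the next marker's start, falling back to the video duration for the last chapter. The pairing logic, reduced to its core:

def build_chapters(start_times, titles, duration):
    chapters = []
    for i, start in enumerate(start_times):
        # the next chapter's start bounds this one; the last runs to the end
        end = start_times[i + 1] if i + 1 < len(start_times) else duration
        chapters.append({'start_time': start, 'end_time': end, 'title': titles[i]})
    return chapters

assert build_chapters([0, 60], ['Intro', 'Main'], 300) == [
    {'start_time': 0, 'end_time': 60, 'title': 'Intro'},
    {'start_time': 60, 'end_time': 300, 'title': 'Main'}]
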
-        pctr = try_get(
-            player_response,
-            lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
+        pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
+        # Converted into dicts to remove duplicates
+        captions = {
+            sub.get('baseUrl'): sub
+            for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
+        translation_languages = {
+            lang.get('languageCode'): lang.get('languageName')
+            for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
         subtitles = {}
         if pctr:
             def process_language(container, base_url, lang_code, sub_name, query):
@@ -2660,8 +2870,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
                     'name': sub_name,
                 })

-            for caption_track in (pctr.get('captionTracks') or []):
-                base_url = caption_track.get('baseUrl')
+            for base_url, caption_track in captions.items():
                 if not base_url:
                     continue
                 if caption_track.get('kind') != 'asr':
@@ -2672,20 +2881,17 @@ def process_language(container, base_url, lang_code, sub_name, query):
                         continue
                     process_language(
                         subtitles, base_url, lang_code,
-                        try_get(caption_track, lambda x: x.get('name').get('simpleText')),
+                        traverse_obj(caption_track, ('name', 'simpleText')),
                         {})
                     continue
                 automatic_captions = {}
-                for translation_language in (pctr.get('translationLanguages') or []):
-                    translation_language_code = translation_language.get('languageCode')
-                    if not translation_language_code:
+                for trans_code, trans_name in translation_languages.items():
+                    if not trans_code:
                         continue
                     process_language(
-                        automatic_captions, base_url, translation_language_code,
-                        try_get(translation_language, (
-                            lambda x: x['languageName']['simpleText'],
-                            lambda x: x['languageName']['runs'][0]['text'])),
-                        {'tlang': translation_language_code})
+                        automatic_captions, base_url, trans_code,
+                        self._get_text(trans_name, max_runs=1),
+                        {'tlang': trans_code})
                 info['automatic_captions'] = automatic_captions
                 info['subtitles'] = subtitles
@@ -2722,9 +2928,13 @@ def process_language(container, base_url, lang_code, sub_name, query):
                 webpage, self._YT_INITIAL_DATA_RE, video_id, 'yt initial data')
         if not initial_data:
+            headers = self.generate_api_headers(
+                master_ytcfg, identity_token, self._extract_account_syncid(master_ytcfg),
+                session_index=self._extract_session_index(master_ytcfg))
+
             initial_data = self._extract_response(
                 item_id=video_id, ep='next', fatal=False,
-                ytcfg=ytcfg, headers=headers, query={'videoId': video_id},
+                ytcfg=master_ytcfg, headers=headers, query={'videoId': video_id},
                 note='Downloading initial data API JSON')

         try:
@@ -2740,38 +2950,10 @@ def process_language(container, base_url, lang_code, sub_name, query):
             pass

         if initial_data:
-            chapters = self._extract_chapters_from_json(
-                initial_data, video_id, duration)
-            if not chapters:
-                for engagment_pannel in (initial_data.get('engagementPanels') or []):
-                    contents = try_get(
-                        engagment_pannel, lambda x: x['engagementPanelSectionListRenderer']['content']['macroMarkersListRenderer']['contents'],
-                        list)
-                    if not contents:
-                        continue
-
-                    def chapter_time(mmlir):
-                        return parse_duration(
-                            get_text(mmlir.get('timeDescription')))
-
-                    chapters = []
-                    for next_num, content in enumerate(contents, start=1):
-                        mmlir = content.get('macroMarkersListItemRenderer') or {}
-                        start_time = chapter_time(mmlir)
-                        end_time = chapter_time(try_get(
-                            contents, lambda x: x[next_num]['macroMarkersListItemRenderer'])) \
-                            if next_num < len(contents) else duration
-                        if start_time is None or end_time is None:
-                            continue
-                        chapters.append({
-                            'start_time': start_time,
-                            'end_time': end_time,
-                            'title': get_text(mmlir.get('title')),
-                        })
-                    if chapters:
-                        break
-            if chapters:
-                info['chapters'] = chapters
+            info['chapters'] = (
+                self._extract_chapters_from_json(initial_data, duration)
+                or self._extract_chapters_from_engagement_panel(initial_data, duration)
+                or None)
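
The badge scan above replaces the old nested, break-laden loops: all badge labels are collected into one set first, then mapped onto flags, which also lets an 'unlisted' badge be honored. A condensed sketch of the mapping (labels as they appear on watch pages):

def flags_from_badges(badge_labels):
    flags = {'is_membersonly': False, 'is_premium': False, 'is_unlisted': None}
    for label in badge_labels:
        label = label.lower()
        if label == 'members only':
            flags['is_membersonly'] = True
        elif label == 'premium':
            flags['is_premium'] = True
        elif label == 'unlisted':
            flags['is_unlisted'] = True
    return flags

assert flags_from_badges({'Unlisted'})['is_unlisted'] is True
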
             contents = try_get(
                 initial_data,
@@ -2782,7 +2964,7 @@ def chapter_time(mmlir):
             if vpir:
                 stl = vpir.get('superTitleLink')
                 if stl:
-                    stl = get_text(stl)
+                    stl = self._get_text(stl)
                     if try_get(
                             vpir,
                             lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
@@ -2822,10 +3004,7 @@ def chapter_time(mmlir):
                     })
             vsir = content.get('videoSecondaryInfoRenderer')
             if vsir:
-                info['channel'] = get_text(try_get(
-                    vsir,
-                    lambda x: x['owner']['videoOwnerRenderer']['title'],
-                    dict))
+                info['channel'] = self._get_text(vsir, ('owner', 'videoOwnerRenderer', 'title'))
                 rows = try_get(
                     vsir,
                     lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
@@ -2840,8 +3019,8 @@ def chapter_time(mmlir):
                     mrr_title = mrr.get('title')
                     if not mrr_title:
                         continue
-                    mrr_title = get_text(mrr['title'])
-                    mrr_contents_text = get_text(mrr['contents'][0])
+                    mrr_title = self._get_text(mrr, 'title')
+                    mrr_contents_text = self._get_text(mrr, ('contents', 0))
                     if mrr_title == 'License':
                         info['license'] = mrr_contents_text
                     elif not multiple_songs:
@@ -2866,28 +3045,27 @@ def chapter_time(mmlir):
                 if v:
                     info[d_k] = v

-        is_private = bool_or_none(video_details.get('isPrivate'))
-        is_unlisted = bool_or_none(microformat.get('isUnlisted'))
+        is_private = get_first(video_details, 'isPrivate', expected_type=bool)
+        is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
         is_membersonly = None
         is_premium = None
         if initial_data and is_private is not None:
             is_membersonly = False
             is_premium = False
-            contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
-            for content in contents or []:
-                badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
-                for badge in badges or []:
-                    label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label']) or ''
-                    if label.lower() == 'members only':
-                        is_membersonly = True
-                        break
-                    elif label.lower() == 'premium':
-                        is_premium = True
-                        break
-                if is_membersonly or is_premium:
-                    break
+            contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
+            badge_labels = set()
+            for content in contents:
+                if not isinstance(content, dict):
+                    continue
+                badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
+            for badge_label in badge_labels:
+                if badge_label.lower() == 'members only':
+                    is_membersonly = True
+                elif badge_label.lower() == 'premium':
+                    is_premium = True
+                elif badge_label.lower() == 'unlisted':
+                    is_unlisted = True

-        # TODO: Add this for playlists
         info['availability'] = self._availability(
             is_private=is_private,
             needs_premium=is_premium,
@@ -2900,9 +3078,8 @@ def chapter_time(mmlir):
         get_comments = self.get_param('getcomments', False)
         if get_annotations or get_comments:
             xsrf_token = None
-            ytcfg = self._extract_ytcfg(video_id, webpage)
-            if ytcfg:
-                xsrf_token = try_get(ytcfg, lambda x: x['XSRF_TOKEN'], compat_str)
+            if master_ytcfg:
+                xsrf_token = try_get(master_ytcfg, lambda x: x['XSRF_TOKEN'], compat_str)
             if not xsrf_token:
                 xsrf_token = self._search_regex(
                     r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>(?:(?!\2).)+)\2',
@@ -2910,12 +3087,14 @@ def chapter_time(mmlir):

         # annotations
         if get_annotations:
-            invideo_url = try_get(
-                player_response, lambda x: x['annotations'][0]['playerAnnotationsUrlsRenderer']['invideoUrl'], compat_str)
+            invideo_url = get_first(
+                player_responses,
+                ('annotations', 0, 'playerAnnotationsUrlsRenderer', 'invideoUrl'),
+                expected_type=str)
             if xsrf_token and invideo_url:
                 xsrf_field_name = None
-                if ytcfg:
-                    xsrf_field_name = try_get(ytcfg, lambda x: x['XSRF_FIELD_NAME'], compat_str)
+                if master_ytcfg:
+                    xsrf_field_name = try_get(master_ytcfg, lambda x: x['XSRF_FIELD_NAME'], compat_str)
                 if not xsrf_field_name:
                     xsrf_field_name = self._search_regex(
                         r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
@@ -2928,9 +3107,9 @@ def chapter_time(mmlir):
                     data=urlencode_postdata({xsrf_field_name: xsrf_token}))

         if get_comments:
-            info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage, xsrf_token)
+            info['__post_extractor'] = lambda: self._extract_comments(master_ytcfg, video_id, contents, webpage)

-        self.mark_watched(video_id, player_response)
+        self.mark_watched(video_id, player_responses)

         return info
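
The helpers removed above (moved to the base extractor in this commit) turned either a legacy `nextContinuationData` or a `continuationItemRenderer` into the query for the next page. A rough sketch of the renderer-scanning half, shaped like the API query the new `_entries` loop sends (the exact return shape of the real helper may differ):

def extract_continuation(renderer):
    # Rough sketch; the real helper also handles legacy nextContinuationData
    for key in ('contents', 'items'):
        for content in renderer.get(key) or []:
            ep = (content.get('continuationItemRenderer') or {}).get('continuationEndpoint') or {}
            token = (ep.get('continuationCommand') or {}).get('token')
            if token:
                return {'continuation': token,
                        'clickTracking': {'clickTrackingParams': ep.get('clickTrackingParams')}}

renderer = {'contents': [{'continuationItemRenderer': {'continuationEndpoint': {
    'continuationCommand': {'token': 'abc'}, 'clickTrackingParams': 'ctp'}}}]}
assert extract_continuation(renderer)['continuation'] == 'abc'
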
@@ -3211,7 +3390,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
         'info_dict': {
-            'id': 'X1whbWASnNQ',  # This will keep changing
+            'id': 'FMtPN8yp5LU',  # This will keep changing
             'ext': 'mp4',
             'title': compat_str,
             'uploader': 'Sky News',
@@ -3361,6 +3540,17 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
         },
         'playlist_count': 50,
+    }, {
+        'note': 'unlisted single video playlist',
+        'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
+        'info_dict': {
+            'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
+            'uploader': 'colethedj',
+            'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
+            'title': 'yt-dlp unlisted playlist test',
+            'availability': 'unlisted'
+        },
+        'playlist_count': 1,
     }]

     @classmethod
@@ -3402,9 +3592,8 @@ def _grid_entries(self, grid_renderer):
             renderer = self._extract_basic_item_renderer(item)
             if not isinstance(renderer, dict):
                 continue
-            title = try_get(
-                renderer, (lambda x: x['title']['runs'][0]['text'],
-                           lambda x: x['title']['simpleText']), compat_str)
+            title = self._get_text(renderer, 'title')
+
             # playlist
             playlist_id = renderer.get('playlistId')
             if playlist_id:
@@ -3421,8 +3610,6 @@ def _grid_entries(self, grid_renderer):
             # channel
             channel_id = renderer.get('channelId')
             if channel_id:
-                title = try_get(
-                    renderer, lambda x: x['title']['simpleText'], compat_str)
                 yield self.url_result(
                     'https://www.youtube.com/channel/%s' % channel_id,
                     ie=YoutubeTabIE.ie_key(), video_title=title)
@@ -3465,8 +3652,7 @@ def _shelf_entries(self, shelf_renderer, skip_channels=False):
             # will not work
             if skip_channels and '/channels?' in shelf_url:
                 return
-            title = try_get(
-                shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+            title = self._get_text(shelf_renderer, 'title')
             yield self.url_result(shelf_url, video_title=title)
         # Shelf may not contain shelf URL, fallback to extraction from content
         for entry in self._shelf_entries_from_content(shelf_renderer):
@@ -3553,52 +3739,6 @@ def _rich_grid_entries(self, contents):
                 if entry:
                     yield entry
         '''
-
-    @staticmethod
-    def _build_continuation_query(continuation, ctp=None):
-        query = {
-            'ctoken': continuation,
-            'continuation': continuation,
-        }
-        if ctp:
-            query['itct'] = ctp
-        return query
-
-    @staticmethod
-    def _extract_next_continuation_data(renderer):
-        next_continuation = try_get(
-            renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict)
-        if not next_continuation:
-            return
-        continuation = next_continuation.get('continuation')
-        if not continuation:
-            return
-        ctp = next_continuation.get('clickTrackingParams')
-        return YoutubeTabIE._build_continuation_query(continuation, ctp)
-
-    @classmethod
-    def _extract_continuation(cls, renderer):
-        next_continuation = cls._extract_next_continuation_data(renderer)
-        if next_continuation:
-            return next_continuation
-        contents = []
-        for key in ('contents', 'items'):
-            contents.extend(try_get(renderer, lambda x: x[key], list) or [])
-        for content in contents:
-            if not isinstance(content, dict):
-                continue
-            continuation_ep = try_get(
-                content, lambda x: x['continuationItemRenderer']['continuationEndpoint'],
-                dict)
-            if not continuation_ep:
-                continue
-            continuation = try_get(
-                continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
-            if not continuation:
-                continue
-            ctp = continuation_ep.get('clickTrackingParams')
-            return YoutubeTabIE._build_continuation_query(continuation, ctp)
-
     def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):
         def extract_entries(parent_renderer):  # this needs to called again for continuation to work with feeds
@@ -3651,20 +3791,15 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
             for entry in extract_entries(parent_renderer):
                 yield entry
             continuation = continuation_list[0]
-        context = self._extract_context(ytcfg)
-        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
+        visitor_data = None

         for page_num in itertools.count(1):
             if not continuation:
                 break
-            query = {
-                'continuation': continuation['continuation'],
-                'clickTracking': {'clickTrackingParams': continuation['itct']}
-            }
-            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
             response = self._extract_response(
                 item_id='%s page %s' % (item_id, page_num),
-                query=query, headers=headers, ytcfg=ytcfg,
+                query=continuation, headers=headers, ytcfg=ytcfg,
                 check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))

             if not response:
@@ -3696,6 +3831,7 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
             known_renderers = {
                 'gridPlaylistRenderer': (self._grid_entries, 'items'),
                 'gridVideoRenderer': (self._grid_entries, 'items'),
+                'gridChannelRenderer': (self._grid_entries, 'items'),
                 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
                 'itemSectionRenderer': (extract_entries, 'contents'),  # for feeds
                 'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
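
`_extract_uploader` above now goes through the shared `_extract_sidebar_info_renderer` helper, but the owner-renderer walk itself is unchanged. A standalone illustration with an abbreviated renderer, trimmed to the accessed keys and filled with made-up sample values:

from urllib.parse import urljoin

owner = {
    'text': 'Some Channel',
    'navigationEndpoint': {'browseEndpoint': {
        'browseId': 'UC0000000000000000000000', 'canonicalBaseUrl': '/c/somechannel'}}}

endpoint = owner['navigationEndpoint']['browseEndpoint']
uploader = {
    'uploader': owner.get('text'),
    'uploader_id': endpoint.get('browseId'),
    'uploader_url': urljoin('https://www.youtube.com/', endpoint.get('canonicalBaseUrl')),
}
assert uploader['uploader_url'] == 'https://www.youtube.com/c/somechannel'
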
@@ -3728,27 +3864,19 @@ def _extract_selected_tab(tabs):
         else:
             raise ExtractorError('Unable to find selected tab')

-    @staticmethod
-    def _extract_uploader(data):
+    @classmethod
+    def _extract_uploader(cls, data):
         uploader = {}
-        sidebar_renderer = try_get(
-            data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
-        if sidebar_renderer:
-            for item in sidebar_renderer:
-                if not isinstance(item, dict):
-                    continue
-                renderer = item.get('playlistSidebarSecondaryInfoRenderer')
-                if not isinstance(renderer, dict):
-                    continue
-                owner = try_get(
-                    renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
-                if owner:
-                    uploader['uploader'] = owner.get('text')
-                    uploader['uploader_id'] = try_get(
-                        owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
-                    uploader['uploader_url'] = urljoin(
-                        'https://www.youtube.com/',
-                        try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
+        renderer = cls._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
+        owner = try_get(
+            renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
+        if owner:
+            uploader['uploader'] = owner.get('text')
+            uploader['uploader_id'] = try_get(
+                owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
+            uploader['uploader_url'] = urljoin(
+                'https://www.youtube.com/',
+                try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
         return {k: v for k, v in uploader.items() if v is not None}

     def _extract_from_tabs(self, item_id, webpage, data, tabs):
@@ -3774,8 +3902,8 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
         thumbnails_list = (
             try_get(renderer, lambda x: x['avatar']['thumbnails'], list)
             or try_get(
-                data,
-                lambda x: x['sidebar']['playlistSidebarRenderer']['items'][0]['playlistSidebarPrimaryInfoRenderer']['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'],
+                self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer'),
+                lambda x: x['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'],
                 list)
             or [])

@@ -3799,7 +3927,6 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
             or playlist_id)
         title += format_field(selected_tab, 'title', ' - %s')
         title += format_field(selected_tab, 'expandedText', ' - %s')
-
         metadata = {
             'playlist_id': playlist_id,
             'playlist_title': title,
@@ -3810,27 +3937,29 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
             'thumbnails': thumbnails,
             'tags': tags,
         }
+        availability = self._extract_availability(data)
+        if availability:
+            metadata['availability'] = availability
         if not channel_id:
             metadata.update(self._extract_uploader(data))
             metadata.update({
                 'channel': metadata['uploader'],
                 'channel_id': metadata['uploader_id'],
                 'channel_url': metadata['uploader_url']})
+        ytcfg = self.extract_ytcfg(item_id, webpage)
         return self.playlist_result(
             self._entries(
                 selected_tab, playlist_id,
                 self._extract_identity_token(webpage, item_id),
-                self._extract_account_syncid(data),
-                self._extract_ytcfg(item_id, webpage)),
+                self._extract_account_syncid(ytcfg, data), ytcfg),
             **metadata)
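
`_extract_availability` above merges sidebar badges with the privacy dropdown shown on one's own playlists, then defers to `self._availability`. The final label resolution, as a standalone sketch that maps straight to an availability string instead of going through that helper:

def availability_from_labels(badge_labels):
    is_private = is_unlisted = None
    for label in badge_labels:
        if label == 'unlisted':
            is_unlisted = True
        elif label == 'private':
            is_private = True
        elif label == 'public':
            is_unlisted = is_private = False
    if is_private:
        return 'private'
    if is_unlisted:
        return 'unlisted'
    if is_private is False:
        return 'public'
    return None  # no explicit signal: do not assume public

assert availability_from_labels({'unlisted'}) == 'unlisted'
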
""" - sidebar_renderer = try_get( - data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) - if not sidebar_renderer: - return browse_id = params = None - for item in sidebar_renderer: - if not isinstance(item, dict): + renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') + if not renderer: + return + menu_renderer = try_get( + renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] + for menu_item in menu_renderer: + if not isinstance(menu_item, dict): continue - renderer = item.get('playlistSidebarPrimaryInfoRenderer') - menu_renderer = try_get( - renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] - for menu_item in menu_renderer: - if not isinstance(menu_item, dict): - continue - nav_item_renderer = menu_item.get('menuNavigationItemRenderer') - text = try_get( - nav_item_renderer, lambda x: x['text']['simpleText'], compat_str) - if not text or text.lower() != 'show unavailable videos': - continue - browse_endpoint = try_get( - nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} - browse_id = browse_endpoint.get('browseId') - params = browse_endpoint.get('params') - break + nav_item_renderer = menu_item.get('menuNavigationItemRenderer') + text = try_get( + nav_item_renderer, lambda x: x['text']['simpleText'], compat_str) + if not text or text.lower() != 'show unavailable videos': + continue + browse_endpoint = try_get( + nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} + browse_id = browse_endpoint.get('browseId') + params = browse_endpoint.get('params') + break - ytcfg = self._extract_ytcfg(item_id, webpage) - headers = self._generate_api_headers( - ytcfg, account_syncid=self._extract_account_syncid(ytcfg), - identity_token=self._extract_identity_token(webpage, item_id=item_id), - visitor_data=try_get( - self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str)) - query = { - 'params': params or 'wgYCCAA=', - 'browseId': browse_id or 'VL%s' % item_id - } - return self._extract_response( - item_id=item_id, headers=headers, query=query, - check_get_keys='contents', fatal=False, - note='Downloading API JSON with unavailable videos') + ytcfg = self.extract_ytcfg(item_id, webpage) + headers = self.generate_api_headers( + ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data), + identity_token=self._extract_identity_token(webpage, item_id=item_id), + visitor_data=try_get( + self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str)) + query = { + 'params': params or 'wgYCCAA=', + 'browseId': browse_id or 'VL%s' % item_id + } + return self._extract_response( + item_id=item_id, headers=headers, query=query, + check_get_keys='contents', fatal=False, ytcfg=ytcfg, + note='Downloading API JSON with unavailable videos') def _extract_webpage(self, url, item_id): retries = self.get_param('extractor_retries', 3) @@ -3938,7 +4101,7 @@ def _extract_webpage(self, url, item_id): webpage = self._download_webpage( url, item_id, 'Downloading webpage%s' % (' (retry #%d)' % count if count else '')) - data = self._extract_yt_initial_data(item_id, webpage) + data = self.extract_yt_initial_data(item_id, webpage) if data.get('contents') or data.get('currentVideoEndpoint'): break # Extract alerts here only when there is error @@ -4060,7 +4223,6 @@ def get_mobj(url): if 'no-youtube-unavailable-videos' not in compat_opts: data = self._reload_with_unavailable_videos(item_id, data, webpage) or data 
     def _reload_with_unavailable_videos(self, item_id, data, webpage):
         """
         Get playlist with unavailable videos if the 'show unavailable videos' button exists.
         """
-        sidebar_renderer = try_get(
-            data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
-        if not sidebar_renderer:
-            return
         browse_id = params = None
-        for item in sidebar_renderer:
-            if not isinstance(item, dict):
+        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
+        if not renderer:
+            return
+        menu_renderer = try_get(
+            renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
+        for menu_item in menu_renderer:
+            if not isinstance(menu_item, dict):
                 continue
-            renderer = item.get('playlistSidebarPrimaryInfoRenderer')
-            menu_renderer = try_get(
-                renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
-            for menu_item in menu_renderer:
-                if not isinstance(menu_item, dict):
-                    continue
-                nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
-                text = try_get(
-                    nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
-                if not text or text.lower() != 'show unavailable videos':
-                    continue
-                browse_endpoint = try_get(
-                    nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
-                browse_id = browse_endpoint.get('browseId')
-                params = browse_endpoint.get('params')
-                break
+            nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
+            text = try_get(
+                nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
+            if not text or text.lower() != 'show unavailable videos':
+                continue
+            browse_endpoint = try_get(
+                nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
+            browse_id = browse_endpoint.get('browseId')
+            params = browse_endpoint.get('params')
+            break

-        ytcfg = self._extract_ytcfg(item_id, webpage)
-        headers = self._generate_api_headers(
-            ytcfg, account_syncid=self._extract_account_syncid(ytcfg),
-            identity_token=self._extract_identity_token(webpage, item_id=item_id),
-            visitor_data=try_get(
-                self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
-        query = {
-            'params': params or 'wgYCCAA=',
-            'browseId': browse_id or 'VL%s' % item_id
-        }
-        return self._extract_response(
-            item_id=item_id, headers=headers, query=query,
-            check_get_keys='contents', fatal=False,
-            note='Downloading API JSON with unavailable videos')
+        ytcfg = self.extract_ytcfg(item_id, webpage)
+        headers = self.generate_api_headers(
+            ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
+            identity_token=self._extract_identity_token(webpage, item_id=item_id),
+            visitor_data=try_get(
+                self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+        query = {
+            'params': params or 'wgYCCAA=',
+            'browseId': browse_id or 'VL%s' % item_id
+        }
+        return self._extract_response(
+            item_id=item_id, headers=headers, query=query,
+            check_get_keys='contents', fatal=False, ytcfg=ytcfg,
+            note='Downloading API JSON with unavailable videos')

     def _extract_webpage(self, url, item_id):
         retries = self.get_param('extractor_retries', 3)
@@ -3938,7 +4101,7 @@ def _extract_webpage(self, url, item_id):
             webpage = self._download_webpage(
                 url, item_id,
                 'Downloading webpage%s' % (' (retry #%d)' % count if count else ''))
-            data = self._extract_yt_initial_data(item_id, webpage)
+            data = self.extract_yt_initial_data(item_id, webpage)
             if data.get('contents') or data.get('currentVideoEndpoint'):
                 break
             # Extract alerts here only when there is error
@@ -4060,7 +4223,6 @@ def get_mobj(url):
         if 'no-youtube-unavailable-videos' not in compat_opts:
             data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
         self._extract_and_report_alerts(data)
-
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
@@ -4105,6 +4267,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'id': 'PLBB231211A4F62143',
             'uploader': 'Wickydoo',
             'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
+            'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
         },
         'playlist_mincount': 29,
     }, {
@@ -4127,12 +4290,13 @@ class YoutubePlaylistIE(InfoExtractor):
         }
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
-        'playlist_mincount': 982,
+        'playlist_mincount': 654,
         'info_dict': {
             'title': '2018 Chinese New Singles (11/6 updated)',
             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
             'uploader': 'LBK',
             'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
+            'description': 'md5:da521864744d60a198e3a88af4db0d9d',
         }
     }, {
         'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
@@ -4255,7 +4419,9 @@ def _entries(self, query, n):
         if self._SEARCH_PARAMS:
             data['params'] = self._SEARCH_PARAMS
         total = 0
+        continuation = {}
         for page_num in itertools.count(1):
+            data.update(continuation)
             search = self._extract_response(
                 item_id='query "%s" page %s' % (query, page_num),
                 ep='search', query=data, check_get_keys=('contents', 'onResponseReceivedCommands')
@@ -4273,13 +4439,10 @@ def _entries(self, query, n):
             # Youtube sometimes adds promoted content to searches,
             # changing the index location of videos and token.
            # So we search through all entries till we find them.
-            continuation_token = None
+            continuation = None
             for slr_content in slr_contents:
-                if continuation_token is None:
-                    continuation_token = try_get(
-                        slr_content,
-                        lambda x: x['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
-                        compat_str)
+                if not continuation:
+                    continuation = self._extract_continuation({'contents': [slr_content]})

                 isr_contents = try_get(
                     slr_content,
@@ -4302,13 +4465,12 @@ def _entries(self, query, n):
                 if total == n:
                     return

-            if not continuation_token:
+            if not continuation:
                 break
-            data['continuation'] = continuation_token

     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
-        return self.playlist_result(self._entries(query, n), query)
+        return self.playlist_result(self._entries(query, n), query, query)


 class YoutubeSearchDateIE(YoutubeSearchIE):
@@ -4327,6 +4489,7 @@ class YoutubeSearchURLIE(YoutubeSearchIE):
         'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
         'playlist_mincount': 5,
         'info_dict': {
+            'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
         }
    }, {