X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/65524694334de9de4664b8b338d1bb3fa0b63f91..e820fbaa6ff41625b6f4d8453253883b86bf9ca4:/yt_dlp/extractor/youtube.py diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py index d9d414d75..b9566a0a7 100644 --- a/yt_dlp/extractor/youtube.py +++ b/yt_dlp/extractor/youtube.py @@ -38,6 +38,7 @@ format_field, int_or_none, intlist_to_bytes, + is_html, mimetype2ext, network_exceptions, orderedSet, @@ -45,7 +46,9 @@ parse_count, parse_duration, parse_iso8601, + parse_qs, qualities, + remove_end, remove_start, smuggle_url, str_or_none, @@ -57,49 +60,210 @@ unsmuggle_url, update_url_query, url_or_none, - urlencode_postdata, urljoin, variadic, ) -def parse_qs(url): - return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) +# any clients starting with _ cannot be explicity requested by the user +INNERTUBE_CLIENTS = { + 'web': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'WEB', + 'clientVersion': '2.20210622.10.00', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 1 + }, + 'web_embedded': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'WEB_EMBEDDED_PLAYER', + 'clientVersion': '1.20210620.0.1', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 56 + }, + 'web_music': { + 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30', + 'INNERTUBE_HOST': 'music.youtube.com', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'WEB_REMIX', + 'clientVersion': '1.20210621.00.00', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 67, + }, + 'web_creator': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'WEB_CREATOR', + 'clientVersion': '1.20210621.00.00', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 62, + }, + 'android': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'ANDROID', + 'clientVersion': '16.20', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 3, + 'REQUIRE_JS_PLAYER': False + }, + 'android_embedded': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'ANDROID_EMBEDDED_PLAYER', + 'clientVersion': '16.20', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 55, + 'REQUIRE_JS_PLAYER': False + }, + 'android_music': { + 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30', + 'INNERTUBE_HOST': 'music.youtube.com', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'ANDROID_MUSIC', + 'clientVersion': '4.32', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 21, + 'REQUIRE_JS_PLAYER': False + }, + 'android_creator': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'ANDROID_CREATOR', + 'clientVersion': '21.24.100', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 14, + 'REQUIRE_JS_PLAYER': False + }, + # ios has HLS live streams + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680 + 'ios': { + 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'IOS', + 'clientVersion': '16.20', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 5, + 'REQUIRE_JS_PLAYER': False + }, + 'ios_embedded': { + 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'IOS_MESSAGES_EXTENSION', + 'clientVersion': '16.20', + }, + }, + 
'INNERTUBE_CONTEXT_CLIENT_NAME': 66, + 'REQUIRE_JS_PLAYER': False + }, + 'ios_music': { + 'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og', + 'INNERTUBE_HOST': 'music.youtube.com', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'IOS_MUSIC', + 'clientVersion': '4.32', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 26, + 'REQUIRE_JS_PLAYER': False + }, + 'ios_creator': { + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'IOS_CREATOR', + 'clientVersion': '21.24.100', + }, + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 15, + 'REQUIRE_JS_PLAYER': False + }, + # mweb has 'ultralow' formats + # See: https://github.com/yt-dlp/yt-dlp/pull/557 + 'mweb': { + 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8', + 'INNERTUBE_CONTEXT': { + 'client': { + 'clientName': 'MWEB', + 'clientVersion': '2.20210721.07.00', + } + }, + 'INNERTUBE_CONTEXT_CLIENT_NAME': 2 + }, +} + + +def build_innertube_clients(): + third_party = { + 'embedUrl': 'https://google.com', # Can be any valid URL + } + base_clients = ('android', 'web', 'ios', 'mweb') + priority = qualities(base_clients[::-1]) + + for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()): + ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8') + ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com') + ytcfg.setdefault('REQUIRE_JS_PLAYER', True) + ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en') + ytcfg['priority'] = 10 * priority(client.split('_', 1)[0]) + + if client in base_clients: + INNERTUBE_CLIENTS[f'{client}_agegate'] = agegate_ytcfg = copy.deepcopy(ytcfg) + agegate_ytcfg['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED' + agegate_ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party + agegate_ytcfg['priority'] -= 1 + elif client.endswith('_embedded'): + ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party + ytcfg['priority'] -= 2 + else: + ytcfg['priority'] -= 3 + + +build_innertube_clients() class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" - _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' - _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge' - - _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup' - _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge' - _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}' _RESERVED_NAMES = ( - r'channel|c|user|browse|playlist|watch|w|v|embed|e|watch_popup|shorts|' - r'movies|results|shared|hashtag|trending|feed|feeds|oembed|get_video_info|' + r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|' + r'shorts|movies|results|shared|hashtag|trending|feed|feeds|' + r'browse|oembed|get_video_info|iframe_api|s/player|' r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout') + _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)' + _NETRC_MACHINE = 'youtube' + # If True it will raise an error if no login info is provided _LOGIN_REQUIRED = False - _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)' - def _login(self): """ Attempt to log in to YouTube. - True is returned if successful or skipped. - False is returned if login failed. - If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. 
""" - def warn(message): - self.report_warning(message) - - # username+password login is broken if (self._LOGIN_REQUIRED and self.get_param('cookiefile') is None and self.get_param('cookiesfrombrowser') is None): @@ -107,184 +271,7 @@ def warn(message): 'Login details are needed to download this content', method='cookies') username, password = self._get_login_info() if username: - warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies']) - return - - # Everything below this is broken! - r''' - # No authentication to be performed - if username is None: - if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None: - raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True) - # if self.get_param('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them. - # self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!') - return True - - login_page = self._download_webpage( - self._LOGIN_URL, None, - note='Downloading login page', - errnote='unable to fetch login page', fatal=False) - if login_page is False: - return - - login_form = self._hidden_inputs(login_page) - - def req(url, f_req, note, errnote): - data = login_form.copy() - data.update({ - 'pstMsg': 1, - 'checkConnection': 'youtube', - 'checkedDomains': 'youtube', - 'hl': 'en', - 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]', - 'f.req': json.dumps(f_req), - 'flowName': 'GlifWebSignIn', - 'flowEntry': 'ServiceLogin', - # TODO: reverse actual botguard identifier generation algo - 'bgRequest': '["identifier",""]', - }) - return self._download_json( - url, None, note=note, errnote=errnote, - transform_source=lambda s: re.sub(r'^[^[]*', '', s), - fatal=False, - data=urlencode_postdata(data), headers={ - 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8', - 'Google-Accounts-XSRF': 1, - }) - - lookup_req = [ - username, - None, [], None, 'US', None, None, 2, False, True, - [ - None, None, - [2, 1, None, 1, - 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', - None, [], 4], - 1, [None, None, []], None, None, None, True - ], - username, - ] - - lookup_results = req( - self._LOOKUP_URL, lookup_req, - 'Looking up account info', 'Unable to look up account info') - - if lookup_results is False: - return False - - user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str) - if not user_hash: - warn('Unable to extract user hash') - return False - - challenge_req = [ - user_hash, - None, 1, None, [1, None, None, None, [password, None, True]], - [ - None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4], - 1, [None, None, []], None, None, None, True - ]] - - challenge_results = req( - self._CHALLENGE_URL, challenge_req, - 'Logging in', 'Unable to log in') - - if challenge_results is False: - return - - login_res = try_get(challenge_results, lambda x: x[0][5], list) - if login_res: - login_msg = try_get(login_res, 
lambda x: x[5], compat_str) - warn( - 'Unable to login: %s' % 'Invalid password' - if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg) - return False - - res = try_get(challenge_results, lambda x: x[0][-1], list) - if not res: - warn('Unable to extract result entry') - return False - - login_challenge = try_get(res, lambda x: x[0][0], list) - if login_challenge: - challenge_str = try_get(login_challenge, lambda x: x[2], compat_str) - if challenge_str == 'TWO_STEP_VERIFICATION': - # SEND_SUCCESS - TFA code has been successfully sent to phone - # QUOTA_EXCEEDED - reached the limit of TFA codes - status = try_get(login_challenge, lambda x: x[5], compat_str) - if status == 'QUOTA_EXCEEDED': - warn('Exceeded the limit of TFA codes, try later') - return False - - tl = try_get(challenge_results, lambda x: x[1][2], compat_str) - if not tl: - warn('Unable to extract TL') - return False - - tfa_code = self._get_tfa_info('2-step verification code') - - if not tfa_code: - warn( - 'Two-factor authentication required. Provide it either interactively or with --twofactor ' - '(Note that only TOTP (Google Authenticator App) codes work at this time.)') - return False - - tfa_code = remove_start(tfa_code, 'G-') - - tfa_req = [ - user_hash, None, 2, None, - [ - 9, None, None, None, None, None, None, None, - [None, tfa_code, True, 2] - ]] - - tfa_results = req( - self._TFA_URL.format(tl), tfa_req, - 'Submitting TFA code', 'Unable to submit TFA code') - - if tfa_results is False: - return False - - tfa_res = try_get(tfa_results, lambda x: x[0][5], list) - if tfa_res: - tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str) - warn( - 'Unable to finish TFA: %s' % 'Invalid TFA code' - if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg) - return False - - check_cookie_url = try_get( - tfa_results, lambda x: x[0][-1][2], compat_str) - else: - CHALLENGES = { - 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.", - 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.', - 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.", - } - challenge = CHALLENGES.get( - challenge_str, - '%s returned error %s.' % (self.IE_NAME, challenge_str)) - warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge) - return False - else: - check_cookie_url = try_get(res, lambda x: x[2], compat_str) - - if not check_cookie_url: - warn('Unable to extract CheckCookie URL') - return False - - check_cookie_results = self._download_webpage( - check_cookie_url, None, 'Checking cookie', fatal=False) - - if check_cookie_results is False: - return False - - if 'https://myaccount.google.com/' not in check_cookie_results: - warn('Unable to log in') - return False - - return True - ''' + self.report_warning(f'Cannot login to YouTube using username and password. 
{self._LOGIN_HINTS["cookies"]}') def _initialize_consent(self): cookies = self._get_cookies('https://www.youtube.com/') @@ -303,274 +290,37 @@ def _initialize_consent(self): def _real_initialize(self): self._initialize_consent() - if self._downloader is None: - return - if not self._login(): - return + self._login() _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;' _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;' _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|= 2 and sync_ids[1]: # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel # and just "user_syncid||" for primary channel. We only want the channel_syncid return sync_ids[0] + @staticmethod + def _extract_visitor_data(*args): + """ + Extracts visitorData from an API response or ytcfg + Appears to be used to track session state + """ + return traverse_obj( + args, (..., ('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))), + expected_type=compat_str, get_all=False) + + @property + def is_authenticated(self): + return bool(self._generate_sapisidhash_header()) + def extract_ytcfg(self, video_id, webpage): if not webpage: return {} @@ -673,33 +454,29 @@ def extract_ytcfg(self, video_id, webpage): default='{}'), video_id, fatal=False) or {} def generate_api_headers( - self, ytcfg=None, identity_token=None, account_syncid=None, - visitor_data=None, api_hostname=None, default_client='WEB', session_index=None): + self, *, ytcfg=None, account_syncid=None, session_index=None, + visitor_data=None, identity_token=None, api_hostname=None, default_client='web'): + origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client)) headers = { 'X-YouTube-Client-Name': compat_str( self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)), 'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client), - 'Origin': origin + 'Origin': origin, + 'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg), + 'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg), + 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg) } - if not visitor_data and ytcfg: - visitor_data = try_get( - self._extract_context(ytcfg, default_client), lambda x: x['client']['visitorData'], compat_str) - if identity_token: - headers['X-Youtube-Identity-Token'] = identity_token - if account_syncid: - headers['X-Goog-PageId'] = account_syncid - if session_index is None and ytcfg: + if session_index is None: session_index = self._extract_session_index(ytcfg) if account_syncid or session_index is not None: headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0 - if visitor_data: - headers['X-Goog-Visitor-Id'] = visitor_data + auth = self._generate_sapisidhash_header(origin) if auth is not None: headers['Authorization'] = auth headers['X-Origin'] = origin - return headers + return {h: v for h, v in headers.items() if v is not None} @staticmethod def _build_api_continuation_query(continuation, ctp=None): @@ -770,17 +547,17 @@ def _extract_alerts(cls, data): if message: yield alert_type, message - def _report_alerts(self, alerts, expected=True): + def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False): errors = [] warnings = [] for alert_type, alert_message in alerts: - if alert_type.lower() == 'error': + if alert_type.lower() == 'error' and 
fatal: errors.append([alert_type, alert_message]) else: warnings.append([alert_type, alert_message]) for alert_type, alert_message in (warnings + errors[:-1]): - self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message)) + self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once) if errors: raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected) @@ -819,7 +596,7 @@ def _get_text(data, *path_list, max_runs=None): def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None, ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None, - default_client='WEB'): + default_client='web'): response = None last_error = None count = -1 @@ -829,7 +606,7 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers while count < retries: count += 1 if last_error: - self.report_warning('%s. Retrying ...' % last_error) + self.report_warning('%s. Retrying ...' % remove_end(last_error, '.')) try: response = self._call_api( ep=ep, fatal=True, headers=headers, @@ -840,12 +617,19 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers note='%s%s' % (note, ' (retry #%d)' % count if count else '')) except ExtractorError as e: if isinstance(e.cause, network_exceptions): + if isinstance(e.cause, compat_HTTPError) and not is_html(e.cause.read(512)): + e.cause.seek(0) + yt_error = try_get( + self._parse_json(e.cause.read().decode(), item_id, fatal=False), + lambda x: x['error']['message'], compat_str) + if yt_error: + self._report_alerts([('ERROR', yt_error)], fatal=False) # Downloading page may result in intermittent 5xx HTTP error # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289 # We also want to catch all other network exceptions since errors in later pages can be troublesome # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429): - last_error = error_to_compat_str(e.cause or e) + last_error = error_to_compat_str(e.cause or e.msg) if count < retries: continue if fatal: @@ -855,10 +639,14 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers return else: - # Youtube may send alerts if there was an issue with the continuation page try: - self._extract_and_report_alerts(response, expected=False) + self._extract_and_report_alerts(response, only_once=True) except ExtractorError as e: + # YouTube servers may return errors we want to retry on in a 200 OK response + # See: https://github.com/yt-dlp/yt-dlp/issues/839 + if 'unknown error' in e.msg.lower(): + last_error = e.msg + continue if fatal: raise self.report_warning(error_to_compat_str(e)) @@ -897,7 +685,7 @@ def _extract_video(self, renderer): '_type': 'url', 'ie_key': YoutubeIE.ie_key(), 'id': video_id, - 'url': video_id, + 'url': f'https://www.youtube.com/watch?v={video_id}', 'title': title, 'description': description, 'duration': duration, @@ -984,7 +772,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: - (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/ + (?:(?:v|embed|e|shorts)/(?!videoseries)) # v/ or embed/ or e/ or shorts/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? 
# preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! @@ -1111,18 +899,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor): '_rtmp': {'protocol': 'rtmp'}, # av01 video only formats sometimes served with "unknown" codecs - '394': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, - '395': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, - '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, - '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'}, + '394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'}, + '395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'}, + '396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'}, + '397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'}, + '398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'}, + '399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'}, + '400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'}, + '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'}, } _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt') - _AGE_GATE_REASONS = ( - 'Sign in to confirm your age', - 'This video may be inappropriate for some users.', - 'Sorry, this content is age-restricted.') - _GEO_BYPASS = False IE_NAME = 'youtube' @@ -1227,8 +1014,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'format': '141/bestaudio[ext=m4a]', }, }, - # Normal age-gate video (embed allowed) + # Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000 { + 'note': 'Embed allowed age-gate video', 'url': 'https://youtube.com/watch?v=HtVdAasjOgU', 'info_dict': { 'id': 'HtVdAasjOgU', @@ -1243,6 +1031,52 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'age_limit': 18, }, }, + { + 'note': 'Age-gate video with embed allowed in public site', + 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U', + 'info_dict': { + 'id': 'HsUATh_Nc2U', + 'ext': 'mp4', + 'title': 'Godzilla 2 (Official Video)', + 'description': 'md5:bf77e03fcae5529475e500129b05668a', + 'upload_date': '20200408', + 'uploader_id': 'FlyingKitty900', + 'uploader': 'FlyingKitty', + 'age_limit': 18, + }, + }, + { + 'note': 'Age-gate video embedable only with clientScreen=EMBED', + 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg', + 'info_dict': { + 'id': 'Tq92D6wQ1mg', + 'title': '[MMD] Adios - EVERGLOW [+Motion DL]', + 'ext': 'mp4', + 'upload_date': '20191227', + 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', + 'uploader': 'Projekt Melody', + 'description': 'md5:17eccca93a786d51bc67646756894066', + 'age_limit': 18, + }, + }, + { + 'note': 'Non-Agegated non-embeddable video', + 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY', + 'info_dict': { + 'id': 'MeJVWBSsPAY', + 'ext': 'mp4', + 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)', + 'uploader': 'Herr Lurik', + 'uploader_id': 'st3in234', + 'description': 'Fan Video. 
Music & Lyrics by OOMPH!.', + 'upload_date': '20130730', + }, + }, + { + 'note': 'Non-bypassable age-gated video', + 'url': 'https://youtube.com/watch?v=Cr381pDsSsA', + 'only_matching': True, + }, # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421) # YouTube Red ad is not captured for creator { @@ -1411,6 +1245,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'params': { 'skip_download': True, }, + 'skip': 'Not multifeed anymore', }, { # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536) @@ -1827,14 +1662,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'params': { 'extractor_args': {'youtube': {'player_skip': ['configs']}}, }, - } + }, { + # shorts + 'url': 'https://www.youtube.com/shorts/BGQWPY4IigY', + 'only_matching': True, + }, ] @classmethod def suitable(cls, url): - # Hack for lazy extractors until more generic solution is implemented - # (see #28780) - from .youtube import parse_qs + from ..utils import parse_qs + qs = parse_qs(url) if qs.get('list', [None])[0]: return False @@ -1845,14 +1683,12 @@ def __init__(self, *args, **kwargs): self._code_cache = {} self._player_cache = {} - def _extract_player_url(self, ytcfg=None, webpage=None): - player_url = try_get(ytcfg, (lambda x: x['PLAYER_JS_URL']), str) - if not player_url and webpage: - player_url = self._search_regex( - r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"', - webpage, 'player URL', fatal=False) + def _extract_player_url(self, *ytcfgs, webpage=None): + player_url = traverse_obj( + ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'), + get_all=False, expected_type=compat_str) if not player_url: - return None + return if player_url.startswith('//'): player_url = 'https:' + player_url elif not re.match(r'https?://', player_url): @@ -1860,6 +1696,16 @@ def _extract_player_url(self, ytcfg=None, webpage=None): 'https://www.youtube.com', player_url) return player_url + def _download_player_url(self, video_id, fatal=False): + res = self._download_webpage( + 'https://www.youtube.com/iframe_api', + note='Downloading iframe API JS', video_id=video_id, fatal=fatal) + if res: + player_version = self._search_regex( + r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal) + if player_version: + return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js' + def _signature_cache_id(self, example_sig): """ Return a string representation of a signature """ return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) @@ -1877,10 +1723,12 @@ def _extract_player_info(cls, player_url): def _load_player(self, video_id, player_url, fatal=True) -> bool: player_id = self._extract_player_info(player_url) if player_id not in self._code_cache: - self._code_cache[player_id] = self._download_webpage( + code = self._download_webpage( player_url, video_id, fatal=fatal, note='Downloading player ' + player_id, errnote='Download of %s failed' % player_url) + if code: + self._code_cache[player_id] = code return player_id in self._code_cache def _extract_signature_function(self, video_id, player_url, example_sig): @@ -1949,10 +1797,10 @@ def _parse_sig_js(self, jscode): funcname = self._search_regex( (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P[a-zA-Z0-9$]+)\(', - r'\bm=(?P[a-zA-Z0-9$]{2})\(decodeURIComponent\(h\.s\)\)', - r'\bc&&\(c=(?P[a-zA-Z0-9$]{2})\(decodeURIComponent\(c\)\)', - 
r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)', - r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', + r'\bm=(?P[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)', + r'\bc&&\(c=(?P[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)', + r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)', + r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', r'(?P[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # Obsolete patterns r'(["\'])signature\1\s*,\s*(?P[a-zA-Z0-9$]+)\(', @@ -2084,8 +1932,7 @@ def extract_id(cls, url): mobj = re.match(cls._VALID_URL, url, re.VERBOSE) if mobj is None: raise ExtractorError('Invalid URL: %s' % url) - video_id = mobj.group(2) - return video_id + return mobj.group('id') def _extract_chapters_from_json(self, data, duration): chapter_list = traverse_obj( @@ -2195,11 +2042,9 @@ def _extract_comment(self, comment_renderer, parent=None): 'parent': parent or 'root' } - def _comment_entries(self, root_continuation_data, identity_token, account_syncid, - ytcfg, video_id, parent=None, comment_counts=None): + def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, comment_counts=None): def extract_header(contents): - _total_comments = 0 _continuation = None for content in contents: comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer']) @@ -2209,7 +2054,6 @@ def extract_header(contents): if expected_comment_count: comment_counts[1] = expected_comment_count self.to_screen('Downloading ~%d comments' % expected_comment_count) - _total_comments = comment_counts[1] sort_mode_str = self._configuration_arg('comment_sort', [''])[0] comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top @@ -2229,7 +2073,7 @@ def extract_header(contents): sort_text = 'top comments' if comment_sort_index == 0 else 'newest first' self.to_screen('Sorting comments by %s' % sort_text) break - return _total_comments, _continuation + return _continuation def extract_thread(contents): if not parent: @@ -2254,8 +2098,8 @@ def extract_thread(contents): if comment_replies_renderer: comment_counts[2] += 1 comment_entries_iter = self._comment_entries( - comment_replies_renderer, identity_token, account_syncid, ytcfg, - video_id, parent=comment.get('id'), comment_counts=comment_counts) + comment_replies_renderer, ytcfg, video_id, + parent=comment.get('id'), comment_counts=comment_counts) for reply_comment in comment_entries_iter: yield reply_comment @@ -2274,13 +2118,17 @@ def extract_thread(contents): continuation_token = self._generate_comment_continuation(video_id) continuation = self._build_api_continuation_query(continuation_token, None) + message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1) + if message and not parent: + self.report_warning(message, video_id=video_id) + visitor_data = None is_first_continuation = parent is None for page_num in itertools.count(0): if not continuation: break - headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data) + headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data) comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1]) if page_num == 0: if is_first_continuation: @@ -2317,9 
+2165,7 @@ def extract_thread(contents): lambda x: x['appendContinuationItemsAction']['continuationItems']), list) or [] if is_first_continuation: - total_comments, continuation = extract_header(continuation_items) - if total_comments: - yield total_comments + continuation = extract_header(continuation_items) is_first_continuation = False if continuation: break @@ -2347,9 +2193,7 @@ def extract_thread(contents): continue if is_first_continuation: header_continuation_items = [continuation_renderer.get('header') or {}] - total_comments, continuation = extract_header(header_continuation_items) - if total_comments: - yield total_comments + continuation = extract_header(header_continuation_items) is_first_continuation = False if continuation: break @@ -2377,46 +2221,28 @@ def _generate_comment_continuation(video_id): [bytes_to_intlist(base64.b64decode(part)) for part in parts])) return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8') - def _extract_comments(self, ytcfg, video_id, contents, webpage): + def _get_comments(self, ytcfg, video_id, contents, webpage): """Entry for comment extraction""" def _real_comment_extract(contents): - if isinstance(contents, list): - for entry in contents: - for key, renderer in entry.items(): - if key not in known_entry_comment_renderers: - continue - yield from self._comment_entries( - renderer, video_id=video_id, ytcfg=ytcfg, - identity_token=self._extract_identity_token(webpage, item_id=video_id), - account_syncid=self._extract_account_syncid(ytcfg)) - break - comments = [] - known_entry_comment_renderers = ('itemSectionRenderer',) - estimated_total = 0 - max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0]) or float('inf') + renderer = next(( + item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={}) + if item.get('sectionIdentifier') == 'comment-item-section'), None) + yield from self._comment_entries(renderer, ytcfg, video_id) + + max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0]) # Force English regardless of account setting to prevent parsing issues # See: https://github.com/yt-dlp/yt-dlp/issues/532 ytcfg = copy.deepcopy(ytcfg) traverse_obj( ytcfg, ('INNERTUBE_CONTEXT', 'client'), expected_type=dict, default={})['hl'] = 'en' - try: - for comment in _real_comment_extract(contents): - if len(comments) >= max_comments: - break - if isinstance(comment, int): - estimated_total = comment - continue - comments.append(comment) - except KeyboardInterrupt: - self.to_screen('Interrupted by user') - self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total)) - return { - 'comments': comments, - 'comment_count': len(comments), - } + return itertools.islice(_real_comment_extract(contents), 0, max_comments) @staticmethod - def _generate_player_context(sts=None): + def _get_checkok_params(): + return {'contentCheckOk': True, 'racyCheckOk': True} + + @classmethod + def _generate_player_context(cls, sts=None): context = { 'html5Preference': 'HTML5_PREF_WANTS', } @@ -2426,31 +2252,47 @@ def _generate_player_context(sts=None): 'playbackContext': { 'contentPlaybackContext': context }, - 'contentCheckOk': True, - 'racyCheckOk': True + **cls._get_checkok_params() } - def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr): + @staticmethod + def _is_agegated(player_response): + if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')): + return True + + reasons = 
traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[]) + AGE_GATE_REASONS = ( + 'confirm your age', 'age-restricted', 'inappropriate', # reason + 'age_verification_required', 'age_check_required', # status + ) + return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons) + + @staticmethod + def _is_unplayable(player_response): + return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE' + + def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr): session_index = self._extract_session_index(player_ytcfg, master_ytcfg) syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr) - sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) + sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None headers = self.generate_api_headers( - player_ytcfg, identity_token, syncid, - default_client=self._YT_CLIENTS[client], session_index=session_index) + ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client) yt_query = {'videoId': video_id} yt_query.update(self._generate_player_context(sts)) return self._extract_response( item_id=video_id, ep='player', query=yt_query, - ytcfg=player_ytcfg, headers=headers, fatal=False, - default_client=self._YT_CLIENTS[client], + ytcfg=player_ytcfg, headers=headers, fatal=True, + default_client=client, note='Downloading %s player API JSON' % client.replace('_', ' ').strip() ) or None def _get_requested_clients(self, url, smuggled_data): requested_clients = [] - allowed_clients = [client for client in self._YT_CLIENTS.keys() if client[:1] != '_'] + allowed_clients = sorted( + [client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'], + key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True) for client in self._configuration_arg('player_client'): if client in allowed_clients: requested_clients.append(client) @@ -2463,7 +2305,7 @@ def _get_requested_clients(self, url, smuggled_data): if smuggled_data.get('is_music_url') or self.is_music_url(url): requested_clients.extend( - f'{client}_music' for client in requested_clients if not client.endswith('_music')) + f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS) return orderedSet(requested_clients) @@ -2477,7 +2319,7 @@ def _extract_player_ytcfg(self, client, video_id): webpage = self._download_webpage(url, video_id, fatal=False, note=f'Downloading {client} config') return self.extract_ytcfg(video_id, webpage) or {} - def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg, player_url, identity_token): + def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg): initial_pr = None if webpage: initial_pr = self._extract_yt_initial_variable( @@ -2486,40 +2328,63 @@ def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg, pl original_clients = clients clients = clients[::-1] + prs = [] + + def append_client(client_name): + if client_name in INNERTUBE_CLIENTS and client_name not in original_clients: + clients.append(client_name) + + # Android player_response does not have microFormats which are needed for + # extraction of some data. 
So we return the initial_pr with formats + # stripped out even if not requested by the user + # See: https://github.com/yt-dlp/yt-dlp/issues/501 + if initial_pr: + pr = dict(initial_pr) + pr['streamingData'] = None + prs.append(pr) + + last_error = None + tried_iframe_fallback = False + player_url = None while clients: client = clients.pop() player_ytcfg = master_ytcfg if client == 'web' else {} if 'configs' not in self._configuration_arg('player_skip'): player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg - if client == 'web_embedded': - # If we extracted the embed webpage, it'll tell us if we can view the video - embedded_pr = self._parse_json( - traverse_obj(player_ytcfg, ('PLAYER_VARS', 'embedded_player_response'), expected_type=str) or '{}', - video_id=video_id) - embedded_ps_reason = traverse_obj(embedded_pr, ('playabilityStatus', 'reason'), expected_type=str) or '' - if embedded_ps_reason in self._AGE_GATE_REASONS: - self.report_warning(f'Youtube said: {embedded_ps_reason}') - continue - pr = ( - initial_pr if client == 'web' and initial_pr - else self._extract_player_response( - client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr)) + player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage) + require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER') + if 'js' in self._configuration_arg('player_skip'): + require_js_player = False + player_url = None + + if not player_url and not tried_iframe_fallback and require_js_player: + player_url = self._download_player_url(video_id) + tried_iframe_fallback = True + + try: + pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response( + client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr) + except ExtractorError as e: + if last_error: + self.report_warning(last_error) + last_error = e + continue + if pr: - yield pr + prs.append(pr) - if traverse_obj(pr, ('playabilityStatus', 'reason')) in self._AGE_GATE_REASONS: - client = f'{client}_agegate' - if client in self._YT_CLIENTS and client not in original_clients: - clients.append(client) + # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in + if client.endswith('_agegate') and self._is_unplayable(pr) and self.is_authenticated: + append_client(client.replace('_agegate', '_creator')) + elif self._is_agegated(pr): + append_client(f'{client}_agegate') - # Android player_response does not have microFormats which are needed for - # extraction of some data. 
So we return the initial_pr with formats - # stripped out even if not requested by the user - # See: https://github.com/yt-dlp/yt-dlp/issues/501 - if initial_pr and 'web' not in original_clients: - initial_pr['streamingData'] = None - yield initial_pr + if last_error: + if not len(prs): + raise last_error + self.report_warning(last_error) + return prs, player_url def _extract_formats(self, streaming_data, video_id, player_url, is_live): itags, stream_ids = [], [] @@ -2586,7 +2451,8 @@ def _extract_formats(self, streaming_data, video_id, player_url, is_live): 'filesize': int_or_none(fmt.get('contentLength')), 'format_id': itag, 'format_note': ', '.join(filter(None, ( - audio_track.get('displayName'), + '%s%s' % (audio_track.get('displayName') or '', + ' (default)' if audio_track.get('audioIsDefault') else ''), fmt.get('qualityLabel') or quality.replace('audio_quality_', '')))), 'fps': int_or_none(fmt.get('fps')), 'height': height, @@ -2595,6 +2461,7 @@ def _extract_formats(self, streaming_data, video_id, player_url, is_live): 'url': fmt_url, 'width': int_or_none(fmt.get('width')), 'language': audio_track.get('id', '').split('.')[0], + 'language_preference': 1 if audio_track.get('audioIsDefault') else -1, } mime_mobj = re.match( r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '') @@ -2617,7 +2484,9 @@ def _extract_formats(self, streaming_data, video_id, player_url, is_live): yield dct skip_manifests = self._configuration_arg('skip') - get_dash = not is_live and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True) + get_dash = ( + (not is_live or self._configuration_arg('include_live_dash')) + and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True)) get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True) def guess_quality(f): @@ -2633,7 +2502,9 @@ def guess_quality(f): itag = self._search_regex( r'/itag/(\d+)', f['url'], 'itag', default=None) if itag in itags: - continue + itag += '-hls' + if itag in itags: + continue if itag: f['format_id'] = itag itags.append(itag) @@ -2645,8 +2516,11 @@ def guess_quality(f): for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False): itag = f['format_id'] if itag in itags: - continue + itag += '-dash' + if itag in itags: + continue if itag: + f['format_id'] = itag itags.append(itag) f['quality'] = guess_quality(f) filesize = int_or_none(self._search_regex( @@ -2662,16 +2536,16 @@ def _real_extract(self, url): base_url = self.http_scheme() + '//www.youtube.com/' webpage_url = base_url + 'watch?v=' + video_id - webpage = self._download_webpage( - webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False) + webpage = None + if 'webpage' not in self._configuration_arg('player_skip'): + webpage = self._download_webpage( + webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False) master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg() - player_url = self._extract_player_url(master_ytcfg, webpage) - identity_token = self._extract_identity_token(webpage, video_id) - player_responses = list(self._extract_player_responses( + player_responses, player_url = self._extract_player_responses( self._get_requested_clients(url, smuggled_data), - video_id, webpage, master_ytcfg, player_url, identity_token)) + video_id, webpage, master_ytcfg) get_first = lambda obj, keys, **kwargs: traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False) @@ -2754,8 +2628,7 @@ def 
feed_entry(name): if not formats: if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')): - self.raise_no_formats( - 'This video is DRM protected.', expected=True) + self.report_drm(video_id) pemr = get_first( playability_statuses, ('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {} @@ -2775,12 +2648,12 @@ def feed_entry(name): for f in formats: if '&c=WEB&' in f['url'] and '&ratebypass=yes&' not in f['url']: # throttled f['source_preference'] = -10 - note = f.get('format_note') - f['format_note'] = f'{note} (throttled)' if note else '(throttled)' + # TODO: this method is not reliable + f['format_note'] = format_field(f, 'format_note', '%s ') + '(maybe throttled)' # Source is given priority since formats that throttle are given lower source_preference # When throttling issue is fully fixed, remove this - self._sort_formats(formats, ('quality', 'height', 'fps', 'source')) + self._sort_formats(formats, ('quality', 'res', 'fps', 'source', 'codec:vp9.2', 'lang')) keywords = get_first(video_details, 'keywords', expected_type=list) or [] if not keywords and webpage: @@ -2826,21 +2699,18 @@ def feed_entry(name): # The best resolution thumbnails sometimes does not appear in the webpage # See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340 # List of possible thumbnails - Ref: - hq_thumbnail_names = ['maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3'] - # TODO: Test them also? - For some videos, even these don't exist - guaranteed_thumbnail_names = [ + thumbnail_names = [ + 'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3', 'hqdefault', 'hq1', 'hq2', 'hq3', '0', 'mqdefault', 'mq1', 'mq2', 'mq3', 'default', '1', '2', '3' ] - thumbnail_names = hq_thumbnail_names + guaranteed_thumbnail_names n_thumbnail_names = len(thumbnail_names) thumbnails.extend({ 'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format( video_id=video_id, name=name, ext=ext, webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''), - '_test_url': name in hq_thumbnail_names, } for name in thumbnail_names for ext in ('webp', 'jpg')) for thumb in thumbnails: i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names) @@ -2906,15 +2776,19 @@ def feed_entry(name): } pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict) - # Converted into dicts to remove duplicates - captions = { - sub.get('baseUrl'): sub - for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])} - translation_languages = { - lang.get('languageCode'): lang.get('languageName') - for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])} - subtitles = {} if pctr: + def get_lang_code(track): + return (remove_start(track.get('vssId') or '', '.').replace('.', '-') + or track.get('languageCode')) + + # Converted into dicts to remove duplicates + captions = { + get_lang_code(sub): sub + for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])} + translation_languages = { + lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1) + for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])} + def process_language(container, base_url, lang_code, sub_name, query): lang_subs = container.setdefault(lang_code, []) for fmt in self._SUBTITLE_FORMATS: @@ -2927,30 +2801,29 @@ def process_language(container, base_url, 
lang_code, sub_name, query): 'name': sub_name, }) - for base_url, caption_track in captions.items(): + subtitles, automatic_captions = {}, {} + for lang_code, caption_track in captions.items(): + base_url = caption_track.get('baseUrl') if not base_url: continue + lang_name = self._get_text(caption_track, 'name', max_runs=1) if caption_track.get('kind') != 'asr': - lang_code = ( - remove_start(caption_track.get('vssId') or '', '.').replace('.', '-') - or caption_track.get('languageCode')) if not lang_code: continue process_language( - subtitles, base_url, lang_code, - traverse_obj(caption_track, ('name', 'simpleText')), - {}) - continue - automatic_captions = {} + subtitles, base_url, lang_code, lang_name, {}) + if not caption_track.get('isTranslatable'): + continue for trans_code, trans_name in translation_languages.items(): if not trans_code: continue + if caption_track.get('kind') != 'asr': + trans_code += f'-{lang_code}' + trans_name += format_field(lang_name, template=' from %s') process_language( - automatic_captions, base_url, trans_code, - self._get_text(trans_name, max_runs=1), - {'tlang': trans_code}) - info['automatic_captions'] = automatic_captions - info['subtitles'] = subtitles + automatic_captions, base_url, trans_code, trans_name, {'tlang': trans_code}) + info['automatic_captions'] = automatic_captions + info['subtitles'] = subtitles parsed_url = compat_urllib_parse_urlparse(url) for component in [parsed_url.fragment, parsed_url.query]: @@ -2985,19 +2858,18 @@ def process_language(container, base_url, lang_code, sub_name, query): webpage, self._YT_INITIAL_DATA_RE, video_id, 'yt initial data') if not initial_data: - headers = self.generate_api_headers( - master_ytcfg, identity_token, self._extract_account_syncid(master_ytcfg), - session_index=self._extract_session_index(master_ytcfg)) - + query = {'videoId': video_id} + query.update(self._get_checkok_params()) initial_data = self._extract_response( item_id=video_id, ep='next', fatal=False, - ytcfg=master_ytcfg, headers=headers, query={'videoId': video_id}, + ytcfg=master_ytcfg, query=query, + headers=self.generate_api_headers(ytcfg=master_ytcfg), note='Downloading initial data API JSON') try: # This will error if there is no livechat initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation'] - info['subtitles']['live_chat'] = [{ + info.setdefault('subtitles', {})['live_chat'] = [{ 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies 'video_id': video_id, 'ext': 'json', @@ -3130,41 +3002,7 @@ def process_language(container, base_url, lang_code, sub_name, query): needs_auth=info['age_limit'] >= 18, is_unlisted=None if is_private is None else is_unlisted) - # get xsrf for annotations or comments - get_annotations = self.get_param('writeannotations', False) - get_comments = self.get_param('getcomments', False) - if get_annotations or get_comments: - xsrf_token = None - if master_ytcfg: - xsrf_token = try_get(master_ytcfg, lambda x: x['XSRF_TOKEN'], compat_str) - if not xsrf_token: - xsrf_token = self._search_regex( - r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P(?:(?!\2).)+)\2', - webpage, 'xsrf token', group='xsrf_token', fatal=False) - - # annotations - if get_annotations: - invideo_url = get_first( - player_responses, - ('annotations', 0, 'playerAnnotationsUrlsRenderer', 'invideoUrl'), - expected_type=str) - if xsrf_token and invideo_url: - xsrf_field_name = None - if master_ytcfg: - xsrf_field_name = 
try_get(master_ytcfg, lambda x: x['XSRF_FIELD_NAME'], compat_str) - if not xsrf_field_name: - xsrf_field_name = self._search_regex( - r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P\w+)\2', - webpage, 'xsrf field name', - group='xsrf_field_name', default='session_token') - info['annotations'] = self._download_webpage( - self._proto_relative_url(invideo_url), - video_id, note='Downloading annotations', - errnote='Unable to download video annotations', fatal=False, - data=urlencode_postdata({xsrf_field_name: xsrf_token})) - - if get_comments: - info['__post_extractor'] = lambda: self._extract_comments(master_ytcfg, video_id, contents, webpage) + info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage) self.mark_watched(video_id, player_responses) @@ -3447,7 +3285,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, { 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live', 'info_dict': { - 'id': 'FMtPN8yp5LU', # This will keep changing + 'id': '3yImotZU3tw', # This will keep changing 'ext': 'mp4', 'title': compat_str, 'uploader': 'Sky News', @@ -3511,7 +3349,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'url': 'https://www.youtube.com/feed/watch_later', 'only_matching': True, }, { - 'note': 'Recommended - redirects to home page', + 'note': 'Recommended - redirects to home page.', 'url': 'https://www.youtube.com/feed/recommended', 'only_matching': True, }, { @@ -3608,6 +3446,51 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'availability': 'unlisted' }, 'playlist_count': 1, + }, { + 'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData', + 'url': 'https://www.youtube.com/feed/recommended', + 'info_dict': { + 'id': 'recommended', + 'title': 'recommended', + }, + 'playlist_mincount': 50, + 'params': { + 'skip_download': True, + 'extractor_args': {'youtubetab': {'skip': ['webpage']}} + }, + }, { + 'note': 'API Fallback: /videos tab, sorted by oldest first', + 'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid', + 'info_dict': { + 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw', + 'title': 'Cody\'sLab - Videos', + 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa', + 'uploader': 'Cody\'sLab', + 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw', + }, + 'playlist_mincount': 650, + 'params': { + 'skip_download': True, + 'extractor_args': {'youtubetab': {'skip': ['webpage']}} + }, + }, { + 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...', + 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw', + 'info_dict': { + 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw', + 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', + 'title': 'Uploads from Royalty Free Music - Topic', + 'uploader': 'Royalty Free Music - Topic', + }, + 'expected_warnings': [ + 'A channel/user page was given', + 'The URL does not have a videos tab', + ], + 'playlist_mincount': 101, + 'params': { + 'skip_download': True, + 'extractor_args': {'youtubetab': {'skip': ['webpage']}} + }, }] @classmethod @@ -3796,7 +3679,7 @@ def _rich_grid_entries(self, contents): if entry: yield entry ''' - def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg): + def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data): def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds contents = try_get(parent_renderer, lambda x: x['contents'], list) or [] @@ -3838,7 +3721,7 @@ def extract_entries(parent_renderer): # this needs to called again for continua if not continuation_list[0]: 
                 continuation_list[0] = self._extract_continuation(parent_renderer)
 
-        continuation_list = [None]  # Python 2 doesnot support nonlocal
+        continuation_list = [None]  # Python 2 does not support nonlocal
         tab_content = try_get(tab, lambda x: x['content'], dict)
         if not tab_content:
             return
@@ -3848,12 +3731,12 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
         for entry in extract_entries(parent_renderer):
             yield entry
         continuation = continuation_list[0]
-        visitor_data = None
 
         for page_num in itertools.count(1):
             if not continuation:
                 break
-            headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            headers = self.generate_api_headers(
+                ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
             response = self._extract_response(
                 item_id='%s page %s' % (item_id, page_num),
                 query=continuation, headers=headers, ytcfg=ytcfg,
@@ -3861,8 +3744,9 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
 
             if not response:
                 break
-            visitor_data = try_get(
-                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
+            # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
+            # See: https://github.com/ytdl-org/youtube-dl/issues/28702
+            visitor_data = self._extract_visitor_data(response) or visitor_data
 
             known_continuation_renderers = {
                 'playlistVideoListContinuation': self._playlist_entries,
@@ -3936,9 +3820,10 @@ def _extract_uploader(cls, data):
                 try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
         return {k: v for k, v in uploader.items() if v is not None}
 
-    def _extract_from_tabs(self, item_id, webpage, data, tabs):
+    def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
         playlist_id = title = description = channel_url = channel_name = channel_id = None
-        thumbnails_list = tags = []
+        thumbnails_list = []
+        tags = []
 
         selected_tab = self._extract_selected_tab(tabs)
         renderer = try_get(
@@ -4003,20 +3888,15 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
             'channel': metadata['uploader'],
             'channel_id': metadata['uploader_id'],
             'channel_url': metadata['uploader_url']})
-        ytcfg = self.extract_ytcfg(item_id, webpage)
         return self.playlist_result(
             self._entries(
-                selected_tab, playlist_id,
-                self._extract_identity_token(webpage, item_id),
-                self._extract_account_syncid(ytcfg, data), ytcfg),
+                selected_tab, playlist_id, ytcfg,
+                self._extract_account_syncid(ytcfg, data),
+                self._extract_visitor_data(data, ytcfg)),
             **metadata)
 
-    def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
-        first_id = last_id = None
-        ytcfg = self.extract_ytcfg(playlist_id, webpage)
-        headers = self.generate_api_headers(
-            ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
-            identity_token=self._extract_identity_token(webpage, item_id=playlist_id))
+    def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
+        first_id = last_id = response = None
         for page_num in itertools.count(1):
             videos = list(self._playlist_entries(playlist))
             if not videos:
@@ -4033,6 +3913,9 @@ def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
             last_id = videos[-1]['id']
             watch_endpoint = try_get(
                 playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+            headers = self.generate_api_headers(
+                ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
+                visitor_data=self._extract_visitor_data(response, data, ytcfg))
             query = {
                 'playlistId': playlist_id,
                 'videoId': watch_endpoint.get('videoId') or last_id,
@@ -4047,7 +3930,7 @@ def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
             playlist = try_get(
                 response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
 
-    def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
+    def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
         title = playlist.get('title') or try_get(
             data, lambda x: x['titleText']['simpleText'], compat_str)
         playlist_id = playlist.get('playlistId') or item_id
@@ -4062,7 +3945,7 @@ def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
                 video_title=title)
 
         return self.playlist_result(
-            self._extract_mix_playlist(playlist, playlist_id, data, webpage),
+            self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
             playlist_id=playlist_id, playlist_title=title)
 
     def _extract_availability(self, data):
@@ -4106,7 +3989,7 @@ def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
             if renderer:
                 return renderer
 
-    def _reload_with_unavailable_videos(self, item_id, data, webpage):
+    def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
         """
         Get playlist with unavailable videos if the 'show unavailable videos' button exists.
         """
@@ -4130,12 +4013,9 @@ def _reload_with_unavailable_videos(self, item_id, data, webpage):
                 params = browse_endpoint.get('params')
                 break
 
-        ytcfg = self.extract_ytcfg(item_id, webpage)
         headers = self.generate_api_headers(
-            ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
-            identity_token=self._extract_identity_token(webpage, item_id=item_id),
-            visitor_data=try_get(
-                self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+            ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
+            visitor_data=self._extract_visitor_data(data, ytcfg))
         query = {
             'params': params or 'wgYCCAA=',
             'browseId': browse_id or 'VL%s' % item_id
@@ -4145,28 +4025,87 @@ def _reload_with_unavailable_videos(self, item_id, data, webpage):
             check_get_keys='contents', fatal=False, ytcfg=ytcfg,
             note='Downloading API JSON with unavailable videos')
 
-    def _extract_webpage(self, url, item_id):
+    def _extract_webpage(self, url, item_id, fatal=True):
         retries = self.get_param('extractor_retries', 3)
         count = -1
-        last_error = 'Incomplete yt initial data recieved'
+        webpage = data = last_error = None
         while count < retries:
             count += 1
             # Sometimes youtube returns a webpage with incomplete ytInitialData
             # See: https://github.com/yt-dlp/yt-dlp/issues/116
-            if count:
+            if last_error:
                 self.report_warning('%s. Retrying ...' % last_error)
-            webpage = self._download_webpage(
-                url, item_id,
-                'Downloading webpage%s' % (' (retry #%d)' % count if count else ''))
-            data = self.extract_yt_initial_data(item_id, webpage)
-            if data.get('contents') or data.get('currentVideoEndpoint'):
+            try:
+                webpage = self._download_webpage(
+                    url, item_id,
+                    note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
+                data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
+            except ExtractorError as e:
+                if isinstance(e.cause, network_exceptions):
+                    if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
+                        last_error = error_to_compat_str(e.cause or e.msg)
+                        if count < retries:
+                            continue
+                if fatal:
+                    raise
+                self.report_warning(error_to_compat_str(e))
                 break
-            # Extract alerts here only when there is error
-            self._extract_and_report_alerts(data)
-            if count >= retries:
-                raise ExtractorError(last_error)
+            else:
+                try:
+                    self._extract_and_report_alerts(data)
+                except ExtractorError as e:
+                    if fatal:
+                        raise
+                    self.report_warning(error_to_compat_str(e))
+                    break
+
+                if dict_get(data, ('contents', 'currentVideoEndpoint')):
+                    break
+
+                last_error = 'Incomplete yt initial data received'
+                if count >= retries:
+                    if fatal:
+                        raise ExtractorError(last_error)
+                    self.report_warning(last_error)
+                    break
+
         return webpage, data
 
+    def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
+        data = None
+        if 'webpage' not in self._configuration_arg('skip'):
+            webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
+            ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
+        if not data:
+            if not ytcfg and self.is_authenticated:
+                msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
+                if 'authcheck' not in self._configuration_arg('skip') and fatal:
+                    raise ExtractorError(
+                        msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
+                              ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
+                        expected=True)
+                self.report_warning(msg, only_once=True)
+            data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
+        return data, ytcfg
+
+    def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
+        headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
+        resolve_response = self._extract_response(
+            item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
+            ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
+        endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
+        for ep_key, ep in endpoints.items():
+            params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
+            if params:
+                return self._extract_response(
+                    item_id=item_id, query=params, ep=ep, headers=headers,
+                    ytcfg=ytcfg, fatal=fatal, default_client=default_client,
+                    check_get_keys=('contents', 'currentVideoEndpoint'))
+        err_note = 'Failed to resolve url (does the playlist exist?)'
+        if fatal:
+            raise ExtractorError(err_note, expected=True)
+        self.report_warning(err_note, item_id)
+
     @staticmethod
     def _smuggle_data(entries, data):
         for entry in entries:
@@ -4199,7 +4138,6 @@ def get_mobj(url):
         mobj = get_mobj(url)
         # Youtube returns incomplete data if tabname is not lower case
         pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
-
         if is_channel:
             if smuggled_data.get('is_music_url'):
                 if item_id[:2] == 'VL':
@@ -4207,12 +4145,14 @@ def get_mobj(url):
                     item_id = item_id[2:]
                     pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
                 elif item_id[:2] == 'MP':
-                    # Youtube music albums (/channel/MP...) have a OLAK playlist that can be extracted from the webpage
-                    item_id = self._search_regex(
-                        r'\\x22audioPlaylistId\\x22:\\x22([0-9A-Za-z_-]+)\\x22',
-                        self._download_webpage('https://music.youtube.com/channel/%s' % item_id, item_id),
-                        'playlist id')
-                    pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+                    # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
+                    mdata = self._extract_tab_endpoint(
+                        'https://music.youtube.com/channel/%s' % item_id, item_id, default_client='web_music')
+                    murl = traverse_obj(
+                        mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'), get_all=False, expected_type=compat_str)
+                    if not murl:
+                        raise ExtractorError('Failed to resolve album to playlist.')
+                    return self.url_result(murl, ie=YoutubeTabIE.ie_key())
                 elif mobj['channel_type'] == 'browse':
                     # Youtube music /browse/ should be changed to /channel/
                     pre = 'https://www.youtube.com/channel/%s' % item_id
@@ -4243,10 +4183,10 @@ def get_mobj(url):
         if video_id and playlist_id:
             if self.get_param('noplaylist'):
                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
-                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+                return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
             self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
 
-        webpage, data = self._extract_webpage(url, item_id)
+        data, ytcfg = self._extract_data(url, item_id)
 
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
@@ -4264,11 +4204,7 @@ def get_mobj(url):
                         pl_id = 'UU%s' % item_id[2:]
                         pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
                         try:
-                            pl_webpage, pl_data = self._extract_webpage(pl_url, pl_id)
-                            for alert_type, alert_message in self._extract_alerts(pl_data):
-                                if alert_type == 'error':
-                                    raise ExtractorError('Youtube said: %s' % alert_message)
-                            item_id, url, webpage, data = pl_id, pl_url, pl_webpage, pl_data
+                            data, ytcfg, item_id, url = *self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True), pl_id, pl_url
                         except ExtractorError:
                             self.report_warning('The playlist gave error. Falling back to channel URL')
                         else:
@@ -4278,17 +4214,17 @@ def get_mobj(url):
         # YouTube sometimes provides a button to reload playlist with unavailable videos.
         if 'no-youtube-unavailable-videos' not in compat_opts:
-            data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
-        self._extract_and_report_alerts(data)
+            data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
+        self._extract_and_report_alerts(data, only_once=True)
 
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
-            return self._extract_from_tabs(item_id, webpage, data, tabs)
+            return self._extract_from_tabs(item_id, ytcfg, data, tabs)
 
         playlist = try_get(
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
-            return self._extract_from_playlist(item_id, url, data, playlist, webpage)
+            return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
 
         video_id = try_get(
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
@@ -4296,7 +4232,7 @@ def get_mobj(url):
         if video_id:
             if mobj['tab'] != '/live':  # live tab is expected to redirect to video
                 self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
-            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+            return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
 
         raise ExtractorError('Unable to recognize tab page')
 
@@ -4416,7 +4352,7 @@ class YoutubeYtBeIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         playlist_id = mobj.group('playlist_id')
         return self.url_result(
@@ -4471,11 +4407,10 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
     _SEARCH_PARAMS = None
    _TESTS = []
 
-    def _entries(self, query, n):
+    def _search_results(self, query):
         data = {'query': query}
         if self._SEARCH_PARAMS:
             data['params'] = self._SEARCH_PARAMS
-        total = 0
         continuation = {}
         for page_num in itertools.count(1):
             data.update(continuation)
@@ -4518,17 +4453,10 @@ def _entries(self, query, n):
                         continue
 
                     yield self._extract_video(video)
-                    total += 1
-                    if total == n:
-                        return
 
             if not continuation:
                 break
 
-    def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
-        return self.playlist_result(self._entries(query, n), query, query)
-
 
 class YoutubeSearchDateIE(YoutubeSearchIE):
     IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
@@ -4559,7 +4487,7 @@ def _make_valid_url(cls):
         return cls._VALID_URL
 
     def _real_extract(self, url):
-        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        qs = parse_qs(url)
         query = (qs.get('search_query') or qs.get('q'))[0]
         self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
         return self._get_n_results(query, self._MAX_RESULTS)
@@ -4686,6 +4614,16 @@ def _real_extract(self, url):
             expected=True)
 
 
+class YoutubeClipIE(InfoExtractor):
+    IE_NAME = 'youtube:clip'
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
+
+    def _real_extract(self, url):
+        self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
+        return self.url_result(url, 'Generic')
+
+
 class YoutubeTruncatedIDIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_id'
     IE_DESC = False  # Do not list