X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/f3eaa8dd1c3949b7165157b306ad47df338eaa06..120916dac243d3d16e50749927c39e29241f7e61:/yt_dlp/extractor/youtube.py

diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index f9323d292..51abeb2db 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals

+import calendar
 import hashlib
 import itertools
 import json
@@ -15,7 +16,6 @@
 from ..compat import (
     compat_chr,
     compat_HTTPError,
-    compat_kwargs,
     compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote_plus,
@@ -25,7 +25,11 @@
 )
 from ..jsinterp import JSInterpreter
 from ..utils import (
+    bool_or_none,
     clean_html,
+    dict_get,
+    datetime_from_str,
+    error_to_compat_str,
     ExtractorError,
     format_field,
     float_or_none,
@@ -45,10 +49,14 @@
     update_url_query,
     url_or_none,
     urlencode_postdata,
-    urljoin,
+    urljoin
 )


+def parse_qs(url):
+    return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+
+
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
@@ -60,7 +68,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

     _RESERVED_NAMES = (
         r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|'
-        r'movies|results|shared|hashtag|trending|feed|feeds|'
+        r'movies|results|shared|hashtag|trending|feed|feeds|oembed|'
         r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')

     _NETRC_MACHINE = 'youtube'
@@ -69,11 +77,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

     _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'

-    def _ids_to_results(self, ids):
-        return [
-            self.url_result(vid_id, 'Youtube', video_id=vid_id)
-            for vid_id in ids]
-
     def _login(self):
         """
         Attempt to log in to YouTube.
@@ -124,7 +127,7 @@ def req(url, f_req, note, errnote):
                 })

         def warn(message):
-            self._downloader.report_warning(message)
+            self.report_warning(message)

         lookup_req = [
             username,
@@ -259,27 +262,30 @@ def warn(message):

         return True

-    def _download_webpage_handle(self, *args, **kwargs):
-        query = kwargs.get('query', {}).copy()
-        kwargs['query'] = query
-        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
-            *args, **compat_kwargs(kwargs))
+    def _initialize_consent(self):
+        cookies = self._get_cookies('https://www.youtube.com/')
+        if cookies.get('__Secure-3PSID'):
+            return
+        consent_id = None
+        consent = cookies.get('CONSENT')
+        if consent:
+            if 'YES' in consent.value:
+                return
+            consent_id = self._search_regex(
+                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+        if not consent_id:
+            consent_id = random.randint(100, 999)
+        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)

     def _real_initialize(self):
+        self._initialize_consent()
         if self._downloader is None:
             return
         if not self._login():
             return

-    _DEFAULT_API_DATA = {
-        'context': {
-            'client': {
-                'clientName': 'WEB',
-                'clientVersion': '2.20210301.08.00',
-            }
-        },
-    }
-
+    _YT_WEB_CLIENT_VERSION = '2.20210407.08.00'
+    _YT_INNERTUBE_API_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
     _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
     _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
     _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

+    @staticmethod
+    def _extract_account_syncid(data):
+        """Extract syncId required to download private playlists of secondary channels"""
+        sync_ids = (
+            try_get(data, lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'], compat_str)
+            or '').split('||')
+        if len(sync_ids) >= 2 and sync_ids[1]:
+            # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
+            # and just "user_syncid||" for primary channel. We only want the channel_syncid
+            return sync_ids[0]
+        # ytcfg includes channel_syncid if on secondary channel
+        return data.get('DELEGATED_SESSION_ID')
+
     def _extract_ytcfg(self, video_id, webpage):
+        if not webpage:
+            return {}
         return self._parse_json(
             self._search_regex(
                 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
-                default='{}'), video_id, fatal=False)
+                default='{}'), video_id, fatal=False) or {}
+
+    def __extract_client_version(self, ytcfg):
+        return try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or self._YT_WEB_CLIENT_VERSION
+
+    def _extract_context(self, ytcfg=None):
+        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict)
+        if context:
+            return context
+
+        # Recreate the client context (required)
+        client_version = self.__extract_client_version(ytcfg)
+        client_name = try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_NAME'], compat_str) or 'WEB'
+        context = {
+            'client': {
+                'clientName': client_name,
+                'clientVersion': client_version,
+            }
+        }
+        visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
+        if visitor_data:
+            context['client']['visitorData'] = visitor_data
+        return context
+
+    def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None, visitor_data=None):
+        headers = {
+            'X-YouTube-Client-Name': '1',
+            'X-YouTube-Client-Version': self.__extract_client_version(ytcfg),
+        }
+        if identity_token:
+            headers['x-youtube-identity-token'] = identity_token
+        if account_syncid:
+            headers['X-Goog-PageId'] = account_syncid
+            headers['X-Goog-AuthUser'] = 0
+        if visitor_data:
+            headers['x-goog-visitor-id'] = visitor_data
+        auth = self._generate_sapisidhash_header()
+        if auth is not None:
+            headers['Authorization'] = auth
+            headers['X-Origin'] = 'https://www.youtube.com'
+        return headers

     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
@@ -342,7 +419,7 @@ def _extract_video(self, renderer):
             (lambda x: x['ownerText']['runs'][0]['text'],
              lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
         return {
-            '_type': 'url_transparent',
+            '_type': 'url',
             'ie_key': YoutubeIE.ie_key(),
             'id': video_id,
             'url': video_id,
@@ -362,14 +439,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         r'(?:(?:www|dev)\.)?invidio\.us',
         # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
         r'(?:www\.)?invidious\.pussthecat\.org',
-        r'(?:www\.)?invidious\.048596\.xyz',
         r'(?:www\.)?invidious\.zee\.li',
-        r'(?:www\.)?vid\.puffyan\.us',
         r'(?:(?:www|au)\.)?ytprivate\.com',
         r'(?:www\.)?invidious\.namazso\.eu',
         r'(?:www\.)?invidious\.ethibox\.fr',
-        r'(?:www\.)?inv\.skyn3t\.in',
-        r'(?:www\.)?invidious\.himiko\.cloud',
         r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
         r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
         r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
@@ -378,25 +451,32 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         r'(?:(?:www|no)\.)?invidiou\.sh',
         r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
         r'(?:www\.)?invidious\.kabi\.tk',
-        r'(?:www\.)?invidious\.13ad\.de',
         r'(?:www\.)?invidious\.mastodon\.host',
         r'(?:www\.)?invidious\.zapashcanon\.fr',
         r'(?:www\.)?invidious\.kavin\.rocks',
+        r'(?:www\.)?invidious\.tinfoil-hat\.net',
+        r'(?:www\.)?invidious\.himiko\.cloud',
+        r'(?:www\.)?invidious\.reallyancient\.tech',
         r'(?:www\.)?invidious\.tube',
         r'(?:www\.)?invidiou\.site',
         r'(?:www\.)?invidious\.site',
         r'(?:www\.)?invidious\.xyz',
         r'(?:www\.)?invidious\.nixnet\.xyz',
+        r'(?:www\.)?invidious\.048596\.xyz',
         r'(?:www\.)?invidious\.drycat\.fr',
+        r'(?:www\.)?inv\.skyn3t\.in',
         r'(?:www\.)?tube\.poal\.co',
         r'(?:www\.)?tube\.connect\.cafe',
         r'(?:www\.)?vid\.wxzm\.sx',
         r'(?:www\.)?vid\.mint\.lgbt',
+        r'(?:www\.)?vid\.puffyan\.us',
         r'(?:www\.)?yewtu\.be',
         r'(?:www\.)?yt\.elukerio\.org',
         r'(?:www\.)?yt\.lelux\.fi',
         r'(?:www\.)?invidious\.ggc-project\.de',
         r'(?:www\.)?yt\.maisputain\.ovh',
+        r'(?:www\.)?ytprivate\.com',
+        r'(?:www\.)?invidious\.13ad\.de',
         r'(?:www\.)?invidious\.toot\.koeln',
         r'(?:www\.)?invidious\.fdn\.fr',
         r'(?:www\.)?watch\.nettohikari\.com',
@@ -439,16 +519,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                          |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                          )
                      )?                                                       # all until now is optional -> you can pass the naked ID
-                     (?P<id>[0-9A-Za-z_-]{11})      # here is it! the YouTube video ID
-                     (?!.*?\blist=
-                        (?:
-                            %(playlist_id)s|                                  # combined list/video URLs are handled by the playlist IE
-                            WL                                                # WL are handled by the watch later IE
-                        )
-                     )
+                     (?P<id>[0-9A-Za-z_-]{11})      # here is it! the YouTube video ID
                      (?(1).+)?                                                # if we found the ID, everything can follow
                      $""" % {
-        'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
         'invidious': '|'.join(_INVIDIOUS_SITES),
     }
     _PLAYER_INFO_RE = (
@@ -933,6 +1006,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
             'skip': 'This video does not exist.',
         },
+        {
+            # Video with incomplete 'yt:stretch=16:'
+            'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
+            'only_matching': True,
+        },
         {
             # Video licensed under Creative Commons
             'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
@@ -1209,8 +1287,44 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
             'only_matching': True,
         },
+        {
+            # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
+            'url': 'cBvYw8_A0vQ',
+            'info_dict': {
+                'id': 'cBvYw8_A0vQ',
+                'ext': 'mp4',
+                'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
+                'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
+                'upload_date': '20201120',
+                'uploader': 'Walk around Japan',
+                'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
+                'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        }, {
+            # Has multiple audio streams
+            'url': 'WaOKSUlf4TM',
+            'only_matching': True
+        },
+        {
+            # multiple subtitles with same lang_code
+            'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
+            'only_matching': True,
+        },
     ]

+    @classmethod
+    def suitable(cls, url):
+        # Hack for lazy extractors until more generic solution is implemented
+        # (see #28780)
+        from .youtube import parse_qs
+        qs = parse_qs(url)
+        if qs.get('list', [None])[0]:
+            return False
+        return super(YoutubeIE, cls).suitable(url)
+
     def __init__(self, *args, **kwargs):
         super(YoutubeIE, self).__init__(*args, **kwargs)
         self._code_cache = {}
@@ -1462,23 +1576,297 @@ def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
             (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE), regex),
             webpage, name, default='{}'), video_id, fatal=False)

+    @staticmethod
+    def parse_time_text(time_text):
+        """
+        Parse the comment time text
+        time_text is in the format 'X units ago (edited)'
+        """
+        time_text_split = time_text.split(' ')
+        if len(time_text_split) >= 3:
+            return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
+
+    @staticmethod
+    def _join_text_entries(runs):
+        text = None
+        for run in runs:
+            if not isinstance(run, dict):
+                continue
+            sub_text = try_get(run, lambda x: x['text'], compat_str)
+            if sub_text:
+                if not text:
+                    text = sub_text
+                    continue
+                text += sub_text
+        return text
+
+    def _extract_comment(self, comment_renderer, parent=None):
+        comment_id = comment_renderer.get('commentId')
+        if not comment_id:
+            return
+        comment_text_runs = try_get(comment_renderer, lambda x: x['contentText']['runs']) or []
+        text = self._join_text_entries(comment_text_runs) or ''
+        comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
+        time_text = self._join_text_entries(comment_time_text)
+        timestamp = calendar.timegm(self.parse_time_text(time_text).timetuple())
+        author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
+        author_id = try_get(comment_renderer,
+                            lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
+        votes = str_to_int(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
+                                                      lambda x: x['likeCount']), compat_str)) or 0
+        author_thumbnail = try_get(comment_renderer,
+                                   lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
+
+        author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
+        is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
+        return {
+            'id': comment_id,
+            'text': text,
+            'timestamp': timestamp,
+            'time_text': time_text,
+            'like_count': votes,
+            'is_favorited': is_liked,
+            'author': author,
+            'author_id': author_id,
+            'author_thumbnail': author_thumbnail,
+            'author_is_uploader': author_is_uploader,
+            'parent': parent or 'root'
+        }
+
+    def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
+                         ytcfg, session_token_list, parent=None, comment_counts=None):
+
+        def extract_thread(parent_renderer):
+            contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
+            if not parent:
+                comment_counts[2] = 0
+            for content in contents:
+                comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
+                comment_renderer = try_get(
+                    comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get(
+                    content, (lambda x: x['commentRenderer'], dict))
+
+                if not comment_renderer:
+                    continue
+                comment = self._extract_comment(comment_renderer, parent)
+                if not comment:
+                    continue
+                comment_counts[0] += 1
+                yield comment
+                # Attempt to get the replies
+                comment_replies_renderer = try_get(
+                    comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
+
+                if comment_replies_renderer:
+                    comment_counts[2] += 1
+                    comment_entries_iter = self._comment_entries(
+                        comment_replies_renderer, identity_token, account_syncid, ytcfg,
+                        parent=comment.get('id'), session_token_list=session_token_list,
+                        comment_counts=comment_counts)
+
+                    for reply_comment in comment_entries_iter:
+                        yield reply_comment
+
+        if not comment_counts:
+            # comment so far, est. total comments, current comment thread #
+            comment_counts = [0, 0, 0]
+
+        # TODO: Generalize the download code with TabIE
+        context = self._extract_context(ytcfg)
+        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
+        continuation = YoutubeTabIE._extract_continuation(root_continuation_data)  # TODO
+        first_continuation = False
+        if parent is None:
+            first_continuation = True
+
+        for page_num in itertools.count(0):
+            if not continuation:
+                break
+            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            retries = self._downloader.params.get('extractor_retries', 3)
+            count = -1
+            last_error = None
+
+            while count < retries:
+                count += 1
+                if last_error:
+                    self.report_warning('%s. Retrying ...' % last_error)
+                try:
+                    query = {
+                        'ctoken': continuation['ctoken'],
+                        'pbj': 1,
+                        'type': 'next',
+                    }
+                    if parent:
+                        query['action_get_comment_replies'] = 1
+                    else:
+                        query['action_get_comments'] = 1
+
+                    comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
+                    if page_num == 0:
+                        if first_continuation:
+                            note_prefix = 'Downloading initial comment continuation page'
+                        else:
+                            note_prefix = '    Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str)
+                    else:
+                        note_prefix = '%sDownloading comment%s page %d %s' % (
+                            '    ' if parent else '',
+                            ' replies' if parent else '',
+                            page_num,
+                            comment_prog_str)
+
+                    browse = self._download_json(
+                        'https://www.youtube.com/comment_service_ajax', None,
+                        '%s %s' % (note_prefix, '(retry #%d)' % count if count else ''),
+                        headers=headers, query=query,
+                        data=urlencode_postdata({
+                            'session_token': session_token_list[0]
+                        }))
+                except ExtractorError as e:
+                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
+                        if e.cause.code == 413:
+                            self.report_warning('Assumed end of comments (received HTTP Error 413)')
+                            return
+                        # Downloading page may result in intermittent 5xx HTTP error
+                        # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+                        last_error = 'HTTP Error %s' % e.cause.code
+                        if e.cause.code == 404:
+                            last_error = last_error + ' (this API is probably deprecated)'
+                        if count < retries:
+                            continue
+                    raise
+                else:
+                    session_token = try_get(browse, lambda x: x['xsrf_token'], compat_str)
+                    if session_token:
+                        session_token_list[0] = session_token
+
+                    response = try_get(browse,
+                                       (lambda x: x['response'],
+                                        lambda x: x[1]['response'])) or {}

+                    if response.get('continuationContents'):
+                        break
+
+                    # YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
+                    if browse.get('reload'):
+                        raise ExtractorError('Invalid or missing params in continuation request', expected=False)
+
+                    # TODO: not tested, merged from old extractor
+                    err_msg = browse.get('externalErrorMessage')
+                    if err_msg:
+                        raise ExtractorError('YouTube said: %s' % err_msg, expected=False)
+
+                    # Youtube sometimes sends incomplete data
+                    # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+                    last_error = 'Incomplete data received'
+                    if count >= retries:
+                        raise ExtractorError(last_error)
+
+            if not response:
+                break
+            visitor_data = try_get(
+                response,
+                lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
+                compat_str) or visitor_data
+
+            known_continuation_renderers = {
+                'itemSectionContinuation': extract_thread,
+                'commentRepliesContinuation': extract_thread
+            }
+
+            # extract next root continuation from the results
+            continuation_contents = try_get(
+                response, lambda x: x['continuationContents'], dict) or {}
+
+            for key, value in continuation_contents.items():
+                if key not in known_continuation_renderers:
+                    continue
+                continuation_renderer = value
+
+                if first_continuation:
+                    first_continuation = False
+                    expected_comment_count = try_get(
+                        continuation_renderer,
+                        (lambda x: x['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'],
+                         lambda x: x['header']['commentsHeaderRenderer']['commentsCount']['runs'][0]['text']),
+                        compat_str)
+
+                    if expected_comment_count:
+                        comment_counts[1] = str_to_int(expected_comment_count)
+                        self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
+                        yield comment_counts[1]
+                    # TODO: cli arg.
+                    # 1/True for newest, 0/False for popular (default)
+                    comment_sort_index = int(True)
+                    sort_continuation_renderer = try_get(
+                        continuation_renderer,
+                        lambda x: x['header']['commentsHeaderRenderer']['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems']
+                        [comment_sort_index]['continuation']['reloadContinuationData'], dict)
+                    # If this fails, the initial continuation page
+                    # starts off with popular anyways.
+                    if sort_continuation_renderer:
+                        continuation = YoutubeTabIE._build_continuation_query(
+                            continuation=sort_continuation_renderer.get('continuation'),
+                            ctp=sort_continuation_renderer.get('clickTrackingParams'))
+                        self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest'))
+                        break
+
+                for entry in known_continuation_renderers[key](continuation_renderer):
+                    yield entry
+
+                continuation = YoutubeTabIE._extract_continuation(continuation_renderer)  # TODO
+                break
+
+    def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
+        """Entry for comment extraction"""
+        comments = []
+        known_entry_comment_renderers = (
+            'itemSectionRenderer',
+        )
+        estimated_total = 0
+        for entry in contents:
+            for key, renderer in entry.items():
+                if key not in known_entry_comment_renderers:
+                    continue
+
+                comment_iter = self._comment_entries(
+                    renderer,
+                    identity_token=self._extract_identity_token(webpage, item_id=video_id),
+                    account_syncid=self._extract_account_syncid(ytcfg),
+                    ytcfg=ytcfg,
+                    session_token_list=[xsrf_token])
+
+                for comment in comment_iter:
+                    if isinstance(comment, int):
+                        estimated_total = comment
+                        continue
+                    comments.append(comment)
+                break
+        self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total))
+        return {
+            'comments': comments,
+            'comment_count': len(comments),
+        }
+
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
         video_id = self._match_id(url)
         base_url = self.http_scheme() + '//www.youtube.com/'
         webpage_url = base_url + 'watch?v=' + video_id
         webpage = self._download_webpage(
-            webpage_url + '&has_verified=1&bpctr=9999999999',
-            video_id, fatal=False)
+            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)

         player_response = None
         if webpage:
             player_response = self._extract_yt_initial_variable(
                 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
                 video_id, 'initial player response')
+
+        ytcfg = self._extract_ytcfg(video_id, webpage)
         if not player_response:
             player_response = self._call_api(
-                'player', {'videoId': video_id}, video_id)
+                'player', {'videoId': video_id}, video_id, api_key=self._extract_api_key(ytcfg))

         playability_status = player_response.get('playabilityStatus') or {}
         if playability_status.get('reason') == 'Sign in to confirm your age':
@@ -1506,7 +1894,13 @@ def _real_extract(self, url):
         def get_text(x):
             if not x:
                 return
-            return x.get('simpleText') or ''.join([r['text'] for r in x['runs']])
+            text = x.get('simpleText')
+            if text and isinstance(text, compat_str):
+                return text
+            runs = x.get('runs')
+            if not isinstance(runs, list):
+                return
+            return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])

         search_meta = (
             lambda x: self._html_search_meta(x, webpage, default=None)) \
@@ -1611,17 +2005,19 @@ def feed_entry(name):
                     itags.append(itag)
                 tbr = float_or_none(
                     fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
+                audio_track = fmt.get('audioTrack') or {}
                 dct = {
                     'asr': int_or_none(fmt.get('audioSampleRate')),
                     'filesize': int_or_none(fmt.get('contentLength')),
                     'format_id': itag,
-                    'format_note': fmt.get('qualityLabel') or quality,
+                    'format_note': audio_track.get('displayName') or fmt.get('qualityLabel') or quality,
                     'fps': int_or_none(fmt.get('fps')),
                     'height': int_or_none(fmt.get('height')),
                     'quality': q(quality),
                     'tbr': tbr,
                     'url': fmt_url,
                     'width': fmt.get('width'),
+                    'language': audio_track.get('id', '').split('.')[0],
                 }
                 mimetype = fmt.get('mimeType')
                 if mimetype:
@@ -1655,7 +2051,7 @@ def feed_entry(name):
                         f['format_id'] = itag
                     formats.append(f)

-        if self._downloader.params.get('youtube_include_dash_manifest'):
+        if self._downloader.params.get('youtube_include_dash_manifest', True):
             dash_manifest_url = streaming_data.get('dashManifestUrl')
             if dash_manifest_url:
                 for f in self._extract_mpd_formats(
@@ -1677,7 +2073,7 @@ def feed_entry(name):

         if not formats:
             if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
-                raise ExtractorError(
+                self.raise_no_formats(
                     'This video is DRM protected.', expected=True)
             pemr = try_get(
                 playability_status,
@@ -1692,11 +2088,10 @@ def feed_entry(name):
                 if not countries:
                     regions_allowed = search_meta('regionsAllowed')
                     countries = regions_allowed.split(',') if regions_allowed else None
-                self.raise_geo_restricted(
-                    subreason, countries)
+                self.raise_geo_restricted(subreason, countries, metadata_available=True)
                 reason += '\n' + subreason
             if reason:
-                raise ExtractorError(reason, expected=True)
+                self.raise_no_formats(reason, expected=True)

         self._sort_formats(formats)

@@ -1707,13 +2102,16 @@ def feed_entry(name):
                     for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
         for keyword in keywords:
             if keyword.startswith('yt:stretch='):
-                w, h = keyword.split('=')[1].split(':')
-                w, h = int(w), int(h)
-                if w > 0 and h > 0:
-                    ratio = w / h
-                    for f in formats:
-                        if f.get('vcodec') != 'none':
-                            f['stretched_ratio'] = ratio
+                mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
+                if mobj:
+                    # NB: float is intentional for forcing float division
+                    w, h = (float(v) for v in mobj.groups())
+                    if w > 0 and h > 0:
+                        ratio = w / h
+                        for f in formats:
+                            if f.get('vcodec') != 'none':
+                                f['stretched_ratio'] = ratio
+                break

         thumbnails = []
         for container in (video_details, microformat):
@@ -1723,6 +2121,11 @@ def feed_entry(name):
                 thumbnail_url = thumbnail.get('url')
                 if not thumbnail_url:
                     continue
+                # Sometimes youtube gives a wrong thumbnail URL. See:
+                # https://github.com/yt-dlp/yt-dlp/issues/233
+                # https://github.com/ytdl-org/youtube-dl/issues/28023
+                if 'maxresdefault' in thumbnail_url:
+                    thumbnail_url = thumbnail_url.split('?')[0]
                 thumbnails.append({
                     'height': int_or_none(thumbnail.get('height')),
                     'url': thumbnail_url,
@@ -1775,7 +2178,7 @@ def feed_entry(name):
             'tags': keywords,
             'is_live': is_live,
             'playable_in_embed': playability_status.get('playableInEmbed'),
-            'was_live': video_details.get('isLiveContent')
+            'was_live': video_details.get('isLiveContent'),
         }

         pctr = try_get(
@@ -1784,7 +2187,7 @@ def feed_entry(name):
         subtitles = {}
         if pctr:
             def process_language(container, base_url, lang_code, query):
-                lang_subs = []
+                lang_subs = container.setdefault(lang_code, [])
                 for fmt in self._SUBTITLE_FORMATS:
                     query.update({
                         'fmt': fmt,
@@ -1793,14 +2196,15 @@ def process_language(container, base_url, lang_code, query):
                         'ext': fmt,
                         'url': update_url_query(base_url, query),
                     })
-                container[lang_code] = lang_subs

             for caption_track in (pctr.get('captionTracks') or []):
                 base_url = caption_track.get('baseUrl')
                 if not base_url:
                     continue
                 if caption_track.get('kind') != 'asr':
-                    lang_code = caption_track.get('languageCode')
+                    lang_code = (
+                        remove_start(caption_track.get('vssId') or '', '.').replace('.', '-')
+                        or caption_track.get('languageCode'))
                     if not lang_code:
                         continue
                     process_language(
@@ -1851,13 +2255,14 @@ def process_language(container, base_url, lang_code, query):
                 'yt initial data')
         if not initial_data:
             initial_data = self._call_api(
-                'next', {'videoId': video_id}, video_id, fatal=False)
+                'next', {'videoId': video_id}, video_id, fatal=False, api_key=self._extract_api_key(ytcfg))

         if not is_live:
             try:
                 # This will error if there is no livechat
                 initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
                 info['subtitles']['live_chat'] = [{
+                    'url': 'https://www.youtube.com/watch?v=%s' % video_id,  # url is needed to set cookies
                     'video_id': video_id,
                     'ext': 'json',
                     'protocol': 'youtube_live_chat_replay',
@@ -1951,7 +2356,7 @@ def chapter_time(mmlir):
             info['channel'] = get_text(try_get(
                 vsir,
                 lambda x: x['owner']['videoOwnerRenderer']['title'],
-                compat_str))
+                dict))
             rows = try_get(
                 vsir,
                 lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
@@ -1992,6 +2397,35 @@ def chapter_time(mmlir):
                 if v:
                     info[d_k] = v

+        is_private = bool_or_none(video_details.get('isPrivate'))
+        is_unlisted = bool_or_none(microformat.get('isUnlisted'))
+        is_membersonly = None
+        is_premium = None
+        if initial_data and is_private is not None:
+            is_membersonly = False
+            is_premium = False
+            contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
+            for content in contents or []:
+                badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
+                for badge in badges or []:
+                    label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label']) or ''
+                    if label.lower() == 'members only':
+                        is_membersonly = True
+                        break
+                    elif label.lower() == 'premium':
+                        is_premium = True
+                        break
+                if is_membersonly or is_premium:
+                    break
+
+        # TODO: Add this for playlists
+        info['availability'] = self._availability(
+            is_private=is_private,
+            needs_premium=is_premium,
+            needs_subscription=is_membersonly,
+            needs_auth=info['age_limit'] >= 18,
+            is_unlisted=None if is_private is None else is_unlisted)
+
         # get xsrf for annotations or comments
         get_annotations = self._downloader.params.get('writeannotations', False)
         get_comments = self._downloader.params.get('getcomments', False)
@@ -2024,156 +2458,8 @@ def chapter_time(mmlir):
                 errnote='Unable to download video annotations', fatal=False,
                 data=urlencode_postdata({xsrf_field_name: xsrf_token}))

-        # Get comments
-        # TODO: Refactor and move to seperate function
-        def extract_comments():
-            expected_video_comment_count = 0
-            video_comments = []
-            comment_xsrf = xsrf_token
-
-            def find_value(html, key, num_chars=2, separator='"'):
-                pos_begin = html.find(key) + len(key) + num_chars
-                pos_end = html.find(separator, pos_begin)
-                return html[pos_begin: pos_end]
-
-            def search_dict(partial, key):
-                if isinstance(partial, dict):
-                    for k, v in partial.items():
-                        if k == key:
-                            yield v
-                        else:
-                            for o in search_dict(v, key):
-                                yield o
-                elif isinstance(partial, list):
-                    for i in partial:
-                        for o in search_dict(i, key):
-                            yield o
-
-            continuations = []
-            if initial_data:
-                try:
-                    ncd = next(search_dict(initial_data, 'nextContinuationData'))
-                    continuations = [ncd['continuation']]
-                # Handle videos where comments have been disabled entirely
-                except StopIteration:
-                    pass
-
-            def get_continuation(continuation, session_token, replies=False):
-                query = {
-                    'pbj': 1,
-                    'ctoken': continuation,
-                }
-                if replies:
-                    query['action_get_comment_replies'] = 1
-                else:
-                    query['action_get_comments'] = 1
-
-                while True:
-                    content, handle = self._download_webpage_handle(
-                        'https://www.youtube.com/comment_service_ajax',
-                        video_id,
-                        note=False,
-                        expected_status=[413],
-                        data=urlencode_postdata({
-                            'session_token': session_token
-                        }),
-                        query=query,
-                        headers={
-                            'Accept': '*/*',
-                            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
-                            'X-YouTube-Client-Name': '1',
-                            'X-YouTube-Client-Version': '2.20201202.06.01'
-                        }
-                    )
-
-                    response_code = handle.getcode()
-                    if (response_code == 200):
-                        return self._parse_json(content, video_id)
-                    if (response_code == 413):
-                        return None
-                    raise ExtractorError('Unexpected HTTP error code: %s' % response_code)
-
-            first_continuation = True
-            chain_msg = ''
-            self.to_screen('Downloading comments')
-            while continuations:
-                continuation = continuations.pop()
-                comment_response = get_continuation(continuation, comment_xsrf)
-                if not comment_response:
-                    continue
-                if list(search_dict(comment_response, 'externalErrorMessage')):
-                    raise ExtractorError('Error returned from server: ' + next(search_dict(comment_response, 'externalErrorMessage')))
-
-                if 'continuationContents' not in comment_response['response']:
-                    # Something is wrong here. Youtube won't accept this continuation token for some reason and responds with a user satisfaction dialog (error?)
-                    continue
-                # not sure if this actually helps
-                if 'xsrf_token' in comment_response:
-                    comment_xsrf = comment_response['xsrf_token']
-
-                item_section = comment_response['response']['continuationContents']['itemSectionContinuation']
-                if first_continuation:
-                    expected_video_comment_count = int(item_section['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'].replace(' Comments', '').replace('1 Comment', '1').replace(',', ''))
-                    first_continuation = False
-                if 'contents' not in item_section:
-                    # continuation returned no comments?
-                    # set an empty array as to not break the for loop
-                    item_section['contents'] = []
-
-                for meta_comment in item_section['contents']:
-                    comment = meta_comment['commentThreadRenderer']['comment']['commentRenderer']
-                    video_comments.append({
-                        'id': comment['commentId'],
-                        'text': ''.join([c['text'] for c in try_get(comment, lambda x: x['contentText']['runs'], list) or []]),
-                        'time_text': ''.join([c['text'] for c in comment['publishedTimeText']['runs']]),
-                        'author': comment.get('authorText', {}).get('simpleText', ''),
-                        'votes': comment.get('voteCount', {}).get('simpleText', '0'),
-                        'author_thumbnail': comment['authorThumbnail']['thumbnails'][-1]['url'],
-                        'parent': 'root'
-                    })
-                    if 'replies' not in meta_comment['commentThreadRenderer']:
-                        continue
-
-                    reply_continuations = [rcn['nextContinuationData']['continuation'] for rcn in meta_comment['commentThreadRenderer']['replies']['commentRepliesRenderer']['continuations']]
-                    while reply_continuations:
-                        time.sleep(1)
-                        continuation = reply_continuations.pop()
-                        replies_data = get_continuation(continuation, comment_xsrf, True)
-                        if not replies_data or 'continuationContents' not in replies_data[1]['response']:
-                            continue
-
-                        if self._downloader.params.get('verbose', False):
-                            chain_msg = ' (chain %s)' % comment['commentId']
-                        self.to_screen('Comments downloaded: %d of ~%d%s' % (len(video_comments), expected_video_comment_count, chain_msg))
-                        reply_comment_meta = replies_data[1]['response']['continuationContents']['commentRepliesContinuation']
-                        for reply_meta in reply_comment_meta.get('contents', {}):
-                            reply_comment = reply_meta['commentRenderer']
-                            video_comments.append({
-                                'id': reply_comment['commentId'],
-                                'text': ''.join([c['text'] for c in reply_comment['contentText']['runs']]),
-                                'time_text': ''.join([c['text'] for c in reply_comment['publishedTimeText']['runs']]),
-                                'author': reply_comment.get('authorText', {}).get('simpleText', ''),
-                                'votes': reply_comment.get('voteCount', {}).get('simpleText', '0'),
-                                'author_thumbnail': reply_comment['authorThumbnail']['thumbnails'][-1]['url'],
-                                'parent': comment['commentId']
-                            })
-                        if 'continuations' not in reply_comment_meta or len(reply_comment_meta['continuations']) == 0:
-                            continue
-                        reply_continuations += [rcn['nextContinuationData']['continuation'] for rcn in reply_comment_meta['continuations']]
-
-                self.to_screen('Comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
-                if 'continuations' in item_section:
-                    continuations += [ncd['nextContinuationData']['continuation'] for ncd in item_section['continuations']]
-                time.sleep(1)
-
-            self.to_screen('Total comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
-            return {
-                'comments': video_comments,
-                'comment_count': expected_video_comment_count
-            }
-
         if get_comments:
-            info['__post_extractor'] = extract_comments
+            info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage, xsrf_token)

         self.mark_watched(video_id, player_response)

@@ -2223,6 +2509,15 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
            'uploader': 'Игорь Клейнер',
         },
+    }, {
+        # playlists, series
+        'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
+        'playlist_mincount': 5,
+        'info_dict': {
+            'id': 'UCYO_jab_esuFRV4b17AJtAw',
+            'title': '3Blue1Brown - Playlists',
+            'description': 'md5:e1384e8a133307dd10edee76e875d62f',
+        },
     }, {
         # playlists, singlepage
         'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
@@ -2367,6 +2662,29 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
         },
         'playlist_mincount': 21,
+    }, {
+        'note': 'Playlist with "show unavailable videos" button',
+        'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
+        'info_dict': {
+            'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
+            'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
+            'uploader': 'Phim Siêu Nhân Nhật Bản',
+            'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
+        },
+        'playlist_mincount': 1400,
+        'expected_warnings': [
+            'YouTube said: INFO - Unavailable videos are hidden',
+        ]
+    }, {
+        'note': 'Playlist with unavailable videos in a later page',
+        'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
+        'info_dict': {
+            'title': 'Uploads from BlankTV',
+            'id': 'UU8l9frL61Yl5KFOl87nIm2w',
+            'uploader': 'BlankTV',
+            'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
+        },
+        'playlist_mincount': 20000,
     }, {
         # https://github.com/ytdl-org/youtube-dl/issues/21844
         'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
@@ -2499,6 +2817,16 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/TheYoungTurks/live',
         'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/hashtag/cctv9',
+        'info_dict': {
+            'id': 'cctv9',
+            'title': '#cctv9',
+        },
+        'playlist_mincount': 350,
+    }, {
+        'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
+        'only_matching': True,
     }]

     @classmethod
@@ -2522,14 +2850,16 @@ def _extract_channel_id(self, webpage):
     @staticmethod
     def _extract_basic_item_renderer(item):
         # Modified from _extract_grid_item_renderer
-        known_renderers = (
-            'playlistRenderer', 'videoRenderer', 'channelRenderer'
-            'gridPlaylistRenderer', 'gridVideoRenderer', 'gridChannelRenderer'
+        known_basic_renderers = (
+            'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
         )
         for key, renderer in item.items():
-            if key not in known_renderers:
+            if not isinstance(renderer, dict):
                 continue
-            return renderer
+            elif key in known_basic_renderers:
+                return renderer
+            elif key.startswith('grid') and key.endswith('Renderer'):
+                return renderer

     def _grid_entries(self, grid_renderer):
         for item in grid_renderer['items']:
@@ -2539,7 +2869,8 @@ def _grid_entries(self, grid_renderer):
             if not isinstance(renderer, dict):
                 continue
             title = try_get(
-                renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+                renderer, (lambda x: x['title']['runs'][0]['text'],
+                           lambda x: x['title']['simpleText']), compat_str)
             # playlist
             playlist_id = renderer.get('playlistId')
             if playlist_id:
@@ -2547,10 +2878,12 @@ def _grid_entries(self, grid_renderer):
                     'https://www.youtube.com/playlist?list=%s' % playlist_id,
                     ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
                     video_title=title)
+                continue
             # video
             video_id = renderer.get('videoId')
             if video_id:
                 yield self._extract_video(renderer)
+                continue
             # channel
             channel_id = renderer.get('channelId')
             if channel_id:
@@ -2559,6 +2892,17 @@ def _grid_entries(self, grid_renderer):
                 yield self.url_result(
                     'https://www.youtube.com/channel/%s' % channel_id,
                     ie=YoutubeTabIE.ie_key(), video_title=title)
+                continue
+            # generic endpoint URL support
+            ep_url = urljoin('https://www.youtube.com/', try_get(
+                renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
+                compat_str))
+            if ep_url:
+                for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
+                    if ie.suitable(ep_url):
+                        yield self.url_result(
+                            ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
+                        break

     def _shelf_entries_from_content(self, shelf_renderer):
         content = shelf_renderer.get('content')
@@ -2626,12 +2970,19 @@ def _post_thread_entries(self, post_thread_renderer):
             return
         # video attachment
         video_renderer = try_get(
-            post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
-        video_id = None
-        if video_renderer:
-            entry = self._video_entry(video_renderer)
+            post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
+        video_id = video_renderer.get('videoId')
+        if video_id:
+            entry = self._extract_video(video_renderer)
             if entry:
                 yield entry
+        # playlist attachment
+        playlist_id = try_get(
+            post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
+        if playlist_id:
+            yield self.url_result(
+                'https://www.youtube.com/playlist?list=%s' % playlist_id,
+                ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
         # inline video links
         runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
         for run in runs:
@@ -2646,7 +2997,7 @@ def _post_thread_entries(self, post_thread_renderer):
             ep_video_id = YoutubeIE._match_id(ep_url)
             if video_id == ep_video_id:
                 continue
-            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
+            yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)

     def _post_thread_continuation_entries(self, post_thread_continuation):
         contents = post_thread_continuation.get('contents')
@@ -2659,6 +3010,16 @@ def _post_thread_continuation_entries(self, post_thread_continuation):
             for entry in self._post_thread_entries(renderer):
                 yield entry

+    r''' # unused
+    def _rich_grid_entries(self, contents):
+        for content in contents:
+            video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
+            if video_renderer:
+                entry = self._video_entry(video_renderer)
+                if entry:
+                    yield entry
+    '''
+
     @staticmethod
     def _build_continuation_query(continuation, ctp=None):
         query = {
@@ -2704,7 +3065,7 @@ def _extract_continuation(cls, renderer):
         ctp = continuation_ep.get('clickTrackingParams')
         return YoutubeTabIE._build_continuation_query(continuation, ctp)

-    def _entries(self, tab, identity_token, item_id):
+    def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):

         def extract_entries(parent_renderer):  # this needs to called again for continuation to work with feeds
             contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
@@ -2756,56 +3117,26 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
             for entry in extract_entries(parent_renderer):
                 yield entry
         continuation = continuation_list[0]
-
-        headers = {
-            'x-youtube-client-name': '1',
-            'x-youtube-client-version': '2.20201112.04.01',
-        }
-        if identity_token:
-            headers['x-youtube-identity-token'] = identity_token
+        context = self._extract_context(ytcfg)
+        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)

         for page_num in itertools.count(1):
             if not continuation:
                 break
-            retries = self._downloader.params.get('extractor_retries', 3)
-            count = -1
-            last_error = None
-            while count < retries:
-                count += 1
-                if last_error:
-                    self.report_warning('%s. Retrying ...' % last_error)
-                try:
-                    response = self._call_api(
-                        ep="browse", fatal=True, headers=headers,
-                        video_id='%s page %s' % (item_id, page_num),
-                        query={
-                            'continuation': continuation['continuation'],
-                            'clickTracking': {'clickTrackingParams': continuation['itct']},
-                        },
-                        note='Downloading API JSON%s' % (' (retry #%d)' % count if count else ''))
-                except ExtractorError as e:
-                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
-                        # Downloading page may result in intermittent 5xx HTTP error
-                        # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
-                        last_error = 'HTTP Error %s' % e.cause.code
-                        if count < retries:
-                            continue
-                    raise
-                else:
-                    # Youtube sometimes sends incomplete data
-                    # See: https://github.com/ytdl-org/youtube-dl/issues/28194
-                    if response.get('continuationContents') or response.get('onResponseReceivedActions'):
-                        break
-
-                    # Youtube may send alerts if there was an issue with the continuation page
-                    self._extract_alerts(response, expected=False)
-
-                    last_error = 'Incomplete data received'
-                    if count >= retries:
-                        self._downloader.report_error(last_error)
+            query = {
+                'continuation': continuation['continuation'],
+                'clickTracking': {'clickTrackingParams': continuation['itct']}
+            }
+            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            response = self._extract_response(
+                item_id='%s page %s' % (item_id, page_num),
+                query=query, headers=headers, ytcfg=ytcfg,
+                check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))

             if not response:
                 break
+            visitor_data = try_get(
+                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data

             known_continuation_renderers = {
                 'playlistVideoListContinuation': self._playlist_entries,
@@ -2834,9 +3165,11 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
                 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
                 'itemSectionRenderer': (extract_entries, 'contents'),  # for feeds
                 'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
+                'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
             }
+            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
             continuation_items = try_get(
-                response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
+                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
             continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
             video_items_renderer = None
             for key, value in continuation_item.items():
@@ -2883,7 +3216,7 @@ def _extract_uploader(data):
                     try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
         return {k: v for k, v in uploader.items() if v is not None}

-    def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
+    def _extract_from_tabs(self, item_id, webpage, data, tabs):
         playlist_id = title = description = channel_url = channel_name = channel_id = None
         thumbnails_list = tags = []

@@ -2894,10 +3227,10 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
             channel_name = renderer.get('title')
             channel_url = renderer.get('channelUrl')
             channel_id = renderer.get('externalId')
-
-        if not renderer:
+        else:
             renderer = try_get(
                 data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+
         if renderer:
             title = renderer.get('title')
            description = renderer.get('description', '')
@@ -2923,11 +3256,12 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
                     'width': int_or_none(t.get('width')),
                     'height': int_or_none(t.get('height')),
                 })
-
         if playlist_id is None:
             playlist_id = item_id
         if title is None:
-            title = playlist_id
+            title = (
+                try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
+                or playlist_id)
         title += format_field(selected_tab, 'title', ' - %s')

         metadata = {
@@ -2947,28 +3281,53 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
             'channel_id': metadata['uploader_id'],
             'channel_url': metadata['uploader_url']})
         return self.playlist_result(
-            self._entries(selected_tab, identity_token, playlist_id),
+            self._entries(
+                selected_tab, playlist_id,
+                self._extract_identity_token(webpage, item_id),
+                self._extract_account_syncid(data),
+                self._extract_ytcfg(item_id, webpage)),
             **metadata)

-    def _extract_mix_playlist(self, playlist, playlist_id):
-        page_num = 0
-        while True:
+    def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
+        first_id = last_id = None
+        ytcfg = self._extract_ytcfg(playlist_id, webpage)
+        headers = self._generate_api_headers(
+            ytcfg, account_syncid=self._extract_account_syncid(data),
+            identity_token=self._extract_identity_token(webpage, item_id=playlist_id),
+            visitor_data=try_get(self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+        for page_num in itertools.count(1):
             videos = list(self._playlist_entries(playlist))
             if not videos:
                 return
-            video_count = len(videos)
-            start = min(video_count - 24, 26) if video_count > 25 else 0
-            for item in videos[start:]:
-                yield item
-
-            page_num += 1
-            _, data = self._extract_webpage(
-                'https://www.youtube.com/watch?list=%s&v=%s' % (playlist_id, videos[-1]['id']),
-                '%s page %d' % (playlist_id, page_num))
+            start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
+            if start >= len(videos):
+                return
+            for video in videos[start:]:
+                if video['id'] == first_id:
+                    self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
+                    return
+                yield video
+            first_id = first_id or videos[0]['id']
+            last_id = videos[-1]['id']
+            watch_endpoint = try_get(
+                playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+            query = {
+                'playlistId': playlist_id,
+                'videoId': watch_endpoint.get('videoId') or last_id,
+                'index': watch_endpoint.get('index') or len(videos),
+                'params': watch_endpoint.get('params') or 'OAE%3D'
+            }
+            response = self._extract_response(
+                item_id='%s page %d' % (playlist_id, page_num),
+                query=query,
+                ep='next',
+                headers=headers,
+                check_get_keys='contents'
+            )
             playlist = try_get(
-                data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+                response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)

-    def _extract_from_playlist(self, item_id, url, data, playlist):
+    def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
         title = playlist.get('title') or try_get(
             data, lambda x: x['titleText']['simpleText'], compat_str)
         playlist_id = playlist.get('playlistId') or item_id
@@ -2983,7 +3342,7 @@ def _extract_from_playlist(self, item_id, url, data, playlist):
             video_title=title)

         return self.playlist_result(
-            self._extract_mix_playlist(playlist, playlist_id),
+            self._extract_mix_playlist(playlist, playlist_id, data, webpage),
             playlist_id=playlist_id, playlist_title=title)

     def _extract_alerts(self, data, expected=False):
@@ -2996,35 +3355,118 @@ def _real_extract_alerts():
                 alert_type = alert.get('type')
                 if not alert_type:
                     continue
-                message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
+                message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or ''
                 if message:
                     yield alert_type, message
                 for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
-                    message = try_get(run, lambda x: x['text'], compat_str)
-                    if message:
-                        yield alert_type, message
+                    message += try_get(run, lambda x: x['text'], compat_str)
+                if message:
+                    yield alert_type, message

-        err_msg = None
+        errors = []
+        warnings = []
         for alert_type, alert_message in _real_extract_alerts():
             if alert_type.lower() == 'error':
-                if err_msg:
-                    self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
-                err_msg = alert_message
+                errors.append([alert_type, alert_message])
             else:
-                self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+                warnings.append([alert_type, alert_message])

-        if err_msg:
-            raise ExtractorError('YouTube said: %s' % err_msg, expected=expected)
+        for alert_type, alert_message in (warnings + errors[:-1]):
+            self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+        if errors:
+            raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)

-    def _extract_identity_token(self, webpage, item_id):
-        ytcfg = self._extract_ytcfg(item_id, webpage)
-        if ytcfg:
-            token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
-            if token:
-                return token
-        return self._search_regex(
-            r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
-            'identity token', default=None)
+    def _reload_with_unavailable_videos(self, item_id, data, webpage):
+        """
+        Get playlist with unavailable videos if the 'show unavailable videos' button exists.
+ """ + sidebar_renderer = try_get( + data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) + if not sidebar_renderer: + return + browse_id = params = None + for item in sidebar_renderer: + if not isinstance(item, dict): + continue + renderer = item.get('playlistSidebarPrimaryInfoRenderer') + menu_renderer = try_get( + renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] + for menu_item in menu_renderer: + if not isinstance(menu_item, dict): + continue + nav_item_renderer = menu_item.get('menuNavigationItemRenderer') + text = try_get( + nav_item_renderer, lambda x: x['text']['simpleText'], compat_str) + if not text or text.lower() != 'show unavailable videos': + continue + browse_endpoint = try_get( + nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} + browse_id = browse_endpoint.get('browseId') + params = browse_endpoint.get('params') + break + + ytcfg = self._extract_ytcfg(item_id, webpage) + headers = self._generate_api_headers( + ytcfg, account_syncid=self._extract_account_syncid(ytcfg), + identity_token=self._extract_identity_token(webpage, item_id=item_id), + visitor_data=try_get( + self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str)) + query = { + 'params': params or 'wgYCCAA=', + 'browseId': browse_id or 'VL%s' % item_id + } + return self._extract_response( + item_id=item_id, headers=headers, query=query, + check_get_keys='contents', fatal=False, + note='Downloading API JSON with unavailable videos') + + def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None, + ytcfg=None, check_get_keys=None, ep='browse', fatal=True): + response = None + last_error = None + count = -1 + retries = self._downloader.params.get('extractor_retries', 3) + if check_get_keys is None: + check_get_keys = [] + while count < retries: + count += 1 + if last_error: + self.report_warning('%s. Retrying ...' % last_error) + try: + response = self._call_api( + ep=ep, fatal=True, headers=headers, + video_id=item_id, query=query, + context=self._extract_context(ytcfg), + api_key=self._extract_api_key(ytcfg), + note='%s%s' % (note, ' (retry #%d)' % count if count else '')) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404): + # Downloading page may result in intermittent 5xx HTTP error + # Sometimes a 404 is also recieved. 
See: https://github.com/ytdl-org/youtube-dl/issues/28289 + last_error = 'HTTP Error %s' % e.cause.code + if count < retries: + continue + if fatal: + raise + else: + self.report_warning(error_to_compat_str(e)) + return + + else: + # Youtube may send alerts if there was an issue with the continuation page + self._extract_alerts(response, expected=False) + if not check_get_keys or dict_get(response, check_get_keys): + break + # Youtube sometimes sends incomplete data + # See: https://github.com/ytdl-org/youtube-dl/issues/28194 + last_error = 'Incomplete data received' + if count >= retries: + if fatal: + raise ExtractorError(last_error) + else: + self.report_warning(last_error) + return + return response def _extract_webpage(self, url, item_id): retries = self._downloader.params.get('extractor_retries', 3) @@ -3044,25 +3486,26 @@ def _extract_webpage(self, url, item_id): if data.get('contents') or data.get('currentVideoEndpoint'): break if count >= retries: - self._downloader.report_error(last_error) + raise ExtractorError(last_error) return webpage, data def _real_extract(self, url): item_id = self._match_id(url) url = compat_urlparse.urlunparse( compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com')) + compat_opts = self._downloader.params.get('compat_opts', []) # This is not matched in a channel page with a tab selected mobj = re.match(r'(?P
%s)(?P/?(?![^#?]).*$)' % self._VALID_URL, url)
         mobj = mobj.groupdict() if mobj else {}
-        if mobj and not mobj.get('not_channel'):
-            self._downloader.report_warning(
+        if mobj and not mobj.get('not_channel') and 'no-youtube-channel-redirect' not in compat_opts:
+            self.report_warning(
                 'A channel/user page was given. All the channel\'s videos will be downloaded. '
                 'To download only the videos in the home page, add a "/featured" to the URL')
             url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
 
         # Handle both video/playlist URLs
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         video_id = qs.get('v', [None])[0]
         playlist_id = qs.get('list', [None])[0]
 
@@ -3071,7 +3514,7 @@ def _real_extract(self, url):
                 # If there is neither video or playlist ids,
                 # youtube redirects to home page, which is undesirable
                 raise ExtractorError('Unable to recognize tab page')
-            self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+            self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
             url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
 
         if video_id and playlist_id:
@@ -3082,22 +3525,25 @@ def _real_extract(self, url):
 
         webpage, data = self._extract_webpage(url, item_id)
 
+        # YouTube sometimes provides a button to reload playlist with unavailable videos.
+        if 'no-youtube-unavailable-videos' not in compat_opts:
+            data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
+
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
-            identity_token = self._extract_identity_token(webpage, item_id)
-            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+            return self._extract_from_tabs(item_id, webpage, data, tabs)
 
         playlist = try_get(
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
-            return self._extract_from_playlist(item_id, url, data, playlist)
+            return self._extract_from_playlist(item_id, url, data, playlist, webpage)
 
         video_id = try_get(
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
-            self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
+            self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
 
         raise ExtractorError('Unable to recognize tab page')
@@ -3166,12 +3612,19 @@ class YoutubePlaylistIE(InfoExtractor):
 
     @classmethod
     def suitable(cls, url):
-        return False if YoutubeTabIE.suitable(url) else super(
-            YoutubePlaylistIE, cls).suitable(url)
+        if YoutubeTabIE.suitable(url):
+            return False
+        # Hack for lazy extractors until more generic solution is implemented
+        # (see #28780)
+        from .youtube import parse_qs
+        qs = parse_qs(url)
+        if qs.get('v', [None])[0]:
+            return False
+        return super(YoutubePlaylistIE, cls).suitable(url)
 
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         if not qs:
             qs = {'list': playlist_id}
         return self.url_result(
@@ -3253,7 +3706,7 @@ def _real_extract(self, url):
             ie=YoutubeTabIE.ie_key())
 
 
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
     IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
     # there doesn't appear to be a real limit, for example if you search for
     # 'python' you get more than 8.000.000 results
@@ -3269,9 +3722,10 @@ def _entries(self, query, n):
             data['params'] = self._SEARCH_PARAMS
         total = 0
         for page_num in itertools.count(1):
-            search = self._call_api(
-                ep='search', video_id='query "%s"' % query, fatal=False,
-                note='Downloading page %s' % page_num, query=data)
+            search = self._extract_response(
+                item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
+                check_get_keys=('contents', 'onResponseReceivedCommands')
+            )
             if not search:
                 break
             slr_contents = try_get(