X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/5ef7d9bdd8c4d480612d88bd0bd2a4196f2e2562..d9488f69c111c70e46dbe94773ff3b34c08b0298:/yt_dlp/extractor/youtube.py diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py index 8b0d12bb5..dee2dbebc 100644 --- a/yt_dlp/extractor/youtube.py +++ b/yt_dlp/extractor/youtube.py @@ -2,6 +2,11 @@ from __future__ import unicode_literals +import base64 +import calendar +import copy +import datetime +import hashlib import itertools import json import os.path @@ -14,7 +19,6 @@ from ..compat import ( compat_chr, compat_HTTPError, - compat_kwargs, compat_parse_qs, compat_str, compat_urllib_parse_unquote_plus, @@ -24,13 +28,20 @@ ) from ..jsinterp import JSInterpreter from ..utils import ( + bool_or_none, + bytes_to_intlist, clean_html, + dict_get, + datetime_from_str, + error_to_compat_str, ExtractorError, format_field, float_or_none, int_or_none, + intlist_to_bytes, mimetype2ext, parse_codecs, + parse_count, parse_duration, qualities, remove_start, @@ -45,9 +56,14 @@ url_or_none, urlencode_postdata, urljoin, + variadic ) +def parse_qs(url): + return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query) + + class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _LOGIN_URL = 'https://accounts.google.com/ServiceLogin' @@ -58,9 +74,9 @@ class YoutubeBaseInfoExtractor(InfoExtractor): _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}' _RESERVED_NAMES = ( - r'embed|e|watch_popup|channel|c|user|playlist|watch|w|v|movies|results|shared|hashtag|' - r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout|' - r'feed/(?:watch_later|history|subscriptions|library|trending|recommended)') + r'channel|c|user|browse|playlist|watch|w|v|embed|e|watch_popup|shorts|' + r'movies|results|shared|hashtag|trending|feed|feeds|oembed|get_video_info|' + r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout') _NETRC_MACHINE = 'youtube' # If True it will raise an error if no login info is provided @@ -68,11 +84,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor): _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)' - def _ids_to_results(self, ids): - return [ - self.url_result(vid_id, 'Youtube', video_id=vid_id) - for vid_id in ids] - def _login(self): """ Attempt to log in to YouTube. @@ -81,12 +92,26 @@ def _login(self): If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised. """ + + def warn(message): + self.report_warning(message) + + # username+password login is broken + if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None: + self.raise_login_required( + 'Login details are needed to download this content', method='cookies') username, password = self._get_login_info() + if username: + warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies']) + return + + # Everything below this is broken! + r''' # No authentication to be performed if username is None: - if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None: + if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None: raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True) - # if self._downloader.params.get('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them. 
+ # if self.get_param('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them. # self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!') return True @@ -122,9 +147,6 @@ def req(url, f_req, note, errnote): 'Google-Accounts-XSRF': 1, }) - def warn(message): - self._downloader.report_warning(message) - lookup_req = [ username, None, [], None, 'US', None, None, 2, False, True, @@ -257,42 +279,208 @@ def warn(message): return False return True + ''' - def _download_webpage_handle(self, *args, **kwargs): - query = kwargs.get('query', {}).copy() - kwargs['query'] = query - return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle( - *args, **compat_kwargs(kwargs)) + def _initialize_consent(self): + cookies = self._get_cookies('https://www.youtube.com/') + if cookies.get('__Secure-3PSID'): + return + consent_id = None + consent = cookies.get('CONSENT') + if consent: + if 'YES' in consent.value: + return + consent_id = self._search_regex( + r'PENDING\+(\d+)', consent.value, 'consent', default=None) + if not consent_id: + consent_id = random.randint(100, 999) + self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id) def _real_initialize(self): + self._initialize_consent() if self._downloader is None: return if not self._login(): return - _DEFAULT_API_DATA = { - 'context': { - 'client': { - 'clientName': 'WEB', - 'clientVersion': '2.20201021.03.00', - } - }, - } - _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;' _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;' _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|= 2 and sync_ids[1]: + # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel + # and just "user_syncid||" for primary channel. 
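# [editor's note, not part of the patch] Illustrative values for the split
# described above (real datasyncid values are opaque strings; these are made up):
#   'UC_abc||DELEGATED_xyz'.split('||') -> ['UC_abc', 'DELEGATED_xyz']  => returns 'UC_abc'
#   'user_xyz||'.split('||')            -> ['user_xyz', '']             => falls through, returns None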
We only want the channel_syncid + return sync_ids[0] + def _extract_ytcfg(self, video_id, webpage): + if not webpage: + return {} return self._parse_json( self._search_regex( r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg', - default='{}'), video_id, fatal=False) + default='{}'), video_id, fatal=False) or {} + + def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None, + visitor_data=None, api_hostname=None, client='WEB', session_index=None): + origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(client)) + headers = { + 'X-YouTube-Client-Name': compat_str( + self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=client)), + 'X-YouTube-Client-Version': self._extract_client_version(ytcfg, client), + 'Origin': origin + } + if not visitor_data and ytcfg: + visitor_data = try_get( + self._extract_context(ytcfg, client), lambda x: x['client']['visitorData'], compat_str) + if identity_token: + headers['X-Youtube-Identity-Token'] = identity_token + if account_syncid: + headers['X-Goog-PageId'] = account_syncid + if session_index is None and ytcfg: + session_index = self._extract_session_index(ytcfg) + if account_syncid or session_index is not None: + headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0 + if visitor_data: + headers['X-Goog-Visitor-Id'] = visitor_data + auth = self._generate_sapisidhash_header(origin) + if auth is not None: + headers['Authorization'] = auth + headers['X-Origin'] = origin + return headers + + @staticmethod + def _build_api_continuation_query(continuation, ctp=None): + query = { + 'continuation': continuation + } + # TODO: Inconsistency with clickTrackingParams. + # Currently we have a fixed ctp contained within context (from ytcfg) + # and a ctp in root query for continuation. 
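# [editor's note, not part of the patch] With made-up token strings, the query
# built below looks like:
#   _build_api_continuation_query('4qmFsgJhb...', ctp='CBQQu6sBGAYiEwj...')
#   -> {'continuation': '4qmFsgJhb...',
#       'clickTracking': {'clickTrackingParams': 'CBQQu6sBGAYiEwj...'}}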
+ if ctp: + query['clickTracking'] = {'clickTrackingParams': ctp} + return query + + @classmethod + def _extract_next_continuation_data(cls, renderer): + next_continuation = try_get( + renderer, (lambda x: x['continuations'][0]['nextContinuationData'], + lambda x: x['continuation']['reloadContinuationData']), dict) + if not next_continuation: + return + continuation = next_continuation.get('continuation') + if not continuation: + return + ctp = next_continuation.get('clickTrackingParams') + return cls._build_api_continuation_query(continuation, ctp) + + @classmethod + def _extract_continuation_ep_data(cls, continuation_ep: dict): + if isinstance(continuation_ep, dict): + continuation = try_get( + continuation_ep, lambda x: x['continuationCommand']['token'], compat_str) + if not continuation: + return + ctp = continuation_ep.get('clickTrackingParams') + return cls._build_api_continuation_query(continuation, ctp) + + @classmethod + def _extract_continuation(cls, renderer): + next_continuation = cls._extract_next_continuation_data(renderer) + if next_continuation: + return next_continuation + + contents = [] + for key in ('contents', 'items'): + contents.extend(try_get(renderer, lambda x: x[key], list) or []) + + for content in contents: + if not isinstance(content, dict): + continue + continuation_ep = try_get( + content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'], + lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']), + dict) + continuation = cls._extract_continuation_ep_data(continuation_ep) + if continuation: + return continuation + + @classmethod + def _extract_alerts(cls, data): + for alert_dict in try_get(data, lambda x: x['alerts'], list) or []: + if not isinstance(alert_dict, dict): + continue + for alert in alert_dict.values(): + alert_type = alert.get('type') + if not alert_type: + continue + message = cls._get_text(alert.get('text')) + if message: + yield alert_type, message + + def _report_alerts(self, alerts, expected=True): + errors = [] + warnings = [] + for alert_type, alert_message in alerts: + if alert_type.lower() == 'error': + errors.append([alert_type, alert_message]) + else: + warnings.append([alert_type, alert_message]) + + for alert_type, alert_message in (warnings + errors[:-1]): + self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message)) + if errors: + raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected) + + def _extract_and_report_alerts(self, data, *args, **kwargs): + return self._report_alerts(self._extract_alerts(data), *args, **kwargs) + + def _extract_badges(self, renderer: dict): + badges = set() + for badge in try_get(renderer, lambda x: x['badges'], list) or []: + label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str) + if label: + badges.add(label.lower()) + return badges + + @staticmethod + def _get_text(data, getter=None, max_runs=None): + for get in variadic(getter): + d = try_get(data, get) if get is not None else data + text = try_get(d, lambda x: x['simpleText'], compat_str) + if text: + return text + runs = try_get(d, lambda x: x['runs'], list) or [] + if not runs and isinstance(d, list): + runs = d + + def get_runs(runs): + for run in runs[:min(len(runs), max_runs or len(runs))]: + yield try_get(run, lambda x: x['text'], compat_str) or '' + + text = ''.join(get_runs(runs)) + if text: + return text + + def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None, + ytcfg=None, check_get_keys=None, ep='browse', 
fatal=True, api_hostname=None, + default_client='WEB'): + response = None + last_error = None + count = -1 + retries = self.get_param('extractor_retries', 3) + if check_get_keys is None: + check_get_keys = [] + while count < retries: + count += 1 + if last_error: + self.report_warning('%s. Retrying ...' % last_error) + try: + response = self._call_api( + ep=ep, fatal=True, headers=headers, + video_id=item_id, query=query, + context=self._extract_context(ytcfg, default_client), + api_key=self._extract_api_key(ytcfg, default_client), + api_hostname=api_hostname, default_client=default_client, + note='%s%s' % (note, ' (retry #%d)' % count if count else '')) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404): + # Downloading page may result in intermittent 5xx HTTP error + # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289 + last_error = 'HTTP Error %s' % e.cause.code + if count < retries: + continue + if fatal: + raise + else: + self.report_warning(error_to_compat_str(e)) + return + + else: + # Youtube may send alerts if there was an issue with the continuation page + try: + self._extract_and_report_alerts(response, expected=False) + except ExtractorError as e: + if fatal: + raise + self.report_warning(error_to_compat_str(e)) + return + if not check_get_keys or dict_get(response, check_get_keys): + break + # Youtube sometimes sends incomplete data + # See: https://github.com/ytdl-org/youtube-dl/issues/28194 + last_error = 'Incomplete data received' + if count >= retries: + if fatal: + raise ExtractorError(last_error) + else: + self.report_warning(last_error) + return + return response + + @staticmethod + def is_music_url(url): + return re.match(r'https?://music\.youtube\.com/', url) is not None def _extract_video(self, renderer): video_id = renderer.get('videoId') - title = try_get( - renderer, - (lambda x: x['title']['runs'][0]['text'], - lambda x: x['title']['simpleText']), compat_str) - description = try_get( - renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'], - compat_str) - duration = parse_duration(try_get( - renderer, lambda x: x['lengthText']['simpleText'], compat_str)) - view_count_text = try_get( - renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or '' + title = self._get_text(renderer.get('title')) + description = self._get_text(renderer.get('descriptionSnippet')) + duration = parse_duration(self._get_text(renderer.get('lengthText'))) + view_count_text = self._get_text(renderer.get('viewCountText')) or '' view_count = str_to_int(self._search_regex( r'^([\d,]+)', re.sub(r'\s', '', view_count_text), 'view count', default=None)) - uploader = try_get( - renderer, - (lambda x: x['ownerText']['runs'][0]['text'], - lambda x: x['shortBylineText']['runs'][0]['text']), compat_str) + + uploader = self._get_text(renderer, (lambda x: x['ownerText'], lambda x: x['shortBylineText'])) + return { - '_type': 'url_transparent', + '_type': 'url', 'ie_key': YoutubeIE.ie_key(), 'id': video_id, 'url': video_id, @@ -348,44 +760,50 @@ class YoutubeIE(YoutubeBaseInfoExtractor): r'(?:(?:www|dev)\.)?invidio\.us', # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md r'(?:www\.)?invidious\.pussthecat\.org', - r'(?:www\.)?invidious\.048596\.xyz', r'(?:www\.)?invidious\.zee\.li', - r'(?:www\.)?vid\.puffyan\.us', - r'(?:(?:www|au)\.)?ytprivate\.com', - r'(?:www\.)?invidious\.namazso\.eu', r'(?:www\.)?invidious\.ethibox\.fr', - 
r'(?:www\.)?inv\.skyn3t\.in', - r'(?:www\.)?invidious\.himiko\.cloud', - r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion', - r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion', r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion', - r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion', # youtube-dl invidious instances list r'(?:(?:www|no)\.)?invidiou\.sh', r'(?:(?:www|fi)\.)?invidious\.snopyta\.org', r'(?:www\.)?invidious\.kabi\.tk', - r'(?:www\.)?invidious\.13ad\.de', r'(?:www\.)?invidious\.mastodon\.host', r'(?:www\.)?invidious\.zapashcanon\.fr', - r'(?:www\.)?invidious\.kavin\.rocks', + r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks', + r'(?:www\.)?invidious\.tinfoil-hat\.net', + r'(?:www\.)?invidious\.himiko\.cloud', + r'(?:www\.)?invidious\.reallyancient\.tech', r'(?:www\.)?invidious\.tube', r'(?:www\.)?invidiou\.site', r'(?:www\.)?invidious\.site', r'(?:www\.)?invidious\.xyz', r'(?:www\.)?invidious\.nixnet\.xyz', + r'(?:www\.)?invidious\.048596\.xyz', r'(?:www\.)?invidious\.drycat\.fr', + r'(?:www\.)?inv\.skyn3t\.in', r'(?:www\.)?tube\.poal\.co', r'(?:www\.)?tube\.connect\.cafe', r'(?:www\.)?vid\.wxzm\.sx', r'(?:www\.)?vid\.mint\.lgbt', + r'(?:www\.)?vid\.puffyan\.us', r'(?:www\.)?yewtu\.be', r'(?:www\.)?yt\.elukerio\.org', r'(?:www\.)?yt\.lelux\.fi', r'(?:www\.)?invidious\.ggc-project\.de', r'(?:www\.)?yt\.maisputain\.ovh', + r'(?:www\.)?ytprivate\.com', + r'(?:www\.)?invidious\.13ad\.de', r'(?:www\.)?invidious\.toot\.koeln', r'(?:www\.)?invidious\.fdn\.fr', r'(?:www\.)?watch\.nettohikari\.com', + r'(?:www\.)?invidious\.namazso\.eu', + r'(?:www\.)?invidious\.silkky\.cloud', + r'(?:www\.)?invidious\.exonip\.de', + r'(?:www\.)?invidious\.riverside\.rocks', + r'(?:www\.)?invidious\.blamefran\.net', + r'(?:www\.)?invidious\.moomoo\.de', + r'(?:www\.)?ytb\.trom\.tf', + r'(?:www\.)?yt\.cyberhost\.uk', r'(?:www\.)?kgg2m7yk5aybusll\.onion', r'(?:www\.)?qklhadlycap4cnod\.onion', r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion', @@ -394,6 +812,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion', r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p', r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion', + r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion', + r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion', + r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion', + r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion', ) _VALID_URL = r"""(?x)^ ( @@ -425,16 +847,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId= ) )? # all until now is optional -> you can pass the naked ID - (?P[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID - (?!.*?\blist= - (?: - %(playlist_id)s| # combined list/video URLs are handled by the playlist IE - WL # WL are handled by the watch later IE - ) - ) + (?P[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID (?(1).+)? 
# if we found the ID, everything can follow - $""" % { - 'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE, + (?:\#|$)""" % { 'invidious': '|'.join(_INVIDIOUS_SITES), } _PLAYER_INFO_RE = ( @@ -549,6 +964,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): } _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt') + _AGE_GATE_REASONS = ( + 'Sign in to confirm your age', + 'This video may be inappropriate for some users.', + 'Sorry, this content is age-restricted.') + _GEO_BYPASS = False IE_NAME = 'youtube' @@ -919,6 +1339,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): }, 'skip': 'This video does not exist.', }, + { + # Video with incomplete 'yt:stretch=16:' + 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI', + 'only_matching': True, + }, { # Video licensed under Creative Commons 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA', @@ -1195,13 +1620,90 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg', 'only_matching': True, }, + { + # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685 + 'url': 'cBvYw8_A0vQ', + 'info_dict': { + 'id': 'cBvYw8_A0vQ', + 'ext': 'mp4', + 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き', + 'description': 'md5:ea770e474b7cd6722b4c95b833c03630', + 'upload_date': '20201120', + 'uploader': 'Walk around Japan', + 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw', + 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw', + }, + 'params': { + 'skip_download': True, + }, + }, { + # Has multiple audio streams + 'url': 'WaOKSUlf4TM', + 'only_matching': True + }, { + # Requires Premium: has format 141 when requested using YTM url + 'url': 'https://music.youtube.com/watch?v=XclachpHxis', + 'only_matching': True + }, { + # multiple subtitles with same lang_code + 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug', + 'only_matching': True, + }, { + # Force use android client fallback + 'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY', + 'info_dict': { + 'id': 'YOelRv7fMxY', + 'title': 'Digging a Secret Tunnel from my Workshop', + 'ext': '3gp', + 'upload_date': '20210624', + 'channel_id': 'UCp68_FLety0O-n9QU6phsgw', + 'uploader': 'colinfurze', + 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw', + 'description': 'md5:ecb672623246d98c6c562eed6ae798c3' + }, + 'params': { + 'format': '17', # 3gp format available on android + 'extractor_args': {'youtube': {'player_client': ['android']}}, + }, + }, + { + # Skip download of additional client configs (remix client config in this case) + 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs', + 'only_matching': True, + 'params': { + 'extractor_args': {'youtube': {'player_skip': ['configs']}}, + }, + } ] + @classmethod + def suitable(cls, url): + # Hack for lazy extractors until more generic solution is implemented + # (see #28780) + from .youtube import parse_qs + qs = parse_qs(url) + if qs.get('list', [None])[0]: + return False + return super(YoutubeIE, cls).suitable(url) + def __init__(self, *args, **kwargs): super(YoutubeIE, self).__init__(*args, **kwargs) self._code_cache = {} self._player_cache = {} + def _extract_player_url(self, ytcfg=None, webpage=None): + player_url = try_get(ytcfg, (lambda x: x['PLAYER_JS_URL']), str) + if not player_url: + player_url = self._search_regex( + r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"', + webpage, 'player URL', fatal=False) + if player_url.startswith('//'): + player_url = 'https:' + player_url + elif not re.match(r'https?://', 
player_url): + player_url = compat_urlparse.urljoin( + 'https://www.youtube.com', player_url) + return player_url + def _signature_cache_id(self, example_sig): """ Return a string representation of a signature """ return '.'.join(compat_str(len(part)) for part in example_sig.split('.')) @@ -1216,6 +1718,15 @@ def _extract_player_info(cls, player_url): raise ExtractorError('Cannot identify player %r' % player_url) return id_m.group('id') + def _load_player(self, video_id, player_url, fatal=True) -> bool: + player_id = self._extract_player_info(player_url) + if player_id not in self._code_cache: + self._code_cache[player_id] = self._download_webpage( + player_url, video_id, fatal=fatal, + note='Downloading player ' + player_id, + errnote='Download of %s failed' % player_url) + return player_id in self._code_cache + def _extract_signature_function(self, video_id, player_url, example_sig): player_id = self._extract_player_info(player_url) @@ -1228,20 +1739,16 @@ def _extract_signature_function(self, video_id, player_url, example_sig): if cache_spec is not None: return lambda s: ''.join(s[i] for i in cache_spec) - if player_id not in self._code_cache: - self._code_cache[player_id] = self._download_webpage( - player_url, video_id, - note='Downloading player ' + player_id, - errnote='Download of %s failed' % player_url) - code = self._code_cache[player_id] - res = self._parse_sig_js(code) + if self._load_player(video_id, player_url): + code = self._code_cache[player_id] + res = self._parse_sig_js(code) - test_string = ''.join(map(compat_chr, range(len(example_sig)))) - cache_res = res(test_string) - cache_spec = [ord(c) for c in cache_res] + test_string = ''.join(map(compat_chr, range(len(example_sig)))) + cache_res = res(test_string) + cache_spec = [ord(c) for c in cache_res] - self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec) - return res + self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec) + return res def _print_sig_code(self, func, example_sig): def gen_sig_code(idxs): @@ -1312,11 +1819,6 @@ def _decrypt_signature(self, s, video_id, player_url): if player_url is None: raise ExtractorError('Cannot decrypt signature without player_url') - if player_url.startswith('//'): - player_url = 'https:' + player_url - elif not re.match(r'https?://', player_url): - player_url = compat_urlparse.urljoin( - 'https://www.youtube.com', player_url) try: player_id = (player_url, self._signature_cache_id(s)) if player_id not in self._player_cache: @@ -1325,7 +1827,7 @@ def _decrypt_signature(self, s, video_id, player_url): ) self._player_cache[player_id] = func func = self._player_cache[player_id] - if self._downloader.params.get('youtube_print_sig_code'): + if self.get_param('youtube_print_sig_code'): self._print_sig_code(func, s) return func(s) except Exception as e: @@ -1333,6 +1835,31 @@ def _decrypt_signature(self, s, video_id, player_url): raise ExtractorError( 'Signature extraction failed: ' + tb, cause=e) + def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False): + """ + Extract signatureTimestamp (sts) + Required to tell API what sig/player version is in use. + """ + sts = None + if isinstance(ytcfg, dict): + sts = int_or_none(ytcfg.get('STS')) + + if not sts: + # Attempt to extract from player + if player_url is None: + error_msg = 'Cannot extract signature timestamp without player_url.' 
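# [editor's note, not part of the patch] sts is the 5-digit signatureTimestamp
# matched below by r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})'. It is
# fed to the player API via _generate_player_context (added later in this diff),
# e.g. with a made-up value:
#   _generate_player_context(18888)
#   -> {'playbackContext': {'contentPlaybackContext': {
#          'html5Preference': 'HTML5_PREF_WANTS', 'signatureTimestamp': 18888}}}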
+ if fatal: + raise ExtractorError(error_msg) + self.report_warning(error_msg) + return + if self._load_player(video_id, player_url, fatal=fatal): + player_id = self._extract_player_info(player_url) + code = self._code_cache[player_id] + sts = int_or_none(self._search_regex( + r'(?:signatureTimestamp|sts)\s*:\s*(?P[0-9]{5})', code, + 'JS player signature timestamp', group='sts', fatal=fatal)) + return sts + def _mark_watched(self, video_id, player_response): playback_url = url_or_none(try_get( player_response, @@ -1448,55 +1975,456 @@ def _extract_yt_initial_variable(self, webpage, regex, video_id, name): (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE), regex), webpage, name, default='{}'), video_id, fatal=False) - def _real_extract(self, url): - url, smuggled_data = unsmuggle_url(url, {}) - video_id = self._match_id(url) - base_url = self.http_scheme() + '//www.youtube.com/' - webpage_url = base_url + 'watch?v=' + video_id - webpage = self._download_webpage( - webpage_url + '&has_verified=1&bpctr=9999999999', - video_id, fatal=False) + @staticmethod + def parse_time_text(time_text): + """ + Parse the comment time text + time_text is in the format 'X units ago (edited)' + """ + time_text_split = time_text.split(' ') + if len(time_text_split) >= 3: + return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto') - player_response = None - if webpage: - player_response = self._extract_yt_initial_variable( - webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, - video_id, 'initial player response') - if not player_response: - player_response = self._call_api( - 'player', {'videoId': video_id}, video_id) + def _extract_comment(self, comment_renderer, parent=None): + comment_id = comment_renderer.get('commentId') + if not comment_id: + return - playability_status = player_response.get('playabilityStatus') or {} - if playability_status.get('reason') == 'Sign in to confirm your age': - pr = self._parse_json(try_get(compat_parse_qs( - self._download_webpage( - base_url + 'get_video_info', video_id, - 'Refetching age-gated info webpage', - 'unable to download video info webpage', query={ - 'video_id': video_id, - 'eurl': 'https://youtube.googleapis.com/v/' + video_id, - }, fatal=False)), - lambda x: x['player_response'][0], - compat_str) or '{}', video_id) - if pr: - player_response = pr + text = self._get_text(comment_renderer.get('contentText')) + + # note: timestamp is an estimate calculated from the current time and time_text + time_text = self._get_text(comment_renderer.get('publishedTimeText')) or '' + time_text_dt = self.parse_time_text(time_text) + if isinstance(time_text_dt, datetime.datetime): + timestamp = calendar.timegm(time_text_dt.timetuple()) + author = self._get_text(comment_renderer.get('authorText')) + author_id = try_get(comment_renderer, + lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str) + + votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'], + lambda x: x['likeCount']), compat_str)) or 0 + author_thumbnail = try_get(comment_renderer, + lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str) + + author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool) + is_favorited = 'creatorHeart' in (try_get( + comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {}) + return { + 'id': comment_id, + 'text': text, + 'timestamp': timestamp, + 'time_text': time_text, + 'like_count': votes, + 'is_favorited': is_favorited, + 
'author': author, + 'author_id': author_id, + 'author_thumbnail': author_thumbnail, + 'author_is_uploader': author_is_uploader, + 'parent': parent or 'root' + } - trailer_video_id = try_get( - playability_status, - lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'], - compat_str) - if trailer_video_id: - return self.url_result( - trailer_video_id, self.ie_key(), trailer_video_id) + def _comment_entries(self, root_continuation_data, identity_token, account_syncid, + ytcfg, video_id, parent=None, comment_counts=None): - def get_text(x): - if not x: - return - return x.get('simpleText') or ''.join([r['text'] for r in x['runs']]) + def extract_header(contents): + _total_comments = 0 + _continuation = None + for content in contents: + comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer']) + expected_comment_count = parse_count(self._get_text( + comments_header_renderer, (lambda x: x['countText'], lambda x: x['commentsCount']), max_runs=1)) + + if expected_comment_count: + comment_counts[1] = expected_comment_count + self.to_screen('Downloading ~%d comments' % expected_comment_count) + _total_comments = comment_counts[1] + sort_mode_str = self._configuration_arg('comment_sort', [''])[0] + comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top + + sort_menu_item = try_get( + comments_header_renderer, + lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {} + sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {} + + _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item) + if not _continuation: + continue - search_meta = ( - lambda x: self._html_search_meta(x, webpage, default=None)) \ - if webpage else lambda x: None + sort_text = sort_menu_item.get('title') + if isinstance(sort_text, compat_str): + sort_text = sort_text.lower() + else: + sort_text = 'top comments' if comment_sort_index == 0 else 'newest first' + self.to_screen('Sorting comments by %s' % sort_text) + break + return _total_comments, _continuation + + def extract_thread(contents): + if not parent: + comment_counts[2] = 0 + for content in contents: + comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer']) + comment_renderer = try_get( + comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get( + content, (lambda x: x['commentRenderer'], dict)) + + if not comment_renderer: + continue + comment = self._extract_comment(comment_renderer, parent) + if not comment: + continue + comment_counts[0] += 1 + yield comment + # Attempt to get the replies + comment_replies_renderer = try_get( + comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict) + + if comment_replies_renderer: + comment_counts[2] += 1 + comment_entries_iter = self._comment_entries( + comment_replies_renderer, identity_token, account_syncid, ytcfg, + video_id, parent=comment.get('id'), comment_counts=comment_counts) + + for reply_comment in comment_entries_iter: + yield reply_comment + + # YouTube comments have a max depth of 2 + max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf') + if max_depth == 1 and parent: + return + if not comment_counts: + # comment so far, est. 
total comments, current comment thread # + comment_counts = [0, 0, 0] + + continuation = self._extract_continuation(root_continuation_data) + if continuation and len(continuation['continuation']) < 27: + self.write_debug('Detected old API continuation token. Generating new API compatible token.') + continuation_token = self._generate_comment_continuation(video_id) + continuation = self._build_api_continuation_query(continuation_token, None) + + visitor_data = None + is_first_continuation = parent is None + + for page_num in itertools.count(0): + if not continuation: + break + headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data) + comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1]) + if page_num == 0: + if is_first_continuation: + note_prefix = 'Downloading comment section API JSON' + else: + note_prefix = ' Downloading comment API JSON reply thread %d %s' % ( + comment_counts[2], comment_prog_str) + else: + note_prefix = '%sDownloading comment%s API JSON page %d %s' % ( + ' ' if parent else '', ' replies' if parent else '', + page_num, comment_prog_str) + + response = self._extract_response( + item_id=None, query=continuation, + ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, + check_get_keys=('onResponseReceivedEndpoints', 'continuationContents')) + if not response: + break + visitor_data = try_get( + response, + lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'], + compat_str) or visitor_data + + continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents')) + + continuation = None + if isinstance(continuation_contents, list): + for continuation_section in continuation_contents: + if not isinstance(continuation_section, dict): + continue + continuation_items = try_get( + continuation_section, + (lambda x: x['reloadContinuationItemsCommand']['continuationItems'], + lambda x: x['appendContinuationItemsAction']['continuationItems']), + list) or [] + if is_first_continuation: + total_comments, continuation = extract_header(continuation_items) + if total_comments: + yield total_comments + is_first_continuation = False + if continuation: + break + continue + count = 0 + for count, entry in enumerate(extract_thread(continuation_items)): + yield entry + continuation = self._extract_continuation({'contents': continuation_items}) + if continuation: + # Sometimes YouTube provides a continuation without any comments + # In most cases we end up just downloading these with very little comments to come. + if count == 0: + if not parent: + self.report_warning('No comments received - assuming end of comments') + continuation = None + break + + # Deprecated response structure + elif isinstance(continuation_contents, dict): + known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation') + for key, continuation_renderer in continuation_contents.items(): + if key not in known_continuation_renderers: + continue + if not isinstance(continuation_renderer, dict): + continue + if is_first_continuation: + header_continuation_items = [continuation_renderer.get('header') or {}] + total_comments, continuation = extract_header(header_continuation_items) + if total_comments: + yield total_comments + is_first_continuation = False + if continuation: + break + + # Sometimes YouTube provides a continuation without any comments + # In most cases we end up just downloading these with very little comments to come. 
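# [editor's note, not part of the patch] Same guard as in the list-structured
# branch above: if this page yields no threads, count stays 0 and the loop
# below stops rather than chasing further empty continuations.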
+ count = 0 + for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {})): + yield entry + continuation = self._extract_continuation(continuation_renderer) + if count == 0: + if not parent: + self.report_warning('No comments received - assuming end of comments') + continuation = None + break + + @staticmethod + def _generate_comment_continuation(video_id): + """ + Generates initial comment section continuation token from given video id + """ + b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8'))) + parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u') + new_continuation_intlist = list(itertools.chain.from_iterable( + [bytes_to_intlist(base64.b64decode(part)) for part in parts])) + return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8') + + def _extract_comments(self, ytcfg, video_id, contents, webpage): + """Entry for comment extraction""" + def _real_comment_extract(contents): + if isinstance(contents, list): + for entry in contents: + for key, renderer in entry.items(): + if key not in known_entry_comment_renderers: + continue + yield from self._comment_entries( + renderer, video_id=video_id, ytcfg=ytcfg, + identity_token=self._extract_identity_token(webpage, item_id=video_id), + account_syncid=self._extract_account_syncid(ytcfg)) + break + comments = [] + known_entry_comment_renderers = ('itemSectionRenderer',) + estimated_total = 0 + max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0]) or float('inf') + + try: + for comment in _real_comment_extract(contents): + if len(comments) >= max_comments: + break + if isinstance(comment, int): + estimated_total = comment + continue + comments.append(comment) + except KeyboardInterrupt: + self.to_screen('Interrupted by user') + self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total)) + return { + 'comments': comments, + 'comment_count': len(comments), + } + + @staticmethod + def _generate_player_context(sts=None): + context = { + 'html5Preference': 'HTML5_PREF_WANTS', + } + if sts is not None: + context['signatureTimestamp'] = sts + return { + 'playbackContext': { + 'contentPlaybackContext': context + } + } + + @staticmethod + def _get_video_info_params(video_id, client='TVHTML5'): + GVI_CLIENTS = { + 'ANDROID': { + 'c': 'ANDROID', + 'cver': '16.20', + }, + 'TVHTML5': { + 'c': 'TVHTML5', + 'cver': '6.20180913', + } + } + query = { + 'video_id': video_id, + 'eurl': 'https://youtube.googleapis.com/v/' + video_id, + 'html5': '1' + } + query.update(GVI_CLIENTS.get(client)) + return query + + def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + video_id = self._match_id(url) + + is_music_url = smuggled_data.get('is_music_url') or self.is_music_url(url) + + base_url = self.http_scheme() + '//www.youtube.com/' + webpage_url = base_url + 'watch?v=' + video_id + webpage = self._download_webpage( + webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False) + + ytcfg = self._extract_ytcfg(video_id, webpage) or self._get_default_ytcfg() + identity_token = self._extract_identity_token(webpage, video_id) + session_index = self._extract_session_index(ytcfg) + player_url = self._extract_player_url(ytcfg, webpage) + + player_client = self._configuration_arg('player_client', [''])[0] + if player_client not in ('web', 'android', ''): + self.report_warning(f'Invalid player_client {player_client} given. 
Falling back to android client.') + force_mobile_client = player_client != 'web' + player_skip = self._configuration_arg('player_skip') + player_response = None + if webpage: + player_response = self._extract_yt_initial_variable( + webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, + video_id, 'initial player response') + + syncid = self._extract_account_syncid(ytcfg, player_response) + headers = self._generate_api_headers(ytcfg, identity_token, syncid, session_index=session_index) + + ytm_streaming_data = {} + if is_music_url: + ytm_webpage = None + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + if sts and not force_mobile_client and 'configs' not in player_skip: + ytm_webpage = self._download_webpage( + 'https://music.youtube.com', + video_id, fatal=False, note='Downloading remix client config') + + ytm_cfg = self._extract_ytcfg(video_id, ytm_webpage) or {} + ytm_client = 'WEB_REMIX' + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning('Falling back to android remix client for player API.') + ytm_client = 'ANDROID_MUSIC' + ytm_cfg = {} + + ytm_headers = self._generate_api_headers( + ytm_cfg, identity_token, syncid, + client=ytm_client, session_index=session_index) + ytm_query = {'videoId': video_id} + ytm_query.update(self._generate_player_context(sts)) + + ytm_player_response = self._extract_response( + item_id=video_id, ep='player', query=ytm_query, + ytcfg=ytm_cfg, headers=ytm_headers, fatal=False, + default_client=ytm_client, + note='Downloading %sremix player API JSON' % ('android ' if force_mobile_client else '')) + ytm_streaming_data = try_get(ytm_player_response, lambda x: x['streamingData'], dict) or {} + + if not player_response or force_mobile_client: + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + yt_client = 'WEB' + ytpcfg = ytcfg + ytp_headers = headers + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning('Falling back to android client for player API.') + yt_client = 'ANDROID' + ytpcfg = {} + ytp_headers = self._generate_api_headers(ytpcfg, identity_token, syncid, + client=yt_client, session_index=session_index) + + yt_query = {'videoId': video_id} + yt_query.update(self._generate_player_context(sts)) + player_response = self._extract_response( + item_id=video_id, ep='player', query=yt_query, + ytcfg=ytpcfg, headers=ytp_headers, fatal=False, + default_client=yt_client, + note='Downloading %splayer API JSON' % ('android ' if force_mobile_client else '') + ) or player_response + + # Age-gate workarounds + playability_status = player_response.get('playabilityStatus') or {} + if playability_status.get('reason') in self._AGE_GATE_REASONS: + gvi_clients = ('ANDROID', 'TVHTML5') if force_mobile_client else ('TVHTML5', 'ANDROID') + for gvi_client in gvi_clients: + pr = self._parse_json(try_get(compat_parse_qs( + self._download_webpage( + base_url + 'get_video_info', video_id, + 'Refetching age-gated %s info webpage' % gvi_client.lower(), + 'unable to download video info webpage', fatal=False, + query=self._get_video_info_params(video_id, client=gvi_client))), + lambda x: x['player_response'][0], + compat_str) or '{}', video_id) + if pr: + break + if not pr: + self.report_warning('Falling back to embedded-only age-gate workaround.') + 
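# [editor's note, not part of the patch] The loop above retries get_video_info
# once per client, with query params from _get_video_info_params (added above);
# for a placeholder 11-character video id:
#   _get_video_info_params('xxxxxxxxxxx', client='TVHTML5')
#   -> {'video_id': 'xxxxxxxxxxx',
#       'eurl': 'https://youtube.googleapis.com/v/xxxxxxxxxxx',
#       'html5': '1', 'c': 'TVHTML5', 'cver': '6.20180913'}
# Only if every client fails does the embedded-player fallback below run.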
embed_webpage = None + sts = self._extract_signature_timestamp(video_id, player_url, ytcfg, fatal=False) + if sts and not force_mobile_client and 'configs' not in player_skip: + embed_webpage = self._download_webpage( + 'https://www.youtube.com/embed/%s?html5=1' % video_id, + video_id=video_id, note='Downloading age-gated embed config') + + ytcfg_age = self._extract_ytcfg(video_id, embed_webpage) or {} + # If we extracted the embed webpage, it'll tell us if we can view the video + embedded_pr = self._parse_json( + try_get(ytcfg_age, lambda x: x['PLAYER_VARS']['embedded_player_response'], str) or '{}', + video_id=video_id) + embedded_ps_reason = try_get(embedded_pr, lambda x: x['playabilityStatus']['reason'], str) or '' + if embedded_ps_reason not in self._AGE_GATE_REASONS: + yt_client = 'WEB_EMBEDDED_PLAYER' + if not sts or force_mobile_client: + # Android client already has signature descrambled + # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/562 + if not sts: + self.report_warning( + 'Falling back to android embedded client for player API (note: some formats may be missing).') + yt_client = 'ANDROID_EMBEDDED_PLAYER' + ytcfg_age = {} + + ytage_headers = self._generate_api_headers( + ytcfg_age, identity_token, syncid, + client=yt_client, session_index=session_index) + yt_age_query = {'videoId': video_id} + yt_age_query.update(self._generate_player_context(sts)) + pr = self._extract_response( + item_id=video_id, ep='player', query=yt_age_query, + ytcfg=ytcfg_age, headers=ytage_headers, fatal=False, + default_client=yt_client, + note='Downloading %sage-gated player API JSON' % ('android ' if force_mobile_client else '') + ) or {} + + if pr: + player_response = pr + + trailer_video_id = try_get( + playability_status, + lambda x: x['errorScreen']['playerLegacyDesktopYpcTrailerRenderer']['trailerVideoId'], + compat_str) + if trailer_video_id: + return self.url_result( + trailer_video_id, self.ie_key(), trailer_video_id) + + search_meta = ( + lambda x: self._html_search_meta(x, webpage, default=None)) \ + if webpage else lambda x: None video_details = player_response.get('videoDetails') or {} microformat = try_get( @@ -1504,12 +2432,12 @@ def get_text(x): lambda x: x['microformat']['playerMicroformatRenderer'], dict) or {} video_title = video_details.get('title') \ - or get_text(microformat.get('title')) \ + or self._get_text(microformat.get('title')) \ or search_meta(['og:title', 'twitter:title', 'title']) video_description = video_details.get('shortDescription') if not smuggled_data.get('force_singlefeed', False): - if not self._downloader.params.get('noplaylist'): + if not self.get_param('noplaylist'): multifeed_metadata_list = try_get( player_response, lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'], @@ -1552,20 +2480,34 @@ def feed_entry(name): else: self.to_screen('Downloading just video %s because of --no-playlist' % video_id) - formats = [] - itags = [] + formats, itags, stream_ids = [], [], [] itag_qualities = {} - player_url = None - q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres']) + q = qualities([ + # "tiny" is the smallest video-only format. But some audio-only formats + # was also labeled "tiny". 
It is not clear if such formats still exist + 'tiny', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats + 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres' + ]) + streaming_data = player_response.get('streamingData') or {} streaming_formats = streaming_data.get('formats') or [] streaming_formats.extend(streaming_data.get('adaptiveFormats') or []) + streaming_formats.extend(ytm_streaming_data.get('formats') or []) + streaming_formats.extend(ytm_streaming_data.get('adaptiveFormats') or []) + for fmt in streaming_formats: if fmt.get('targetDurationSec') or fmt.get('drmFamilies'): continue itag = str_or_none(fmt.get('itag')) + audio_track = fmt.get('audioTrack') or {} + stream_id = '%s.%s' % (itag or '', audio_track.get('id', '')) + if stream_id in stream_ids: + continue + quality = fmt.get('quality') + if quality == 'tiny' or not quality: + quality = fmt.get('audioQuality', '').lower() or quality if itag and quality: itag_qualities[itag] = quality # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment @@ -1581,12 +2523,6 @@ def feed_entry(name): encrypted_sig = try_get(sc, lambda x: x['s'][0]) if not (sc and fmt_url and encrypted_sig): continue - if not player_url: - if not webpage: - continue - player_url = self._search_regex( - r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"', - webpage, 'player URL', fatal=False) if not player_url: continue signature = self._decrypt_signature(sc['s'][0], video_id, player_url) @@ -1595,27 +2531,32 @@ def feed_entry(name): if itag: itags.append(itag) + stream_ids.append(stream_id) + tbr = float_or_none( fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) dct = { 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), 'format_id': itag, - 'format_note': fmt.get('qualityLabel') or quality, + 'format_note': audio_track.get('displayName') or fmt.get('qualityLabel') or quality, 'fps': int_or_none(fmt.get('fps')), 'height': int_or_none(fmt.get('height')), 'quality': q(quality), 'tbr': tbr, 'url': fmt_url, 'width': fmt.get('width'), + 'language': audio_track.get('id', '').split('.')[0], } - mimetype = fmt.get('mimeType') - if mimetype: - mobj = re.match( - r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype) - if mobj: - dct['ext'] = mimetype2ext(mobj.group(1)) - dct.update(parse_codecs(mobj.group(2))) + mime_mobj = re.match( + r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '') + if mime_mobj: + dct['ext'] = mimetype2ext(mime_mobj.group(1)) + dct.update(parse_codecs(mime_mobj.group(2))) + # The 3gp format in android client has a quality of "small", + # but is actually worse than all other formats + if dct['ext'] == '3gp': + dct['quality'] = q('tiny') no_audio = dct.get('acodec') == 'none' no_video = dct.get('vcodec') == 'none' if no_audio: @@ -1631,18 +2572,22 @@ def feed_entry(name): dct['container'] = dct['ext'] + '_dash' formats.append(dct) - hls_manifest_url = streaming_data.get('hlsManifestUrl') - if hls_manifest_url: - for f in self._extract_m3u8_formats( - hls_manifest_url, video_id, 'mp4', fatal=False): - itag = self._search_regex( - r'/itag/(\d+)', f['url'], 'itag', default=None) - if itag: - f['format_id'] = itag - formats.append(f) - - if self._downloader.params.get('youtube_include_dash_manifest'): - dash_manifest_url = streaming_data.get('dashManifestUrl') + skip_manifests = self._configuration_arg('skip') + get_dash = 'dash' not in skip_manifests and 
self.get_param('youtube_include_dash_manifest', True) + get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True) + + for sd in (streaming_data, ytm_streaming_data): + hls_manifest_url = get_hls and sd.get('hlsManifestUrl') + if hls_manifest_url: + for f in self._extract_m3u8_formats( + hls_manifest_url, video_id, 'mp4', fatal=False): + itag = self._search_regex( + r'/itag/(\d+)', f['url'], 'itag', default=None) + if itag: + f['format_id'] = itag + formats.append(f) + + dash_manifest_url = get_dash and sd.get('dashManifestUrl') if dash_manifest_url: for f in self._extract_mpd_formats( dash_manifest_url, video_id, fatal=False): @@ -1650,9 +2595,6 @@ def feed_entry(name): if itag in itags: continue if itag in itag_qualities: - # Not actually usefull since the sorting is already done with "quality,res,fps,codec" - # but kept to maintain feature parity (and code similarity) with youtube-dl - # Remove if this causes any issues with sorting in future f['quality'] = q(itag_qualities[itag]) filesize = int_or_none(self._search_regex( r'/clen/(\d+)', f.get('fragment_base_url') @@ -1662,27 +2604,26 @@ def feed_entry(name): formats.append(f) if not formats: - if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'): - raise ExtractorError( + if not self.get_param('allow_unplayable_formats') and streaming_data.get('licenseInfos'): + self.raise_no_formats( 'This video is DRM protected.', expected=True) pemr = try_get( playability_status, lambda x: x['errorScreen']['playerErrorMessageRenderer'], dict) or {} - reason = get_text(pemr.get('reason')) or playability_status.get('reason') + reason = self._get_text(pemr.get('reason')) or playability_status.get('reason') subreason = pemr.get('subreason') if subreason: - subreason = clean_html(get_text(subreason)) + subreason = clean_html(self._get_text(subreason)) if subreason == 'The uploader has not made this video available in your country.': countries = microformat.get('availableCountries') if not countries: regions_allowed = search_meta('regionsAllowed') countries = regions_allowed.split(',') if regions_allowed else None - self.raise_geo_restricted( - subreason, countries) + self.raise_geo_restricted(subreason, countries, metadata_available=True) reason += '\n' + subreason if reason: - raise ExtractorError(reason, expected=True) + self.raise_no_formats(reason, expected=True) self._sort_formats(formats) @@ -1693,13 +2634,16 @@ def feed_entry(name): for m in re.finditer(self._meta_regex('og:video:tag'), webpage)] for keyword in keywords: if keyword.startswith('yt:stretch='): - w, h = keyword.split('=')[1].split(':') - w, h = int(w), int(h) - if w > 0 and h > 0: - ratio = w / h - for f in formats: - if f.get('vcodec') != 'none': - f['stretched_ratio'] = ratio + mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword) + if mobj: + # NB: float is intentional for forcing float division + w, h = (float(v) for v in mobj.groups()) + if w > 0 and h > 0: + ratio = w / h + for f in formats: + if f.get('vcodec') != 'none': + f['stretched_ratio'] = ratio + break thumbnails = [] for container in (video_details, microformat): @@ -1709,17 +2653,30 @@ def feed_entry(name): thumbnail_url = thumbnail.get('url') if not thumbnail_url: continue + # Sometimes youtube gives a wrong thumbnail URL. 
See: + # https://github.com/yt-dlp/yt-dlp/issues/233 + # https://github.com/ytdl-org/youtube-dl/issues/28023 + if 'maxresdefault' in thumbnail_url: + thumbnail_url = thumbnail_url.split('?')[0] thumbnails.append({ - 'height': int_or_none(thumbnail.get('height')), 'url': thumbnail_url, + 'height': int_or_none(thumbnail.get('height')), 'width': int_or_none(thumbnail.get('width')), + 'preference': 1 if 'maxresdefault' in thumbnail_url else -1 }) - if thumbnails: - break - else: - thumbnail = search_meta(['og:image', 'twitter:image']) - if thumbnail: - thumbnails = [{'url': thumbnail}] + thumbnail_url = search_meta(['og:image', 'twitter:image']) + if thumbnail_url: + thumbnails.append({ + 'url': thumbnail_url, + 'preference': 1 if 'maxresdefault' in thumbnail_url else -1 + }) + # All videos have a maxresdefault thumbnail, but sometimes it does not appear in the webpage + # See: https://github.com/ytdl-org/youtube-dl/issues/29049 + thumbnails.append({ + 'url': 'https://i.ytimg.com/vi/%s/maxresdefault.jpg' % video_id, + 'preference': 1, + }) + self._remove_duplicate_formats(thumbnails) category = microformat.get('category') or search_meta('genre') channel_id = video_details.get('channelId') \ @@ -1730,6 +2687,7 @@ def feed_entry(name): or microformat.get('lengthSeconds')) \ or parse_duration(search_meta('duration')) is_live = video_details.get('isLive') + is_upcoming = video_details.get('isUpcoming') owner_profile_url = microformat.get('ownerProfileUrl') info = { @@ -1761,7 +2719,7 @@ def feed_entry(name): 'tags': keywords, 'is_live': is_live, 'playable_in_embed': playability_status.get('playableInEmbed'), - 'was_live': video_details.get('isLiveContent') + 'was_live': video_details.get('isLiveContent'), } pctr = try_get( @@ -1769,8 +2727,8 @@ def feed_entry(name): lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict) subtitles = {} if pctr: - def process_language(container, base_url, lang_code, query): - lang_subs = [] + def process_language(container, base_url, lang_code, sub_name, query): + lang_subs = container.setdefault(lang_code, []) for fmt in self._SUBTITLE_FORMATS: query.update({ 'fmt': fmt, @@ -1778,19 +2736,23 @@ def process_language(container, base_url, lang_code, query): lang_subs.append({ 'ext': fmt, 'url': update_url_query(base_url, query), + 'name': sub_name, }) - container[lang_code] = lang_subs for caption_track in (pctr.get('captionTracks') or []): base_url = caption_track.get('baseUrl') if not base_url: continue if caption_track.get('kind') != 'asr': - lang_code = caption_track.get('languageCode') + lang_code = ( + remove_start(caption_track.get('vssId') or '', '.').replace('.', '-') + or caption_track.get('languageCode')) if not lang_code: continue process_language( - subtitles, base_url, lang_code, {}) + subtitles, base_url, lang_code, + try_get(caption_track, lambda x: x['name']['simpleText']), + {}) continue automatic_captions = {} for translation_language in (pctr.get('translationLanguages') or []): @@ -1799,6 +2761,7 @@ def process_language(container, base_url, lang_code, query): continue process_language( automatic_captions, base_url, translation_language_code, + self._get_text(translation_language.get('languageName'), max_runs=1), {'tlang': translation_language_code}) info['automatic_captions'] = automatic_captions info['subtitles'] = subtitles @@ -1836,20 +2799,22 @@ def process_language(container, base_url, lang_code, query): webpage, self._YT_INITIAL_DATA_RE, video_id, 'yt initial data') if not initial_data: - initial_data = self._call_api( - 'next', 
{'videoId': video_id}, video_id, fatal=False) + initial_data = self._extract_response( + item_id=video_id, ep='next', fatal=False, + ytcfg=ytcfg, headers=headers, query={'videoId': video_id}, + note='Downloading initial data API JSON') - if not is_live: - try: - # This will error if there is no livechat - initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation'] - info['subtitles']['live_chat'] = [{ - 'video_id': video_id, - 'ext': 'json', - 'protocol': 'youtube_live_chat_replay', - }] - except (KeyError, IndexError, TypeError): - pass + try: + # This will error if there is no livechat + initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation'] + info['subtitles']['live_chat'] = [{ + 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies + 'video_id': video_id, + 'ext': 'json', + 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay', + }] + except (KeyError, IndexError, TypeError): + pass if initial_data: chapters = self._extract_chapters_from_json( @@ -1864,7 +2829,7 @@ def process_language(container, base_url, lang_code, query): def chapter_time(mmlir): return parse_duration( - get_text(mmlir.get('timeDescription'))) + self._get_text(mmlir.get('timeDescription'))) chapters = [] for next_num, content in enumerate(contents, start=1): @@ -1878,7 +2843,7 @@ def chapter_time(mmlir): chapters.append({ 'start_time': start_time, 'end_time': end_time, - 'title': get_text(mmlir.get('title')), + 'title': self._get_text(mmlir.get('title')), }) if chapters: break @@ -1894,7 +2859,7 @@ def chapter_time(mmlir): if vpir: stl = vpir.get('superTitleLink') if stl: - stl = get_text(stl) + stl = self._get_text(stl) if try_get( vpir, lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN': @@ -1934,10 +2899,10 @@ def chapter_time(mmlir): }) vsir = content.get('videoSecondaryInfoRenderer') if vsir: - info['channel'] = get_text(try_get( + info['channel'] = self._get_text(try_get( vsir, lambda x: x['owner']['videoOwnerRenderer']['title'], - compat_str)) + dict)) rows = try_get( vsir, lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'], @@ -1952,8 +2917,8 @@ def chapter_time(mmlir): mrr_title = mrr.get('title') if not mrr_title: continue - mrr_title = get_text(mrr['title']) - mrr_contents_text = get_text(mrr['contents'][0]) + mrr_title = self._get_text(mrr['title']) + mrr_contents_text = self._get_text(mrr['contents'][0]) if mrr_title == 'License': info['license'] = mrr_contents_text elif not multiple_songs: @@ -1978,9 +2943,37 @@ def chapter_time(mmlir): if v: info[d_k] = v + is_private = bool_or_none(video_details.get('isPrivate')) + is_unlisted = bool_or_none(microformat.get('isUnlisted')) + is_membersonly = None + is_premium = None + if initial_data and is_private is not None: + is_membersonly = False + is_premium = False + contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or [] + badge_labels = set() + for content in contents: + if not isinstance(content, dict): + continue + badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer'))) + for badge_label in badge_labels: + if badge_label.lower() == 'members only': + is_membersonly = True + elif badge_label.lower() == 'premium': + is_premium = True + elif badge_label.lower() == 
'unlisted': + is_unlisted = True + + info['availability'] = self._availability( + is_private=is_private, + needs_premium=is_premium, + needs_subscription=is_membersonly, + needs_auth=info['age_limit'] >= 18, + is_unlisted=None if is_private is None else is_unlisted) + # get xsrf for annotations or comments - get_annotations = self._downloader.params.get('writeannotations', False) - get_comments = self._downloader.params.get('getcomments', False) + get_annotations = self.get_param('writeannotations', False) + get_comments = self.get_param('getcomments', False) if get_annotations or get_comments: xsrf_token = None ytcfg = self._extract_ytcfg(video_id, webpage) @@ -2010,156 +3003,8 @@ def chapter_time(mmlir): errnote='Unable to download video annotations', fatal=False, data=urlencode_postdata({xsrf_field_name: xsrf_token})) - # Get comments - # TODO: Refactor and move to seperate function - def extract_comments(): - expected_video_comment_count = 0 - video_comments = [] - comment_xsrf = xsrf_token - - def find_value(html, key, num_chars=2, separator='"'): - pos_begin = html.find(key) + len(key) + num_chars - pos_end = html.find(separator, pos_begin) - return html[pos_begin: pos_end] - - def search_dict(partial, key): - if isinstance(partial, dict): - for k, v in partial.items(): - if k == key: - yield v - else: - for o in search_dict(v, key): - yield o - elif isinstance(partial, list): - for i in partial: - for o in search_dict(i, key): - yield o - - continuations = [] - if initial_data: - try: - ncd = next(search_dict(initial_data, 'nextContinuationData')) - continuations = [ncd['continuation']] - # Handle videos where comments have been disabled entirely - except StopIteration: - pass - - def get_continuation(continuation, session_token, replies=False): - query = { - 'pbj': 1, - 'ctoken': continuation, - } - if replies: - query['action_get_comment_replies'] = 1 - else: - query['action_get_comments'] = 1 - - while True: - content, handle = self._download_webpage_handle( - 'https://www.youtube.com/comment_service_ajax', - video_id, - note=False, - expected_status=[413], - data=urlencode_postdata({ - 'session_token': session_token - }), - query=query, - headers={ - 'Accept': '*/*', - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0', - 'X-YouTube-Client-Name': '1', - 'X-YouTube-Client-Version': '2.20201202.06.01' - } - ) - - response_code = handle.getcode() - if (response_code == 200): - return self._parse_json(content, video_id) - if (response_code == 413): - return None - raise ExtractorError('Unexpected HTTP error code: %s' % response_code) - - first_continuation = True - chain_msg = '' - self.to_screen('Downloading comments') - while continuations: - continuation = continuations.pop() - comment_response = get_continuation(continuation, comment_xsrf) - if not comment_response: - continue - if list(search_dict(comment_response, 'externalErrorMessage')): - raise ExtractorError('Error returned from server: ' + next(search_dict(comment_response, 'externalErrorMessage'))) - - if 'continuationContents' not in comment_response['response']: - # Something is wrong here. Youtube won't accept this continuation token for some reason and responds with a user satisfaction dialog (error?) 
- continue - # not sure if this actually helps - if 'xsrf_token' in comment_response: - comment_xsrf = comment_response['xsrf_token'] - - item_section = comment_response['response']['continuationContents']['itemSectionContinuation'] - if first_continuation: - expected_video_comment_count = int(item_section['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'].replace(' Comments', '').replace('1 Comment', '1').replace(',', '')) - first_continuation = False - if 'contents' not in item_section: - # continuation returned no comments? - # set an empty array as to not break the for loop - item_section['contents'] = [] - - for meta_comment in item_section['contents']: - comment = meta_comment['commentThreadRenderer']['comment']['commentRenderer'] - video_comments.append({ - 'id': comment['commentId'], - 'text': ''.join([c['text'] for c in try_get(comment, lambda x: x['contentText']['runs'], list) or []]), - 'time_text': ''.join([c['text'] for c in comment['publishedTimeText']['runs']]), - 'author': comment.get('authorText', {}).get('simpleText', ''), - 'votes': comment.get('voteCount', {}).get('simpleText', '0'), - 'author_thumbnail': comment['authorThumbnail']['thumbnails'][-1]['url'], - 'parent': 'root' - }) - if 'replies' not in meta_comment['commentThreadRenderer']: - continue - - reply_continuations = [rcn['nextContinuationData']['continuation'] for rcn in meta_comment['commentThreadRenderer']['replies']['commentRepliesRenderer']['continuations']] - while reply_continuations: - time.sleep(1) - continuation = reply_continuations.pop() - replies_data = get_continuation(continuation, comment_xsrf, True) - if not replies_data or 'continuationContents' not in replies_data[1]['response']: - continue - - if self._downloader.params.get('verbose', False): - chain_msg = ' (chain %s)' % comment['commentId'] - self.to_screen('Comments downloaded: %d of ~%d%s' % (len(video_comments), expected_video_comment_count, chain_msg)) - reply_comment_meta = replies_data[1]['response']['continuationContents']['commentRepliesContinuation'] - for reply_meta in reply_comment_meta.get('contents', {}): - reply_comment = reply_meta['commentRenderer'] - video_comments.append({ - 'id': reply_comment['commentId'], - 'text': ''.join([c['text'] for c in reply_comment['contentText']['runs']]), - 'time_text': ''.join([c['text'] for c in reply_comment['publishedTimeText']['runs']]), - 'author': reply_comment.get('authorText', {}).get('simpleText', ''), - 'votes': reply_comment.get('voteCount', {}).get('simpleText', '0'), - 'author_thumbnail': reply_comment['authorThumbnail']['thumbnails'][-1]['url'], - 'parent': comment['commentId'] - }) - if 'continuations' not in reply_comment_meta or len(reply_comment_meta['continuations']) == 0: - continue - reply_continuations += [rcn['nextContinuationData']['continuation'] for rcn in reply_comment_meta['continuations']] - - self.to_screen('Comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count)) - if 'continuations' in item_section: - continuations += [ncd['nextContinuationData']['continuation'] for ncd in item_section['continuations']] - time.sleep(1) - - self.to_screen('Total comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count)) - return { - 'comments': video_comments, - 'comment_count': expected_video_comment_count - } - if get_comments: - info['__post_extractor'] = extract_comments + info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage) self.mark_watched(video_id, 
player_response) @@ -2176,7 +3021,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): invidio\.us )/ (?: - (?:channel|c|user)/| + (?Pchannel|c|user|browse)/| (?P feed/|hashtag/| (?:playlist|watch)\?.*?\blist= @@ -2188,7 +3033,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): IE_NAME = 'youtube:tab' _TESTS = [{ - # playlists, multipage + 'note': 'playlists, multipage', 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid', 'playlist_mincount': 94, 'info_dict': { @@ -2199,7 +3044,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg', }, }, { - # playlists, multipage, different order + 'note': 'playlists, multipage, different order', 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd', 'playlist_mincount': 94, 'info_dict': { @@ -2210,7 +3055,18 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'uploader': 'Игорь Клейнер', }, }, { - # playlists, singlepage + 'note': 'playlists, series', + 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3', + 'playlist_mincount': 5, + 'info_dict': { + 'id': 'UCYO_jab_esuFRV4b17AJtAw', + 'title': '3Blue1Brown - Playlists', + 'description': 'md5:e1384e8a133307dd10edee76e875d62f', + 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw', + 'uploader': '3Blue1Brown', + }, + }, { + 'note': 'playlists, singlepage', 'url': 'https://www.youtube.com/user/ThirstForScience/playlists', 'playlist_mincount': 4, 'info_dict': { @@ -2224,7 +3080,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists', 'only_matching': True, }, { - # basic, single video playlist + 'note': 'basic, single video playlist', 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', @@ -2234,7 +3090,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_count': 1, }, { - # empty playlist + 'note': 'empty playlist', 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'info_dict': { 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', @@ -2244,7 +3100,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_count': 0, }, { - # Home tab + 'note': 'Home tab', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2255,7 +3111,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 2, }, { - # Videos tab + 'note': 'Videos tab', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2266,7 +3122,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 975, }, { - # Videos tab, sorted by popular + 'note': 'Videos tab, sorted by popular', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2277,7 +3133,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 199, }, { - # Playlists tab + 'note': 'Playlists tab', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2288,7 +3144,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 17, }, { - # Community tab + 'note': 'Community tab', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2299,7 +3155,7 @@ class 
YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 18, }, { - # Channels tab + 'note': 'Channels tab', 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels', 'info_dict': { 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -2309,6 +3165,17 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', }, 'playlist_mincount': 12, + }, { + 'note': 'Search tab', + 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra', + 'playlist_mincount': 40, + 'info_dict': { + 'id': 'UCYO_jab_esuFRV4b17AJtAw', + 'title': '3Blue1Brown - Search - linear algebra', + 'description': 'md5:e1384e8a133307dd10edee76e875d62f', + 'uploader': '3Blue1Brown', + 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw', + }, }, { 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA', 'only_matching': True, @@ -2340,7 +3207,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 1123, }, { - # even larger playlist, 8832 videos + 'note': 'even larger playlist, 8832 videos', 'url': 'http://www.youtube.com/user/NASAgovVideo/videos', 'only_matching': True, }, { @@ -2354,7 +3221,27 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, 'playlist_mincount': 21, }, { - # https://github.com/ytdl-org/youtube-dl/issues/21844 + 'note': 'Playlist with "show unavailable videos" button', + 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q', + 'info_dict': { + 'title': 'Uploads from Phim Siêu Nhân Nhật Bản', + 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q', + 'uploader': 'Phim Siêu Nhân Nhật Bản', + 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q', + }, + 'playlist_mincount': 200, + }, { + 'note': 'Playlist with unavailable videos in page 7', + 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w', + 'info_dict': { + 'title': 'Uploads from BlankTV', + 'id': 'UU8l9frL61Yl5KFOl87nIm2w', + 'uploader': 'BlankTV', + 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w', + }, + 'playlist_mincount': 1000, + }, { + 'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844', 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', 'info_dict': { 'title': 'Data Analysis with Dr Mike Pound', @@ -2368,7 +3255,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'only_matching': True, }, { - # Playlist URL that does not actually serve a playlist + 'note': 'Playlist URL that does not actually serve a playlist', 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4', 'info_dict': { 'id': 'FqZTN594JQw', @@ -2400,14 +3287,14 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, { 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live', 'info_dict': { - 'id': '9Auq9mYxFEE', + 'id': 'X1whbWASnNQ', # This will keep changing 'ext': 'mp4', 'title': compat_str, 'uploader': 'Sky News', 'uploader_id': 'skynews', 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews', - 'upload_date': '20191102', - 'description': 'md5:85ddd75d888674631aaf9599a9a0b0ae', + 'upload_date': r're:\d{8}', + 'description': compat_str, 'categories': ['News & Politics'], 'tags': list, 'like_count': int, @@ -2416,6 +3303,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): 'params': { 'skip_download': True, }, + 'expected_warnings': ['Downloading just video ', 'Ignoring subtitle tracks found in '], }, { 'url': 'https://www.youtube.com/user/TheYoungTurks/live', 'info_dict': { @@ -2443,31 +3331,31 @@ class 
YoutubeTabIE(YoutubeBaseInfoExtractor): }, { 'url': 'https://www.youtube.com/c/CommanderVideoHq/live', 'only_matching': True, + }, { + 'note': 'A channel that is not live. Should raise error', + 'url': 'https://www.youtube.com/user/numberphile/live', + 'only_matching': True, }, { 'url': 'https://www.youtube.com/feed/trending', 'only_matching': True, }, { - # needs auth 'url': 'https://www.youtube.com/feed/library', 'only_matching': True, }, { - # needs auth 'url': 'https://www.youtube.com/feed/history', 'only_matching': True, }, { - # needs auth 'url': 'https://www.youtube.com/feed/subscriptions', 'only_matching': True, }, { - # needs auth 'url': 'https://www.youtube.com/feed/watch_later', 'only_matching': True, }, { - # no longer available? + 'note': 'Recommended - redirects to home page', 'url': 'https://www.youtube.com/feed/recommended', 'only_matching': True, }, { - # inline playlist with not always working continuations + 'note': 'inline playlist with not always working continuations', 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C', 'only_matching': True, }, { @@ -2485,6 +3373,81 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor): }, { 'url': 'https://www.youtube.com/TheYoungTurks/live', 'only_matching': True, + }, { + 'url': 'https://www.youtube.com/hashtag/cctv9', + 'info_dict': { + 'id': 'cctv9', + 'title': '#cctv9', + }, + 'playlist_mincount': 350, + }, { + 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU', + 'only_matching': True, + }, { + 'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist', + 'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq', + 'only_matching': True + }, { + 'note': '/browse/ should redirect to /channel/', + 'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng', + 'only_matching': True + }, { + 'note': 'VLPL, should redirect to playlist?list=PL...', + 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq', + 'info_dict': { + 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq', + 'uploader': 'NoCopyrightSounds', + 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!', + 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg', + 'title': 'NCS Releases', + }, + 'playlist_mincount': 166, + }, { + 'note': 'Topic, should redirect to playlist?list=UU...', + 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw', + 'info_dict': { + 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw', + 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', + 'title': 'Uploads from Royalty Free Music - Topic', + 'uploader': 'Royalty Free Music - Topic', + }, + 'expected_warnings': [ + 'A channel/user page was given', + 'The URL does not have a videos tab', + ], + 'playlist_mincount': 101, + }, { + 'note': 'Topic without a UU playlist', + 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg', + 'info_dict': { + 'id': 'UCtFRv9O2AHqOZjjynzrv-xg', + 'title': 'UCtFRv9O2AHqOZjjynzrv-xg', + }, + 'expected_warnings': [ + 'A channel/user page was given', + 'The URL does not have a videos tab', + 'Falling back to channel URL', + ], + 'playlist_mincount': 9, + }, { + 'note': 'Youtube music Album', + 'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE', + 'info_dict': { + 'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0', + 'title': 'Album - Royalty Free Music Library V2 (50 Songs)', + }, + 'playlist_count': 50, + }, { + 'note': 'unlisted single 
video playlist', + 'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf', + 'info_dict': { + 'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q', + 'uploader': 'colethedj', + 'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf', + 'title': 'yt-dlp unlisted playlist test', + 'availability': 'unlisted' + }, + 'playlist_count': 1, }] @classmethod @@ -2506,21 +3469,28 @@ def _extract_channel_id(self, webpage): channel_url, 'channel id') @staticmethod - def _extract_grid_item_renderer(item): - for item_kind in ('Playlist', 'Video', 'Channel'): - renderer = item.get('grid%sRenderer' % item_kind) - if renderer: + def _extract_basic_item_renderer(item): + # Modified from _extract_grid_item_renderer + known_basic_renderers = ( + 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer' + ) + for key, renderer in item.items(): + if not isinstance(renderer, dict): + continue + elif key in known_basic_renderers: + return renderer + elif key.startswith('grid') and key.endswith('Renderer'): return renderer def _grid_entries(self, grid_renderer): for item in grid_renderer['items']: if not isinstance(item, dict): continue - renderer = self._extract_grid_item_renderer(item) + renderer = self._extract_basic_item_renderer(item) if not isinstance(renderer, dict): continue - title = try_get( - renderer, lambda x: x['title']['runs'][0]['text'], compat_str) + title = self._get_text(renderer.get('title')) + # playlist playlist_id = renderer.get('playlistId') if playlist_id: @@ -2528,24 +3498,35 @@ def _grid_entries(self, grid_renderer): 'https://www.youtube.com/playlist?list=%s' % playlist_id, ie=YoutubeTabIE.ie_key(), video_id=playlist_id, video_title=title) + continue # video video_id = renderer.get('videoId') if video_id: yield self._extract_video(renderer) + continue # channel channel_id = renderer.get('channelId') if channel_id: - title = try_get( - renderer, lambda x: x['title']['simpleText'], compat_str) yield self.url_result( 'https://www.youtube.com/channel/%s' % channel_id, ie=YoutubeTabIE.ie_key(), video_title=title) + continue + # generic endpoint URL support + ep_url = urljoin('https://www.youtube.com/', try_get( + renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'], + compat_str)) + if ep_url: + for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE): + if ie.suitable(ep_url): + yield self.url_result( + ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title) + break def _shelf_entries_from_content(self, shelf_renderer): content = shelf_renderer.get('content') if not isinstance(content, dict): return - renderer = content.get('gridRenderer') + renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer') if renderer: # TODO: add support for nested playlists so each shelf is processed # as separate playlist @@ -2568,8 +3549,7 @@ def _shelf_entries(self, shelf_renderer, skip_channels=False): # will not work if skip_channels and '/channels?' 
in shelf_url: return - title = try_get( - shelf_renderer, lambda x: x['title']['runs'][0]['text'], compat_str) + title = self._get_text(shelf_renderer, lambda x: x['title']) yield self.url_result(shelf_url, video_title=title) # Shelf may not contain shelf URL, fallback to extraction from content for entry in self._shelf_entries_from_content(shelf_renderer): @@ -2587,20 +3567,6 @@ def _playlist_entries(self, video_list_renderer): continue yield self._extract_video(renderer) - r""" # Not needed in the new implementation - def _itemSection_entries(self, item_sect_renderer): - for content in item_sect_renderer['contents']: - if not isinstance(content, dict): - continue - renderer = content.get('videoRenderer', {}) - if not isinstance(renderer, dict): - continue - video_id = renderer.get('videoId') - if not video_id: - continue - yield self._extract_video(renderer) - """ - def _rich_entries(self, rich_grid_renderer): renderer = try_get( rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {} @@ -2621,12 +3587,19 @@ def _post_thread_entries(self, post_thread_renderer): return # video attachment video_renderer = try_get( - post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) - video_id = None - if video_renderer: - entry = self._video_entry(video_renderer) + post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {} + video_id = video_renderer.get('videoId') + if video_id: + entry = self._extract_video(video_renderer) if entry: yield entry + # playlist attachment + playlist_id = try_get( + post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str) + if playlist_id: + yield self.url_result( + 'https://www.youtube.com/playlist?list=%s' % playlist_id, + ie=YoutubeTabIE.ie_key(), video_id=playlist_id) # inline video links runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or [] for run in runs: @@ -2641,7 +3614,7 @@ def _post_thread_entries(self, post_thread_renderer): ep_video_id = YoutubeIE._match_id(ep_url) if video_id == ep_video_id: continue - yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id) + yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id) def _post_thread_continuation_entries(self, post_thread_continuation): contents = post_thread_continuation.get('contents') @@ -2654,52 +3627,16 @@ def _post_thread_continuation_entries(self, post_thread_continuation): for entry in self._post_thread_entries(renderer): yield entry - @staticmethod - def _build_continuation_query(continuation, ctp=None): - query = { - 'ctoken': continuation, - 'continuation': continuation, - } - if ctp: - query['itct'] = ctp - return query - - @staticmethod - def _extract_next_continuation_data(renderer): - next_continuation = try_get( - renderer, lambda x: x['continuations'][0]['nextContinuationData'], dict) - if not next_continuation: - return - continuation = next_continuation.get('continuation') - if not continuation: - return - ctp = next_continuation.get('clickTrackingParams') - return YoutubeTabIE._build_continuation_query(continuation, ctp) - - @classmethod - def _extract_continuation(cls, renderer): - next_continuation = cls._extract_next_continuation_data(renderer) - if next_continuation: - return next_continuation - contents = [] - for key in ('contents', 'items'): - contents.extend(try_get(renderer, lambda x: x[key], list) or []) + r''' # unused + def _rich_grid_entries(self, contents): for content in contents: - if not isinstance(content, dict): - 
continue - continuation_ep = try_get( - content, lambda x: x['continuationItemRenderer']['continuationEndpoint'], - dict) - if not continuation_ep: - continue - continuation = try_get( - continuation_ep, lambda x: x['continuationCommand']['token'], compat_str) - if not continuation: - continue - ctp = continuation_ep.get('clickTrackingParams') - return YoutubeTabIE._build_continuation_query(continuation, ctp) - - def _entries(self, tab, identity_token): + video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict) + if video_renderer: + entry = self._video_entry(video_renderer) + if entry: + yield entry + ''' + def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg): def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds contents = try_get(parent_renderer, lambda x: x['contents'], list) or [] @@ -2751,48 +3688,21 @@ def extract_entries(parent_renderer): # this needs to called again for continua for entry in extract_entries(parent_renderer): yield entry continuation = continuation_list[0] - - headers = { - 'x-youtube-client-name': '1', - 'x-youtube-client-version': '2.20201112.04.01', - } - if identity_token: - headers['x-youtube-identity-token'] = identity_token + visitor_data = None for page_num in itertools.count(1): if not continuation: break - retries = self._downloader.params.get('extractor_retries', 3) - count = -1 - last_error = None - while count < retries: - count += 1 - if last_error: - self.report_warning('%s. Retrying ...' % last_error) - try: - browse = self._download_json( - 'https://www.youtube.com/browse_ajax', None, - 'Downloading page %d%s' - % (page_num, ' (retry #%d)' % count if count else ''), - headers=headers, query=continuation) - except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404): - # Downloading page may result in intermittent 5xx HTTP error - # Sometimes a 404 is also recieved. 
See: https://github.com/ytdl-org/youtube-dl/issues/28289 - last_error = 'HTTP Error %s' % e.cause.code - if count < retries: - continue - raise - else: - response = try_get(browse, lambda x: x[1]['response'], dict) + headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data) + response = self._extract_response( + item_id='%s page %s' % (item_id, page_num), + query=continuation, headers=headers, ytcfg=ytcfg, + check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints')) - # Youtube sometimes sends incomplete data - # See: https://github.com/ytdl-org/youtube-dl/issues/28194 - if response.get('continuationContents') or response.get('onResponseReceivedActions'): - break - last_error = 'Incomplete data recieved' - if not browse or not response: + if not response: break + visitor_data = try_get( + response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data known_continuation_renderers = { 'playlistVideoListContinuation': self._playlist_entries, @@ -2819,11 +3729,13 @@ def extract_entries(parent_renderer): # this needs to called again for continua 'gridPlaylistRenderer': (self._grid_entries, 'items'), 'gridVideoRenderer': (self._grid_entries, 'items'), 'playlistVideoRenderer': (self._playlist_entries, 'contents'), - 'itemSectionRenderer': (self._playlist_entries, 'contents'), + 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds 'richItemRenderer': (extract_entries, 'contents'), # for hashtag + 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents') } + on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints')) continuation_items = try_get( - response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list) + on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list) continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {} video_items_renderer = None for key, value in continuation_item.items(): @@ -2842,35 +3754,28 @@ def extract_entries(parent_renderer): # this needs to called again for continua @staticmethod def _extract_selected_tab(tabs): for tab in tabs: - if try_get(tab, lambda x: x['tabRenderer']['selected'], bool): - return tab['tabRenderer'] + renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {} + if renderer.get('selected') is True: + return renderer else: raise ExtractorError('Unable to find selected tab') - @staticmethod - def _extract_uploader(data): + @classmethod + def _extract_uploader(cls, data): uploader = {} - sidebar_renderer = try_get( - data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) - if sidebar_renderer: - for item in sidebar_renderer: - if not isinstance(item, dict): - continue - renderer = item.get('playlistSidebarSecondaryInfoRenderer') - if not isinstance(renderer, dict): - continue - owner = try_get( - renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict) - if owner: - uploader['uploader'] = owner.get('text') - uploader['uploader_id'] = try_get( - owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str) - uploader['uploader_url'] = urljoin( - 'https://www.youtube.com/', - try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str)) + renderer = cls._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {} + owner = 
try_get( + renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict) + if owner: + uploader['uploader'] = owner.get('text') + uploader['uploader_id'] = try_get( + owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str) + uploader['uploader_url'] = urljoin( + 'https://www.youtube.com/', + try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str)) return {k: v for k, v in uploader.items() if v is not None} - def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token): + def _extract_from_tabs(self, item_id, webpage, data, tabs): playlist_id = title = description = channel_url = channel_name = channel_id = None thumbnails_list = tags = [] @@ -2881,10 +3786,10 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token): channel_name = renderer.get('title') channel_url = renderer.get('channelUrl') channel_id = renderer.get('externalId') - - if not renderer: + else: renderer = try_get( data, lambda x: x['metadata']['playlistMetadataRenderer'], dict) + if renderer: title = renderer.get('title') description = renderer.get('description', '') @@ -2893,8 +3798,8 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token): thumbnails_list = ( try_get(renderer, lambda x: x['avatar']['thumbnails'], list) or try_get( - data, - lambda x: x['sidebar']['playlistSidebarRenderer']['items'][0]['playlistSidebarPrimaryInfoRenderer']['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'], + self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer'), + lambda x: x['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'], list) or []) @@ -2910,13 +3815,14 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token): 'width': int_or_none(t.get('width')), 'height': int_or_none(t.get('height')), }) - if playlist_id is None: playlist_id = item_id if title is None: - title = playlist_id + title = ( + try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText']) + or playlist_id) title += format_field(selected_tab, 'title', ' - %s') - + title += format_field(selected_tab, 'expandedText', ' - %s') metadata = { 'playlist_id': playlist_id, 'playlist_title': title, @@ -2927,23 +3833,65 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token): 'thumbnails': thumbnails, 'tags': tags, } + availability = self._extract_availability(data) + if availability: + metadata['availability'] = availability if not channel_id: metadata.update(self._extract_uploader(data)) metadata.update({ 'channel': metadata['uploader'], 'channel_id': metadata['uploader_id'], 'channel_url': metadata['uploader_url']}) + ytcfg = self._extract_ytcfg(item_id, webpage) return self.playlist_result( - self._entries(selected_tab, identity_token), + self._entries( + selected_tab, playlist_id, + self._extract_identity_token(webpage, item_id), + self._extract_account_syncid(ytcfg, data), ytcfg), **metadata) - def _extract_from_playlist(self, item_id, url, data, playlist): + def _extract_mix_playlist(self, playlist, playlist_id, data, webpage): + first_id = last_id = None + ytcfg = self._extract_ytcfg(playlist_id, webpage) + headers = self._generate_api_headers( + ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data), + identity_token=self._extract_identity_token(webpage, item_id=playlist_id)) + for page_num in itertools.count(1): + videos = list(self._playlist_entries(playlist)) 
+ if not videos: + return + start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1 + if start >= len(videos): + return + for video in videos[start:]: + if video['id'] == first_id: + self.to_screen('First video %s found again; Assuming end of Mix' % first_id) + return + yield video + first_id = first_id or videos[0]['id'] + last_id = videos[-1]['id'] + watch_endpoint = try_get( + playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint']) + query = { + 'playlistId': playlist_id, + 'videoId': watch_endpoint.get('videoId') or last_id, + 'index': watch_endpoint.get('index') or len(videos), + 'params': watch_endpoint.get('params') or 'OAE%3D' + } + response = self._extract_response( + item_id='%s page %d' % (playlist_id, page_num), + query=query, ep='next', headers=headers, ytcfg=ytcfg, + check_get_keys='contents' + ) + playlist = try_get( + response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict) + + def _extract_from_playlist(self, item_id, url, data, playlist, webpage): title = playlist.get('title') or try_get( data, lambda x: x['titleText']['simpleText'], compat_str) playlist_id = playlist.get('playlistId') or item_id - # Inline playlist rendition continuation does not always work - # at Youtube side, so delegating regular tab-based playlist URL - # processing whenever possible. + + # Delegating everything except mix playlists to regular tab-based playlist URL playlist_url = urljoin(url, try_get( playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'], compat_str)) @@ -2951,109 +3899,245 @@ def _extract_from_playlist(self, item_id, url, data, playlist): return self.url_result( playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id, video_title=title) + return self.playlist_result( - self._playlist_entries(playlist), playlist_id=playlist_id, - playlist_title=title) + self._extract_mix_playlist(playlist, playlist_id, data, webpage), + playlist_id=playlist_id, playlist_title=title) + + def _extract_availability(self, data): + """ + Gets the availability of a given playlist/tab. 
+ Note: Unless YouTube tells us explicitly, we do not assume it is public + @param data: response + """ + is_private = is_unlisted = None + renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {} + badge_labels = self._extract_badges(renderer) + + # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge + privacy_dropdown_entries = try_get( + renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or [] + for renderer_dict in privacy_dropdown_entries: + is_selected = try_get( + renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False + if not is_selected: + continue + label = self._get_text( + try_get(renderer_dict, lambda x: x['privacyDropdownItemRenderer']['label'], dict) or []) + if label: + badge_labels.add(label.lower()) + break + + for badge_label in badge_labels: + if badge_label == 'unlisted': + is_unlisted = True + elif badge_label == 'private': + is_private = True + elif badge_label == 'public': + is_unlisted = is_private = False + return self._availability(is_private, False, False, False, is_unlisted) @staticmethod - def _extract_alerts(data): - for alert_dict in try_get(data, lambda x: x['alerts'], list) or []: - if not isinstance(alert_dict, dict): + def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict): + sidebar_renderer = try_get( + data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or [] + for item in sidebar_renderer: + renderer = try_get(item, lambda x: x[info_renderer], expected_type) + if renderer: + return renderer + + def _reload_with_unavailable_videos(self, item_id, data, webpage): + """ + Get playlist with unavailable videos if the 'show unavailable videos' button exists. 
+ """ + browse_id = params = None + renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') + if not renderer: + return + menu_renderer = try_get( + renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] + for menu_item in menu_renderer: + if not isinstance(menu_item, dict): continue - for renderer in alert_dict: - alert = alert_dict[renderer] - alert_type = alert.get('type') - if not alert_type: - continue - message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) - if message: - yield alert_type, message - for run in try_get(alert, lambda x: x['text']['runs'], list) or []: - message = try_get(run, lambda x: x['text'], compat_str) - if message: - yield alert_type, message + nav_item_renderer = menu_item.get('menuNavigationItemRenderer') + text = try_get( + nav_item_renderer, lambda x: x['text']['simpleText'], compat_str) + if not text or text.lower() != 'show unavailable videos': + continue + browse_endpoint = try_get( + nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} + browse_id = browse_endpoint.get('browseId') + params = browse_endpoint.get('params') + break - def _extract_identity_token(self, webpage, item_id): ytcfg = self._extract_ytcfg(item_id, webpage) - if ytcfg: - token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str) - if token: - return token - return self._search_regex( - r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage, - 'identity token', default=None) + headers = self._generate_api_headers( + ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data), + identity_token=self._extract_identity_token(webpage, item_id=item_id), + visitor_data=try_get( + self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str)) + query = { + 'params': params or 'wgYCCAA=', + 'browseId': browse_id or 'VL%s' % item_id + } + return self._extract_response( + item_id=item_id, headers=headers, query=query, + check_get_keys='contents', fatal=False, ytcfg=ytcfg, + note='Downloading API JSON with unavailable videos') + + def _extract_webpage(self, url, item_id): + retries = self.get_param('extractor_retries', 3) + count = -1 + last_error = 'Incomplete yt initial data recieved' + while count < retries: + count += 1 + # Sometimes youtube returns a webpage with incomplete ytInitialData + # See: https://github.com/yt-dlp/yt-dlp/issues/116 + if count: + self.report_warning('%s. Retrying ...' % last_error) + webpage = self._download_webpage( + url, item_id, + 'Downloading webpage%s' % (' (retry #%d)' % count if count else '')) + data = self._extract_yt_initial_data(item_id, webpage) + if data.get('contents') or data.get('currentVideoEndpoint'): + break + # Extract alerts here only when there is error + self._extract_and_report_alerts(data) + if count >= retries: + raise ExtractorError(last_error) + return webpage, data + + @staticmethod + def _smuggle_data(entries, data): + for entry in entries: + if data: + entry['url'] = smuggle_url(entry['url'], data) + yield entry def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + if self.is_music_url(url): + smuggled_data['is_music_url'] = True + info_dict = self.__real_extract(url, smuggled_data) + if info_dict.get('entries'): + info_dict['entries'] = self._smuggle_data(info_dict['entries'], smuggled_data) + return info_dict + + _url_re = re.compile(r'(?P
<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _VALID_URL)
+
+    def __real_extract(self, url, smuggled_data):
         item_id = self._match_id(url)
         url = compat_urlparse.urlunparse(
             compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
-        is_home = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
-        if is_home is not None and is_home.group('not_channel') is None and item_id != 'feed':
-            self._downloader.report_warning(
+        compat_opts = self.get_param('compat_opts', [])
+
+        def get_mobj(url):
+            mobj = self._url_re.match(url).groupdict()
+            mobj.update((k, '') for k, v in mobj.items() if v is None)
+            return mobj
+
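
The _url_re/get_mobj pair above splits a tab URL into pre + tab + post so the tab name can be lower-cased and the URL rebuilt. A minimal self-contained sketch of the idea; the stand-in pattern below is hypothetical (the real _VALID_URL is far larger) but it exposes the same named groups:

    import re

    # Hypothetical stand-in for YoutubeTabIE._VALID_URL with the same group names
    _TOY_VALID_URL = (
        r'https?://(?:www\.)?youtube\.com/'
        r'(?:(?P<channel_type>channel|c|user|browse)/)?'
        r'(?P<not_channel>feed/)?'
        r'(?P<id>[^/?#&]+)')
    _toy_url_re = re.compile(
        r'(?P<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _TOY_VALID_URL)

    def get_mobj(url):
        # Missing groups become '' so callers can safely do mobj['tab'].lower()
        mobj = _toy_url_re.match(url).groupdict()
        mobj.update((k, '') for k, v in mobj.items() if v is None)
        return mobj

    mobj = get_mobj('https://www.youtube.com/c/3blue1brown/playlists?view=1')
    assert mobj['tab'] == '/playlists'
    url = ''.join((mobj['pre'], mobj['tab'].lower(), mobj['post']))  # rebuilt URL
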
+        mobj = get_mobj(url)
+        # Youtube returns incomplete data if the tab name is not lower case
+        pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
+
+        if is_channel:
+            if smuggled_data.get('is_music_url'):
+                if item_id[:2] == 'VL':
+                    # Youtube music VL channels have an equivalent playlist
+                    item_id = item_id[2:]
+                    pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+                elif item_id[:2] == 'MP':
+                    # Youtube music albums (/channel/MP...) have an OLAK playlist that can be extracted from the webpage
+                    item_id = self._search_regex(
+                        r'\\x22audioPlaylistId\\x22:\\x22([0-9A-Za-z_-]+)\\x22',
+                        self._download_webpage('https://music.youtube.com/channel/%s' % item_id, item_id),
+                        'playlist id')
+                    pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+                elif mobj['channel_type'] == 'browse':
+                    # Youtube music /browse/ should be changed to /channel/
+                    pre = 'https://www.youtube.com/channel/%s' % item_id
+        if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
+            # Home URLs should redirect to /videos/
+            self.report_warning(
                 'A channel/user page was given. All the channel\'s videos will be downloaded. '
                 'To download only the videos in the home page, add a "/featured" to the URL')
-            url = '%s/videos%s' % (is_home.group('pre'), is_home.group('post') or '')
+            tab = '/videos'
+
+        url = ''.join((pre, tab, post))
+        mobj = get_mobj(url)
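
The music-URL branches above reduce to rewriting a channel id into a playlist URL. A rough sketch with hypothetical ids; the escaped regex is the one used in the code (album pages embed JS-escaped JSON, so the quote marks appear literally as \x22 in the HTML source):

    import re

    def music_channel_to_playlist_url(item_id, webpage=''):
        if item_id.startswith('VL'):
            # VL channels map 1:1 to the playlist id without the prefix
            return 'https://www.youtube.com/playlist?list=%s' % item_id[2:]
        if item_id.startswith('MP'):
            # Albums carry their OLAK playlist id inside the album webpage
            playlist_id = re.search(
                r'\\x22audioPlaylistId\\x22:\\x22([0-9A-Za-z_-]+)\\x22',
                webpage).group(1)
            return 'https://www.youtube.com/playlist?list=%s' % playlist_id
        # /browse/ ids are plain channels
        return 'https://www.youtube.com/channel/%s' % item_id

    print(music_channel_to_playlist_url('VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq'))
    # -> https://www.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq
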
 
         # Handle both video/playlist URLs
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        qs = parse_qs(url)
         video_id = qs.get('v', [None])[0]
         playlist_id = qs.get('list', [None])[0]
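
The query-string handling here is plain stdlib parsing. For reference (video id hypothetical, playlist id taken from the tests above):

    from urllib.parse import parse_qs, urlparse  # what the module-level helper wraps

    url = 'https://www.youtube.com/watch?v=BaW_jenozKc&list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc'
    qs = parse_qs(urlparse(url).query)
    assert qs.get('v', [None])[0] == 'BaW_jenozKc'
    assert qs.get('list', [None])[0] == 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc'
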
 
-        if is_home is not None and is_home.group('not_channel') is not None and is_home.group('not_channel').startswith('watch') and not video_id:
-            if playlist_id:
-                self._downloader.report_warning('%s is not a valid Youtube URL. Trying to download playlist %s' % (url, playlist_id))
-                url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
-                # return self.url_result(playlist_id, ie=YoutubePlaylistIE.ie_key())
-            else:
+        if not video_id and mobj['not_channel'].startswith('watch'):
+            if not playlist_id:
+                # If there is neither a video nor a playlist id, youtube redirects to the home page, which is undesirable
                 raise ExtractorError('Unable to recognize tab page')
+            # Common mistake: https://www.youtube.com/watch?list=playlist_id
+            self.report_warning('A video URL was given without a video ID. Trying to download playlist %s' % playlist_id)
+            url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
+            mobj = get_mobj(url)
+
         if video_id and playlist_id:
-            if self._downloader.params.get('noplaylist'):
+            if self.get_param('noplaylist'):
                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                 return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
-            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+            self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
 
-        retries = self._downloader.params.get('extractor_retries', 3)
-        count = -1
-        while count < retries:
-            count += 1
-            # Sometimes youtube returns a webpage with incomplete ytInitialData
-            # See: https://github.com/yt-dlp/yt-dlp/issues/116
-            if count:
-                self.report_warning('Incomplete yt initial data recieved. Retrying ...')
-            webpage = self._download_webpage(
-                url, item_id,
-                'Downloading webpage%s' % ' (retry #%d)' % count if count else '')
-            identity_token = self._extract_identity_token(webpage, item_id)
-            data = self._extract_yt_initial_data(item_id, webpage)
-            err_msg = None
-            for alert_type, alert_message in self._extract_alerts(data):
-                if alert_type.lower() == 'error':
-                    if err_msg:
-                        self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
-                    err_msg = alert_message
-                else:
-                    self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
-            if err_msg:
-                raise ExtractorError('YouTube said: %s' % err_msg, expected=True)
-            if data.get('contents') or data.get('currentVideoEndpoint'):
-                break
+        webpage, data = self._extract_webpage(url, item_id)
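
_extract_webpage, added earlier in this patch, wraps the download in a retry loop because YouTube sometimes serves a page whose ytInitialData is incomplete. The pattern, reduced to a generic sketch (names hypothetical; the real method also reports YouTube alerts between attempts):

    def download_complete_data(download, is_complete, retries=3):
        for attempt in range(retries + 1):
            result = download(attempt)   # e.g. fetch the webpage, parse ytInitialData
            if is_complete(result):      # e.g. data.get('contents') is present
                return result
        raise RuntimeError('Incomplete data received after %d retries' % retries)
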
 
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
-            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+            selected_tab = self._extract_selected_tab(tabs)
+            tab_name = selected_tab.get('title', '')
+            if 'no-youtube-channel-redirect' not in compat_opts:
+                if mobj['tab'] == '/live':
+                    # Live tab should have redirected to the video
+                    raise ExtractorError('The channel is not currently live', expected=True)
+                if mobj['tab'] == '/videos' and tab_name.lower() != mobj['tab'][1:]:
+                    if not mobj['not_channel'] and item_id[:2] == 'UC':
+                        # Topic channels don't have /videos. Use the equivalent playlist instead
+                        self.report_warning('The URL does not have a %s tab. Trying to redirect to playlist UU%s instead' % (mobj['tab'][1:], item_id[2:]))
+                        pl_id = 'UU%s' % item_id[2:]
+                        pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
+                        try:
+                            pl_webpage, pl_data = self._extract_webpage(pl_url, pl_id)
+                            for alert_type, alert_message in self._extract_alerts(pl_data):
+                                if alert_type == 'error':
+                                    raise ExtractorError('YouTube said: %s' % alert_message)
+                            item_id, url, webpage, data = pl_id, pl_url, pl_webpage, pl_data
+                        except ExtractorError:
+                            self.report_warning('The playlist returned an error. Falling back to channel URL')
+                    else:
+                        self.report_warning('The URL does not have a %s tab. %s is being downloaded instead' % (mobj['tab'][1:], tab_name))
+
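
The topic-channel fallback above relies on a fixed id convention: a channel id 'UC<x>' has an auto-generated uploads playlist 'UU<x>'. For example, with the 3Blue1Brown channel id from the tests above:

    channel_id = 'UCYO_jab_esuFRV4b17AJtAw'
    uploads_playlist_id = 'UU' + channel_id[2:]   # UUYO_jab_esuFRV4b17AJtAw
    playlist_url = 'https://www.youtube.com/playlist?list=%s' % uploads_playlist_id
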
+        self.write_debug('Final URL: %s' % url)
+
+        # YouTube sometimes provides a button to reload playlist with unavailable videos.
+        if 'no-youtube-unavailable-videos' not in compat_opts:
+            data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
+        self._extract_and_report_alerts(data)
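
_reload_with_unavailable_videos, added earlier in this patch, re-queries the browse endpoint when the playlist sidebar offers a 'show unavailable videos' menu item. The fallback query it builds looks like this (playlist id taken from the unlisted-playlist test above; 'wgYCCAA=' is the default params blob from the code):

    query = {
        'params': 'wgYCCAA=',                                 # 'show unavailable videos'
        'browseId': 'VLPLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',   # 'VL' + playlist id
    }
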
+        tabs = try_get(
+            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
+        if tabs:
+            return self._extract_from_tabs(item_id, webpage, data, tabs)
+
         playlist = try_get(
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
-            return self._extract_from_playlist(item_id, url, data, playlist)
-        # Fallback to video extraction if no playlist alike page is recognized.
-        # First check for the current video then try the v attribute of URL query.
+            return self._extract_from_playlist(item_id, url, data, playlist, webpage)
+
         video_id = try_get(
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
+            if mobj['tab'] != '/live':  # live tab is expected to redirect to video
+                self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
-        # Failed to recognize
+
         raise ExtractorError('Unable to recognize tab page')
 
 
@@ -3120,17 +4204,25 @@ class YoutubePlaylistIE(InfoExtractor):
 
     @classmethod
     def suitable(cls, url):
-        return False if YoutubeTabIE.suitable(url) else super(
-            YoutubePlaylistIE, cls).suitable(url)
+        if YoutubeTabIE.suitable(url):
+            return False
+        # Hack for lazy extractors until a more generic solution is implemented
+        # (see #28780)
+        from .youtube import parse_qs
+        qs = parse_qs(url)
+        if qs.get('v', [None])[0]:
+            return False
+        return super(YoutubePlaylistIE, cls).suitable(url)
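
A condensed sketch of the decision order above (it assumes YoutubeTabIE and the module-level parse_qs from this file; the final return stands in for the super() regex check):

    def playlist_ie_suitable(url):
        if YoutubeTabIE.suitable(url):
            return False
        if parse_qs(url).get('v', [None])[0]:
            # watch?v=...&list=... must reach YoutubeTabIE so that
            # --no-playlist can still pick the single video
            return False
        return True
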
 
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
-        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
-        if not qs:
-            qs = {'list': playlist_id}
-        return self.url_result(
-            update_url_query('https://www.youtube.com/playlist', qs),
-            ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
+        is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
+        url = update_url_query(
+            'https://www.youtube.com/playlist',
+            parse_qs(url) or {'list': playlist_id})
+        if is_music_url:
+            url = smuggle_url(url, {'is_music_url': True})
+        return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
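
smuggle_url/unsmuggle_url (both in yt_dlp.utils) carry the music hint inside the URL itself across the url_result hop. A round-trip, with a hypothetical playlist id:

    from yt_dlp.utils import smuggle_url, unsmuggle_url

    url = smuggle_url('https://www.youtube.com/playlist?list=PLhypothetical123',
                      {'is_music_url': True})
    bare_url, data = unsmuggle_url(url, {})
    assert data == {'is_music_url': True}
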
 
 
 class YoutubeYtBeIE(InfoExtractor):
@@ -3207,7 +4299,7 @@ def _real_extract(self, url):
             ie=YoutubeTabIE.ie_key())
 
 
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
     IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
     # there doesn't appear to be a real limit, for example if you search for
     # 'python' you get more than 8.000.000 results
@@ -3218,26 +4310,17 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
     _TESTS = []
 
     def _entries(self, query, n):
-        data = {
-            'context': {
-                'client': {
-                    'clientName': 'WEB',
-                    'clientVersion': '2.20201021.03.00',
-                }
-            },
-            'query': query,
-        }
+        data = {'query': query}
         if self._SEARCH_PARAMS:
             data['params'] = self._SEARCH_PARAMS
         total = 0
+        continuation = {}
         for page_num in itertools.count(1):
-            search = self._download_json(
-                'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
-                video_id='query "%s"' % query,
-                note='Downloading page %s' % page_num,
-                errnote='Unable to download API page', fatal=False,
-                data=json.dumps(data).encode('utf8'),
-                headers={'content-type': 'application/json'})
+            data.update(continuation)
+            search = self._extract_response(
+                item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
+                check_get_keys=('contents', 'onResponseReceivedCommands')
+            )
             if not search:
                 break
             slr_contents = try_get(
@@ -3251,13 +4334,10 @@ def _entries(self, query, n):
             # Youtube sometimes adds promoted content to searches,
             # changing the index location of videos and token.
             # So we search through all entries till we find them.
-            continuation_token = None
+            continuation = None
             for slr_content in slr_contents:
-                if continuation_token is None:
-                    continuation_token = try_get(
-                        slr_content,
-                        lambda x: x['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
-                        compat_str)
+                if not continuation:
+                    continuation = self._extract_continuation({'contents': [slr_content]})
 
                 isr_contents = try_get(
                     slr_content,
@@ -3280,9 +4360,8 @@ def _entries(self, query, n):
                     if total == n:
                         return
 
-            if not continuation_token:
+            if not continuation:
                 break
-            data['continuation'] = continuation_token
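
The search loop above follows the same continuation-driven paging used throughout this patch; its skeleton, with all three callbacks hypothetical:

    def paged_entries(fetch_page, extract_items, extract_continuation):
        continuation = {}
        while True:
            page = fetch_page(continuation)   # POST to the innertube endpoint
            if not page:
                return
            for item in extract_items(page):  # skipping promoted content etc.
                yield item
            continuation = extract_continuation(page)  # may sit at any index
            if not continuation:
                return
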
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""
@@ -3329,16 +4408,12 @@ class YoutubeFeedsInfoExtractor(YoutubeTabIE):
     Subclasses must define the _FEED_NAME property.
     """
     _LOGIN_REQUIRED = True
-    # _MAX_PAGES = 5
     _TESTS = []
 
     @property
     def IE_NAME(self):
         return 'youtube:%s' % self._FEED_NAME
 
-    def _real_initialize(self):
-        self._login()
-
     def _real_extract(self, url):
         return self.url_result(
             'https://www.youtube.com/feed/%s' % self._FEED_NAME,
@@ -3363,6 +4438,7 @@ class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
     IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
     _FEED_NAME = 'recommended'
+    _LOGIN_REQUIRED = False
     _TESTS = [{
         'url': ':ytrec',
         'only_matching': True,
@@ -3389,8 +4465,8 @@ class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
 
 
 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
-    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
-    _VALID_URL = r':ythistory'
+    IE_DESC = 'Youtube watch history, ":ythis" for short (requires authentication)'
+    _VALID_URL = r':ythis(?:tory)?'
     _FEED_NAME = 'history'
     _TESTS = [{
         'url': ':ythistory',