)
from ..jsinterp import JSInterpreter
from ..utils import (
+ bool_or_none,
clean_html,
+ dict_get,
ExtractorError,
format_field,
float_or_none,
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_RESERVED_NAMES = (
- r'embed|e|watch_popup|channel|c|user|playlist|watch|w|v|movies|results|shared|hashtag|'
- r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout|'
- r'feed/(?:watch_later|history|subscriptions|library|trending|recommended)')
+ r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|'
+ r'movies|results|shared|hashtag|trending|feed|feeds|'
+ r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
return True
- def _download_webpage_handle(self, *args, **kwargs):
- query = kwargs.get('query', {}).copy()
- kwargs['query'] = query
- return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
- *args, **compat_kwargs(kwargs))
+ def _initialize_consent(self):
+ cookies = self._get_cookies('https://www.youtube.com/')
+ if cookies.get('__Secure-3PSID'):
+ return
+ consent_id = None
+ consent = cookies.get('CONSENT')
+ if consent:
+ if 'YES' in consent.value:
+ return
+ consent_id = self._search_regex(
+ r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+ if not consent_id:
+ consent_id = random.randint(100, 999)
+ self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _real_initialize(self):
+ self._initialize_consent()
if self._downloader is None:
return
if not self._login():
return
+ _YT_WEB_CLIENT_VERSION = '2.20210301.08.00'
_DEFAULT_API_DATA = {
'context': {
'client': {
'clientName': 'WEB',
- 'clientVersion': '2.20210301.08.00',
+ 'clientVersion': _YT_WEB_CLIENT_VERSION,
}
},
}
+ _DEFAULT_BASIC_API_HEADERS = {
+ 'X-YouTube-Client-Name': '1',
+ 'X-YouTube-Client-Version': _YT_WEB_CLIENT_VERSION
+ }
+
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
auth = self._generate_sapisidhash_header()
if auth is not None:
headers.update({'Authorization': auth, 'X-Origin': 'https://www.youtube.com'})
-
return self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep,
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
video_id)
+ def _extract_identity_token(self, webpage, item_id):
+ ytcfg = self._extract_ytcfg(item_id, webpage)
+ if ytcfg:
+ token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
+ if token:
+ return token
+ return self._search_regex(
+ r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+ 'identity token', default=None)
+
+ @staticmethod
+ def _extract_account_syncid(data):
+ """Extract syncId required to download private playlists of secondary channels"""
+ sync_ids = (
+ try_get(data, lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'], compat_str)
+ or '').split("||")
+ if len(sync_ids) >= 2 and sync_ids[1]:
+ # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
+ # and just "user_syncid||" for primary channel. We only want the channel_syncid
+ return sync_ids[0]
+
def _extract_ytcfg(self, video_id, webpage):
return self._parse_json(
self._search_regex(
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
+ @staticmethod
+ def _join_text_entries(runs):
+ text = None
+ for run in runs:
+ if not isinstance(run, dict):
+ continue
+ sub_text = try_get(run, lambda x: x['text'], compat_str)
+ if sub_text:
+ if not text:
+ text = sub_text
+ continue
+ text += sub_text
+ return text
+
+ def _extract_comment(self, comment_renderer, parent=None):
+ comment_id = comment_renderer.get('commentId')
+ if not comment_id:
+ return
+ comment_text_runs = try_get(comment_renderer, lambda x: x['contentText']['runs']) or []
+ text = self._join_text_entries(comment_text_runs) or ''
+ comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
+ time_text = self._join_text_entries(comment_time_text)
+
+ author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
+ author_id = try_get(comment_renderer,
+ lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
+ votes = str_to_int(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
+ lambda x: x['likeCount']), compat_str)) or 0
+ author_thumbnail = try_get(comment_renderer,
+ lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
+
+ author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
+ is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
+
+ return {
+ 'id': comment_id,
+ 'text': text,
+ # TODO: This should be parsed to timestamp
+ 'time_text': time_text,
+ 'like_count': votes,
+ 'is_favorited': is_liked,
+ 'author': author,
+ 'author_id': author_id,
+ 'author_thumbnail': author_thumbnail,
+ 'author_is_uploader': author_is_uploader,
+ 'parent': parent or 'root'
+ }
+
+ def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
+ session_token_list, parent=None, comment_counts=None):
+
+ def extract_thread(parent_renderer):
+ contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
+ if not parent:
+ comment_counts[2] = 0
+ for content in contents:
+ comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
+ comment_renderer = try_get(
+ comment_thread_renderer, lambda x: x['comment']['commentRenderer'], dict) or try_get(
+ content, lambda x: x['commentRenderer'], dict)
+
+ if not comment_renderer:
+ continue
+ comment = self._extract_comment(comment_renderer, parent)
+ if not comment:
+ continue
+ comment_counts[0] += 1
+ yield comment
+ # Attempt to get the replies
+ comment_replies_renderer = try_get(
+ comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
+
+ if comment_replies_renderer:
+ comment_counts[2] += 1
+ comment_entries_iter = self._comment_entries(
+ comment_replies_renderer, identity_token, account_syncid,
+ parent=comment.get('id'), session_token_list=session_token_list,
+ comment_counts=comment_counts)
+
+ for reply_comment in comment_entries_iter:
+ yield reply_comment
+
+ if not comment_counts:
+ # comment so far, est. total comments, current comment thread #
+ comment_counts = [0, 0, 0]
+ headers = self._DEFAULT_BASIC_API_HEADERS.copy()
+
+ # TODO: Generalize the download code with TabIE
+ if identity_token:
+ headers['x-youtube-identity-token'] = identity_token
+
+ if account_syncid:
+ headers['X-Goog-PageId'] = account_syncid
+ headers['X-Goog-AuthUser'] = 0
+
+ continuation = YoutubeTabIE._extract_continuation(root_continuation_data) # TODO
+ first_continuation = False
+ if parent is None:
+ first_continuation = True
+
+ for page_num in itertools.count(0):
+ if not continuation:
+ break
+ retries = self._downloader.params.get('extractor_retries', 3)
+ count = -1
+ last_error = None
+
+ while count < retries:
+ count += 1
+ if last_error:
+ self.report_warning('%s. Retrying ...' % last_error)
+ try:
+ query = {
+ 'ctoken': continuation['ctoken'],
+ 'pbj': 1,
+ 'type': 'next',
+ }
+ if parent:
+ query['action_get_comment_replies'] = 1
+ else:
+ query['action_get_comments'] = 1
+
+ comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
+ if page_num == 0:
+ if first_continuation:
+ note_prefix = "Downloading initial comment continuation page"
+ else:
+ note_prefix = " Downloading comment reply thread %d %s" % (comment_counts[2], comment_prog_str)
+ else:
+ note_prefix = "%sDownloading comment%s page %d %s" % (
+ " " if parent else "",
+ ' replies' if parent else '',
+ page_num,
+ comment_prog_str)
+
+ browse = self._download_json(
+ 'https://www.youtube.com/comment_service_ajax', None,
+ '%s %s' % (note_prefix, '(retry #%d)' % count if count else ''),
+ headers=headers, query=query,
+ data=urlencode_postdata({
+ 'session_token': session_token_list[0]
+ }))
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
+ if e.cause.code == 413:
+ self.report_warning("Assumed end of comments (received HTTP Error 413)")
+ return
+ # Downloading page may result in intermittent 5xx HTTP error
+ # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+ last_error = 'HTTP Error %s' % e.cause.code
+ if e.cause.code == 404:
+ last_error = last_error + " (this API is probably deprecated)"
+ if count < retries:
+ continue
+ raise
+ else:
+ session_token = try_get(browse, lambda x: x['xsrf_token'], compat_str)
+ if session_token:
+ session_token_list[0] = session_token
+
+ response = try_get(browse,
+ (lambda x: x['response'],
+ lambda x: x[1]['response'])) or {}
+
+ if response.get('continuationContents'):
+ break
+
+ # YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
+ if browse.get('reload'):
+ raise ExtractorError("Invalid or missing params in continuation request", expected=False)
+
+ # TODO: not tested, merged from old extractor
+ err_msg = browse.get('externalErrorMessage')
+ if err_msg:
+ raise ExtractorError('YouTube said: %s' % err_msg, expected=False)
+
+ # Youtube sometimes sends incomplete data
+ # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+ last_error = 'Incomplete data received'
+ if count >= retries:
+ self._downloader.report_error(last_error)
+
+ if not response:
+ break
+
+ known_continuation_renderers = {
+ 'itemSectionContinuation': extract_thread,
+ 'commentRepliesContinuation': extract_thread
+ }
+
+ # extract next root continuation from the results
+ continuation_contents = try_get(
+ response, lambda x: x['continuationContents'], dict) or {}
+
+ for key, value in continuation_contents.items():
+ if key not in known_continuation_renderers:
+ continue
+ continuation_renderer = value
+
+ if first_continuation:
+ first_continuation = False
+ expected_comment_count = try_get(
+ continuation_renderer,
+ (lambda x: x['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'],
+ lambda x: x['header']['commentsHeaderRenderer']['commentsCount']['runs'][0]['text']),
+ compat_str)
+
+ if expected_comment_count:
+ comment_counts[1] = str_to_int(expected_comment_count)
+ self.to_screen("Downloading ~%d comments" % str_to_int(expected_comment_count))
+ yield comment_counts[1]
+
+ # TODO: cli arg.
+ # 1/True for newest, 0/False for popular (default)
+ comment_sort_index = int(True)
+ sort_continuation_renderer = try_get(
+ continuation_renderer,
+ lambda x: x['header']['commentsHeaderRenderer']['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems']
+ [comment_sort_index]['continuation']['reloadContinuationData'], dict)
+ # If this fails, the initial continuation page
+ # starts off with popular anyways.
+ if sort_continuation_renderer:
+ continuation = YoutubeTabIE._build_continuation_query(
+ continuation=sort_continuation_renderer.get('continuation'),
+ ctp=sort_continuation_renderer.get('clickTrackingParams'))
+ self.to_screen("Sorting comments by %s" % ('popular' if comment_sort_index == 0 else 'newest'))
+ break
+
+ for entry in known_continuation_renderers[key](continuation_renderer):
+ yield entry
+
+ continuation = YoutubeTabIE._extract_continuation(continuation_renderer) # TODO
+ break
+
+ def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
+ """Entry for comment extraction"""
+ comments = []
+ known_entry_comment_renderers = (
+ 'itemSectionRenderer',
+ )
+ estimated_total = 0
+ for entry in contents:
+ for key, renderer in entry.items():
+ if key not in known_entry_comment_renderers:
+ continue
+
+ comment_iter = self._comment_entries(
+ renderer,
+ identity_token=self._extract_identity_token(webpage, item_id=video_id),
+ account_syncid=self._extract_account_syncid(ytcfg),
+ session_token_list=[xsrf_token])
+
+ for comment in comment_iter:
+ if isinstance(comment, int):
+ estimated_total = comment
+ continue
+ comments.append(comment)
+ break
+ self.to_screen("Downloaded %d/%d comments" % (len(comments), estimated_total))
+ return {
+ 'comments': comments,
+ 'comment_count': len(comments),
+ }
+
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage = self._download_webpage(
- webpage_url + '&has_verified=1&bpctr=9999999999',
- video_id, fatal=False)
+ webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
player_response = None
if webpage:
f['format_id'] = itag
formats.append(f)
- if self._downloader.params.get('youtube_include_dash_manifest'):
+ if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_manifest_url = streaming_data.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(
'tags': keywords,
'is_live': is_live,
'playable_in_embed': playability_status.get('playableInEmbed'),
- 'was_live': video_details.get('isLiveContent')
+ 'was_live': video_details.get('isLiveContent'),
}
pctr = try_get(
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
info['subtitles']['live_chat'] = [{
+ 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat_replay',
info['channel'] = get_text(try_get(
vsir,
lambda x: x['owner']['videoOwnerRenderer']['title'],
- compat_str))
+ dict))
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
if v:
info[d_k] = v
+ is_private = bool_or_none(video_details.get('isPrivate'))
+ is_unlisted = bool_or_none(microformat.get('isUnlisted'))
+ is_membersonly = None
+ if initial_data and is_private is not None:
+ is_membersonly = False
+ contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
+ for content in contents or []:
+ badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
+ for badge in badges or []:
+ label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label']) or ''
+ if label.lower() == 'members only':
+ is_membersonly = True
+ break
+ if is_membersonly:
+ break
+
+ # TODO: Add this for playlists
+ info['availability'] = self._availability(
+ is_private=is_private,
+ needs_premium=False, # Youtube no longer have premium-only videos?
+ needs_subscription=is_membersonly,
+ needs_auth=info['age_limit'] >= 18,
+ is_unlisted=None if is_private is None else is_unlisted)
+
# get xsrf for annotations or comments
get_annotations = self._downloader.params.get('writeannotations', False)
get_comments = self._downloader.params.get('getcomments', False)
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
- # Get comments
- # TODO: Refactor and move to seperate function
- def extract_comments():
- expected_video_comment_count = 0
- video_comments = []
- comment_xsrf = xsrf_token
-
- def find_value(html, key, num_chars=2, separator='"'):
- pos_begin = html.find(key) + len(key) + num_chars
- pos_end = html.find(separator, pos_begin)
- return html[pos_begin: pos_end]
-
- def search_dict(partial, key):
- if isinstance(partial, dict):
- for k, v in partial.items():
- if k == key:
- yield v
- else:
- for o in search_dict(v, key):
- yield o
- elif isinstance(partial, list):
- for i in partial:
- for o in search_dict(i, key):
- yield o
-
- continuations = []
- if initial_data:
- try:
- ncd = next(search_dict(initial_data, 'nextContinuationData'))
- continuations = [ncd['continuation']]
- # Handle videos where comments have been disabled entirely
- except StopIteration:
- pass
-
- def get_continuation(continuation, session_token, replies=False):
- query = {
- 'pbj': 1,
- 'ctoken': continuation,
- }
- if replies:
- query['action_get_comment_replies'] = 1
- else:
- query['action_get_comments'] = 1
-
- while True:
- content, handle = self._download_webpage_handle(
- 'https://www.youtube.com/comment_service_ajax',
- video_id,
- note=False,
- expected_status=[413],
- data=urlencode_postdata({
- 'session_token': session_token
- }),
- query=query,
- headers={
- 'Accept': '*/*',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
- 'X-YouTube-Client-Name': '1',
- 'X-YouTube-Client-Version': '2.20201202.06.01'
- }
- )
-
- response_code = handle.getcode()
- if (response_code == 200):
- return self._parse_json(content, video_id)
- if (response_code == 413):
- return None
- raise ExtractorError('Unexpected HTTP error code: %s' % response_code)
-
- first_continuation = True
- chain_msg = ''
- self.to_screen('Downloading comments')
- while continuations:
- continuation = continuations.pop()
- comment_response = get_continuation(continuation, comment_xsrf)
- if not comment_response:
- continue
- if list(search_dict(comment_response, 'externalErrorMessage')):
- raise ExtractorError('Error returned from server: ' + next(search_dict(comment_response, 'externalErrorMessage')))
-
- if 'continuationContents' not in comment_response['response']:
- # Something is wrong here. Youtube won't accept this continuation token for some reason and responds with a user satisfaction dialog (error?)
- continue
- # not sure if this actually helps
- if 'xsrf_token' in comment_response:
- comment_xsrf = comment_response['xsrf_token']
-
- item_section = comment_response['response']['continuationContents']['itemSectionContinuation']
- if first_continuation:
- expected_video_comment_count = int(item_section['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'].replace(' Comments', '').replace('1 Comment', '1').replace(',', ''))
- first_continuation = False
- if 'contents' not in item_section:
- # continuation returned no comments?
- # set an empty array as to not break the for loop
- item_section['contents'] = []
-
- for meta_comment in item_section['contents']:
- comment = meta_comment['commentThreadRenderer']['comment']['commentRenderer']
- video_comments.append({
- 'id': comment['commentId'],
- 'text': ''.join([c['text'] for c in try_get(comment, lambda x: x['contentText']['runs'], list) or []]),
- 'time_text': ''.join([c['text'] for c in comment['publishedTimeText']['runs']]),
- 'author': comment.get('authorText', {}).get('simpleText', ''),
- 'votes': comment.get('voteCount', {}).get('simpleText', '0'),
- 'author_thumbnail': comment['authorThumbnail']['thumbnails'][-1]['url'],
- 'parent': 'root'
- })
- if 'replies' not in meta_comment['commentThreadRenderer']:
- continue
-
- reply_continuations = [rcn['nextContinuationData']['continuation'] for rcn in meta_comment['commentThreadRenderer']['replies']['commentRepliesRenderer']['continuations']]
- while reply_continuations:
- time.sleep(1)
- continuation = reply_continuations.pop()
- replies_data = get_continuation(continuation, comment_xsrf, True)
- if not replies_data or 'continuationContents' not in replies_data[1]['response']:
- continue
-
- if self._downloader.params.get('verbose', False):
- chain_msg = ' (chain %s)' % comment['commentId']
- self.to_screen('Comments downloaded: %d of ~%d%s' % (len(video_comments), expected_video_comment_count, chain_msg))
- reply_comment_meta = replies_data[1]['response']['continuationContents']['commentRepliesContinuation']
- for reply_meta in reply_comment_meta.get('contents', {}):
- reply_comment = reply_meta['commentRenderer']
- video_comments.append({
- 'id': reply_comment['commentId'],
- 'text': ''.join([c['text'] for c in reply_comment['contentText']['runs']]),
- 'time_text': ''.join([c['text'] for c in reply_comment['publishedTimeText']['runs']]),
- 'author': reply_comment.get('authorText', {}).get('simpleText', ''),
- 'votes': reply_comment.get('voteCount', {}).get('simpleText', '0'),
- 'author_thumbnail': reply_comment['authorThumbnail']['thumbnails'][-1]['url'],
- 'parent': comment['commentId']
- })
- if 'continuations' not in reply_comment_meta or len(reply_comment_meta['continuations']) == 0:
- continue
- reply_continuations += [rcn['nextContinuationData']['continuation'] for rcn in reply_comment_meta['continuations']]
-
- self.to_screen('Comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
- if 'continuations' in item_section:
- continuations += [ncd['nextContinuationData']['continuation'] for ncd in item_section['continuations']]
- time.sleep(1)
-
- self.to_screen('Total comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
- return {
- 'comments': video_comments,
- 'comment_count': expected_video_comment_count
- }
-
if get_comments:
- info['__post_extractor'] = extract_comments
+ info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage, xsrf_token)
self.mark_watched(video_id, player_response)
channel_url, 'channel id')
@staticmethod
- def _extract_grid_item_renderer(item):
- for item_kind in ('Playlist', 'Video', 'Channel'):
- renderer = item.get('grid%sRenderer' % item_kind)
- if renderer:
- return renderer
+ def _extract_basic_item_renderer(item):
+ # Modified from _extract_grid_item_renderer
+ known_renderers = (
+ 'playlistRenderer', 'videoRenderer', 'channelRenderer',
+ 'gridPlaylistRenderer', 'gridVideoRenderer', 'gridChannelRenderer'
+ )
+ for key, renderer in item.items():
+ if key not in known_renderers:
+ continue
+ return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
- renderer = self._extract_grid_item_renderer(item)
+ renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
- renderer = content.get('gridRenderer')
+ renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
continue
yield self._extract_video(renderer)
- r""" # Not needed in the new implementation
- def _itemSection_entries(self, item_sect_renderer):
- for content in item_sect_renderer['contents']:
- if not isinstance(content, dict):
- continue
- renderer = content.get('videoRenderer', {})
- if not isinstance(renderer, dict):
- continue
- video_id = renderer.get('videoId')
- if not video_id:
- continue
- yield self._extract_video(renderer)
- """
-
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
ctp = continuation_ep.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
- def _entries(self, tab, identity_token, item_id):
+ def _entries(self, tab, item_id, identity_token, account_syncid):
def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
if identity_token:
headers['x-youtube-identity-token'] = identity_token
+ if account_syncid:
+ headers['X-Goog-PageId'] = account_syncid
+ headers['X-Goog-AuthUser'] = 0
+
for page_num in itertools.count(1):
if not continuation:
break
else:
# Youtube sometimes sends incomplete data
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
- if response.get('continuationContents') or response.get('onResponseReceivedActions'):
+ if dict_get(response,
+ ('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints')):
break
- last_error = 'Incomplete data recieved'
+
+ # Youtube may send alerts if there was an issue with the continuation page
+ self._extract_alerts(response, expected=False)
+
+ last_error = 'Incomplete data received'
if count >= retries:
self._downloader.report_error(last_error)
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
- 'itemSectionRenderer': (self._playlist_entries, 'contents'),
+ 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
+ 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
+ on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
- response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
+ on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
- def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
+ def _extract_from_tabs(self, item_id, webpage, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
thumbnails_list = tags = []
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
- self._entries(selected_tab, identity_token, playlist_id),
+ self._entries(
+ selected_tab, playlist_id,
+ self._extract_identity_token(webpage, item_id),
+ self._extract_account_syncid(data)),
**metadata)
+ def _extract_mix_playlist(self, playlist, playlist_id):
+ first_id = last_id = None
+ for page_num in itertools.count(1):
+ videos = list(self._playlist_entries(playlist))
+ if not videos:
+ return
+ start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
+ if start >= len(videos):
+ return
+ for video in videos[start:]:
+ if video['id'] == first_id:
+ self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
+ return
+ yield video
+ first_id = first_id or videos[0]['id']
+ last_id = videos[-1]['id']
+
+ _, data = self._extract_webpage(
+ 'https://www.youtube.com/watch?list=%s&v=%s' % (playlist_id, last_id),
+ '%s page %d' % (playlist_id, page_num))
+ playlist = try_get(
+ data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+
def _extract_from_playlist(self, item_id, url, data, playlist):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
- # Inline playlist rendition continuation does not always work
- # at Youtube side, so delegating regular tab-based playlist URL
- # processing whenever possible.
+
+ # Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
+
return self.playlist_result(
- self._playlist_entries(playlist), playlist_id=playlist_id,
- playlist_title=title)
+ self._extract_mix_playlist(playlist, playlist_id),
+ playlist_id=playlist_id, playlist_title=title)
- @staticmethod
- def _extract_alerts(data):
- for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
- if not isinstance(alert_dict, dict):
- continue
- for renderer in alert_dict:
- alert = alert_dict[renderer]
- alert_type = alert.get('type')
- if not alert_type:
+ def _extract_alerts(self, data, expected=False):
+
+ def _real_extract_alerts():
+ for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
+ if not isinstance(alert_dict, dict):
continue
- message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
- if message:
- yield alert_type, message
- for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
- message = try_get(run, lambda x: x['text'], compat_str)
+ for alert in alert_dict.values():
+ alert_type = alert.get('type')
+ if not alert_type:
+ continue
+ message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
if message:
yield alert_type, message
+ for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
+ message = try_get(run, lambda x: x['text'], compat_str)
+ if message:
+ yield alert_type, message
+
+ err_msg = None
+ for alert_type, alert_message in _real_extract_alerts():
+ if alert_type.lower() == 'error':
+ if err_msg:
+ self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
+ err_msg = alert_message
+ else:
+ self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
- def _extract_identity_token(self, webpage, item_id):
- ytcfg = self._extract_ytcfg(item_id, webpage)
- if ytcfg:
- token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
- if token:
- return token
- return self._search_regex(
- r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
- 'identity token', default=None)
+ if err_msg:
+ raise ExtractorError('YouTube said: %s' % err_msg, expected=expected)
+
+ def _extract_webpage(self, url, item_id):
+ retries = self._downloader.params.get('extractor_retries', 3)
+ count = -1
+ last_error = 'Incomplete yt initial data received'
+ while count < retries:
+ count += 1
+ # Sometimes youtube returns a webpage with incomplete ytInitialData
+ # See: https://github.com/yt-dlp/yt-dlp/issues/116
+ if count:
+ self.report_warning('%s. Retrying ...' % last_error)
+ webpage = self._download_webpage(
+ url, item_id,
+ 'Downloading webpage%s' % (' (retry #%d)' % count if count else ''))
+ data = self._extract_yt_initial_data(item_id, webpage)
+ self._extract_alerts(data, expected=True)
+ if data.get('contents') or data.get('currentVideoEndpoint'):
+ break
+ if count >= retries:
+ self._downloader.report_error(last_error)
+ return webpage, data
def _real_extract(self, url):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
- is_home = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
- if is_home is not None and is_home.group('not_channel') is None and item_id != 'feed':
+
+ # This is not matched in a channel page with a tab selected
+ mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
+ mobj = mobj.groupdict() if mobj else {}
+ if mobj and not mobj.get('not_channel'):
self._downloader.report_warning(
'A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
- url = '%s/videos%s' % (is_home.group('pre'), is_home.group('post') or '')
+ url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
# Handle both video/playlist URLs
qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
video_id = qs.get('v', [None])[0]
playlist_id = qs.get('list', [None])[0]
- if is_home is not None and is_home.group('not_channel') is not None and is_home.group('not_channel').startswith('watch') and not video_id:
- if playlist_id:
- self._downloader.report_warning('%s is not a valid Youtube URL. Trying to download playlist %s' % (url, playlist_id))
- url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
- # return self.url_result(playlist_id, ie=YoutubePlaylistIE.ie_key())
- else:
+ if not video_id and (mobj.get('not_channel') or '').startswith('watch'):
+ if not playlist_id:
+ # If there is neither video or playlist ids,
+ # youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
+ self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+ url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
+
if video_id and playlist_id:
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
- self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+ self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
- retries = self._downloader.params.get('extractor_retries', 3)
- count = -1
- last_error = 'Incomplete yt initial data recieved'
- while count < retries:
- count += 1
- # Sometimes youtube returns a webpage with incomplete ytInitialData
- # See: https://github.com/yt-dlp/yt-dlp/issues/116
- if count:
- self.report_warning('%s. Retrying ...' % last_error)
- webpage = self._download_webpage(
- url, item_id,
- 'Downloading webpage%s' % ' (retry #%d)' % count if count else '')
- identity_token = self._extract_identity_token(webpage, item_id)
- data = self._extract_yt_initial_data(item_id, webpage)
- err_msg = None
- for alert_type, alert_message in self._extract_alerts(data):
- if alert_type.lower() == 'error':
- if err_msg:
- self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
- err_msg = alert_message
- else:
- self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
- if err_msg:
- raise ExtractorError('YouTube said: %s' % err_msg, expected=True)
- if data.get('contents') or data.get('currentVideoEndpoint'):
- break
- if count >= retries:
- self._downloader.report_error(last_error)
+ webpage, data = self._extract_webpage(url, item_id)
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
if tabs:
- return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+ return self._extract_from_tabs(item_id, webpage, data, tabs)
+
playlist = try_get(
data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
if playlist:
return self._extract_from_playlist(item_id, url, data, playlist)
- # Fallback to video extraction if no playlist alike page is recognized.
- # First check for the current video then try the v attribute of URL query.
+
video_id = try_get(
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
compat_str) or video_id
if video_id:
+ self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
- # Failed to recognize
+
raise ExtractorError('Unable to recognize tab page')
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
- # _MAX_PAGES = 5
_TESTS = []
@property