from __future__ import unicode_literals
+import calendar
import hashlib
import itertools
import json
bool_or_none,
clean_html,
dict_get,
+ datetime_from_str,
+ error_to_compat_str,
ExtractorError,
format_field,
float_or_none,
update_url_query,
url_or_none,
urlencode_postdata,
- urljoin,
+ urljoin
)
def parse_qs(url):
    """Return the query string of *url* parsed into a dict of lists."""
    parsed_url = compat_urlparse.urlparse(url)
    return compat_urlparse.parse_qs(parsed_url.query)
+
+
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_RESERVED_NAMES = (
r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|'
- r'movies|results|shared|hashtag|trending|feed|feeds|'
+ r'movies|results|shared|hashtag|trending|feed|feeds|oembed|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_NETRC_MACHINE = 'youtube'
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
- def _ids_to_results(self, ids):
- return [
- self.url_result(vid_id, 'Youtube', video_id=vid_id)
- for vid_id in ids]
-
def _login(self):
"""
Attempt to log in to YouTube.
})
def warn(message):
- self._downloader.report_warning(message)
+ self.report_warning(message)
lookup_req = [
username,
if not self._login():
return
- _YT_WEB_CLIENT_VERSION = '2.20210301.08.00'
- _DEFAULT_API_DATA = {
- 'context': {
- 'client': {
- 'clientName': 'WEB',
- 'clientVersion': _YT_WEB_CLIENT_VERSION,
- }
- },
- }
-
- _DEFAULT_BASIC_API_HEADERS = {
- 'X-YouTube-Client-Name': '1',
- 'X-YouTube-Client-Version': _YT_WEB_CLIENT_VERSION
- }
-
+ _YT_WEB_CLIENT_VERSION = '2.20210407.08.00'
+ _YT_INNERTUBE_API_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
return "SAPISIDHASH %s_%s" % (time_now, sapisidhash)
def _call_api(self, ep, query, video_id, fatal=True, headers=None,
- note='Downloading API JSON', errnote='Unable to download API page'):
- data = self._DEFAULT_API_DATA.copy()
+ note='Downloading API JSON', errnote='Unable to download API page',
+ context=None, api_key=None):
+
+ data = {'context': context} if context else {'context': self._extract_context()}
data.update(query)
- headers = headers or {}
- headers.update({'content-type': 'application/json'})
- auth = self._generate_sapisidhash_header()
- if auth is not None:
- headers.update({'Authorization': auth, 'X-Origin': 'https://www.youtube.com'})
+ real_headers = self._generate_api_headers()
+ real_headers.update({'content-type': 'application/json'})
+ if headers:
+ real_headers.update(headers)
return self._download_json(
'https://www.youtube.com/youtubei/v1/%s' % ep,
video_id=video_id, fatal=fatal, note=note, errnote=errnote,
- data=json.dumps(data).encode('utf8'), headers=headers,
- query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
+ data=json.dumps(data).encode('utf8'), headers=real_headers,
+ query={'key': api_key or self._extract_api_key()})
+
+ def _extract_api_key(self, ytcfg=None):
+ return try_get(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str) or self._YT_INNERTUBE_API_KEY
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
    @staticmethod
    def _extract_account_syncid(data):
        """
        Extract syncId required to download private playlists of secondary channels
        @param data Either response or ytcfg

        Returns None when no channel syncid can be determined from *data*.
        """
        # Try the API-response location first, then the ytcfg key.
        sync_ids = (try_get(
            data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
                   lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
        if len(sync_ids) >= 2 and sync_ids[1]:
            # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
            # and just "user_syncid||" for primary channel. We only want the channel_syncid
            return sync_ids[0]
        # ytcfg includes channel_syncid if on secondary channel
        # NOTE(review): assumes *data* is a dict here (both callers pass one).
        return data.get('DELEGATED_SESSION_ID')
def _extract_ytcfg(self, video_id, webpage):
+ if not webpage:
+ return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
- default='{}'), video_id, fatal=False)
+ default='{}'), video_id, fatal=False) or {}
+
+ def __extract_client_version(self, ytcfg):
+ return try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or self._YT_WEB_CLIENT_VERSION
+
    def _extract_context(self, ytcfg=None):
        """Return the innertube request context, preferring the one in *ytcfg*."""
        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict)
        if context:
            return context

        # Recreate the client context (required)
        client_version = self.__extract_client_version(ytcfg)
        client_name = try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_NAME'], compat_str) or 'WEB'
        context = {
            'client': {
                'clientName': client_name,
                'clientVersion': client_version,
            }
        }
        # Carry over the visitor id when the page supplied one.
        visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
        if visitor_data:
            context['client']['visitorData'] = visitor_data
        return context
+
    def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None, visitor_data=None):
        """Build the common headers for innertube API requests.

        All arguments are optional; the Authorization/X-Origin pair is only
        added when _generate_sapisidhash_header() returns a value.
        """
        headers = {
            'X-YouTube-Client-Name': '1',
            'X-YouTube-Client-Version': self.__extract_client_version(ytcfg),
        }
        if identity_token:
            headers['x-youtube-identity-token'] = identity_token
        if account_syncid:
            headers['X-Goog-PageId'] = account_syncid
            headers['X-Goog-AuthUser'] = 0
        if visitor_data:
            headers['x-goog-visitor-id'] = visitor_data
        auth = self._generate_sapisidhash_header()
        if auth is not None:
            headers['Authorization'] = auth
            headers['X-Origin'] = 'https://www.youtube.com'
        return headers
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
(lambda x: x['ownerText']['runs'][0]['text'],
lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
return {
- '_type': 'url_transparent',
+ '_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
- r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.zee\.li',
- r'(?:www\.)?vid\.puffyan\.us',
r'(?:(?:www|au)\.)?ytprivate\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.ethibox\.fr',
- r'(?:www\.)?inv\.skyn3t\.in',
- r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
- r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?invidious\.kavin\.rocks',
+ r'(?:www\.)?invidious\.tinfoil-hat\.net',
+ r'(?:www\.)?invidious\.himiko\.cloud',
+ r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
+ r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
+ r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
+ r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
+ r'(?:www\.)?ytprivate\.com',
+ r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
- (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
- (?!.*?\blist=
- (?:
- %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
- WL # WL are handled by the watch later IE
- )
- )
+ (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
$""" % {
- 'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
},
'skip': 'This video does not exist.',
},
+ {
+ # Video with incomplete 'yt:stretch=16:'
+ 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
+ 'only_matching': True,
+ },
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
+ {
+ # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
+ 'url': 'cBvYw8_A0vQ',
+ 'info_dict': {
+ 'id': 'cBvYw8_A0vQ',
+ 'ext': 'mp4',
+ 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
+ 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
+ 'upload_date': '20201120',
+ 'uploader': 'Walk around Japan',
+ 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
+ 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ },
]
    @classmethod
    def suitable(cls, url):
        """Match only URLs without a non-empty 'list' query parameter."""
        # Hack for lazy extractors until more generic solution is implemented
        # (see #28780)
        from .youtube import parse_qs
        qs = parse_qs(url)
        if qs.get('list', [None])[0]:
            return False
        return super(YoutubeIE, cls).suitable(url)
+
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._code_cache = {}
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
    @staticmethod
    def parse_time_text(time_text):
        """
        Parse the comment time text
        time_text is in the format 'X units ago (edited)'

        Returns None (implicitly) when time_text has fewer than three
        space-separated tokens.
        """
        time_text_split = time_text.split(' ')
        if len(time_text_split) >= 3:
            # e.g. '2 days ago' -> 'now-2days'
            return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
+
@staticmethod
def _join_text_entries(runs):
text = None
text = self._join_text_entries(comment_text_runs) or ''
comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
time_text = self._join_text_entries(comment_time_text)
-
+ timestamp = calendar.timegm(self.parse_time_text(time_text).timetuple())
author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
author_id = try_get(comment_renderer,
lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
-
return {
'id': comment_id,
'text': text,
- # TODO: This should be parsed to timestamp
+ 'timestamp': timestamp,
'time_text': time_text,
'like_count': votes,
'is_favorited': is_liked,
}
def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
- session_token_list, parent=None, comment_counts=None):
+ ytcfg, session_token_list, parent=None, comment_counts=None):
def extract_thread(parent_renderer):
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
if comment_replies_renderer:
comment_counts[2] += 1
comment_entries_iter = self._comment_entries(
- comment_replies_renderer, identity_token, account_syncid,
+ comment_replies_renderer, identity_token, account_syncid, ytcfg,
parent=comment.get('id'), session_token_list=session_token_list,
comment_counts=comment_counts)
if not comment_counts:
# comment so far, est. total comments, current comment thread #
comment_counts = [0, 0, 0]
- headers = self._DEFAULT_BASIC_API_HEADERS.copy()
# TODO: Generalize the download code with TabIE
- if identity_token:
- headers['x-youtube-identity-token'] = identity_token
-
- if account_syncid:
- headers['X-Goog-PageId'] = account_syncid
- headers['X-Goog-AuthUser'] = 0
-
+ context = self._extract_context(ytcfg)
+ visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
continuation = YoutubeTabIE._extract_continuation(root_continuation_data) # TODO
first_continuation = False
if parent is None:
for page_num in itertools.count(0):
if not continuation:
break
+ headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
retries = self._downloader.params.get('extractor_retries', 3)
count = -1
last_error = None
comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
if page_num == 0:
if first_continuation:
- note_prefix = "Downloading initial comment continuation page"
+ note_prefix = 'Downloading initial comment continuation page'
else:
- note_prefix = " Downloading comment reply thread %d %s" % (comment_counts[2], comment_prog_str)
+ note_prefix = ' Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str)
else:
- note_prefix = "%sDownloading comment%s page %d %s" % (
- " " if parent else "",
+ note_prefix = '%sDownloading comment%s page %d %s' % (
+ ' ' if parent else '',
' replies' if parent else '',
page_num,
comment_prog_str)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
if e.cause.code == 413:
- self.report_warning("Assumed end of comments (received HTTP Error 413)")
+ self.report_warning('Assumed end of comments (received HTTP Error 413)')
return
# Downloading page may result in intermittent 5xx HTTP error
# Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
last_error = 'HTTP Error %s' % e.cause.code
if e.cause.code == 404:
- last_error = last_error + " (this API is probably deprecated)"
+ last_error = last_error + ' (this API is probably deprecated)'
if count < retries:
continue
raise
# YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
if browse.get('reload'):
- raise ExtractorError("Invalid or missing params in continuation request", expected=False)
+ raise ExtractorError('Invalid or missing params in continuation request', expected=False)
# TODO: not tested, merged from old extractor
err_msg = browse.get('externalErrorMessage')
# See: https://github.com/ytdl-org/youtube-dl/issues/28194
last_error = 'Incomplete data received'
if count >= retries:
- self._downloader.report_error(last_error)
+ raise ExtractorError(last_error)
if not response:
break
+ visitor_data = try_get(
+ response,
+ lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
+ compat_str) or visitor_data
known_continuation_renderers = {
'itemSectionContinuation': extract_thread,
if expected_comment_count:
comment_counts[1] = str_to_int(expected_comment_count)
- self.to_screen("Downloading ~%d comments" % str_to_int(expected_comment_count))
+ self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
yield comment_counts[1]
# TODO: cli arg.
continuation = YoutubeTabIE._build_continuation_query(
continuation=sort_continuation_renderer.get('continuation'),
ctp=sort_continuation_renderer.get('clickTrackingParams'))
- self.to_screen("Sorting comments by %s" % ('popular' if comment_sort_index == 0 else 'newest'))
+ self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest'))
break
for entry in known_continuation_renderers[key](continuation_renderer):
renderer,
identity_token=self._extract_identity_token(webpage, item_id=video_id),
account_syncid=self._extract_account_syncid(ytcfg),
+ ytcfg=ytcfg,
session_token_list=[xsrf_token])
for comment in comment_iter:
continue
comments.append(comment)
break
- self.to_screen("Downloaded %d/%d comments" % (len(comments), estimated_total))
+ self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total))
return {
'comments': comments,
'comment_count': len(comments),
player_response = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
+
+ ytcfg = self._extract_ytcfg(video_id, webpage)
if not player_response:
player_response = self._call_api(
- 'player', {'videoId': video_id}, video_id)
+ 'player', {'videoId': video_id}, video_id, api_key=self._extract_api_key(ytcfg))
playability_status = player_response.get('playabilityStatus') or {}
if playability_status.get('reason') == 'Sign in to confirm your age':
def get_text(x):
if not x:
return
- return x.get('simpleText') or ''.join([r['text'] for r in x['runs']])
+ text = x.get('simpleText')
+ if text and isinstance(text, compat_str):
+ return text
+ runs = x.get('runs')
+ if not isinstance(runs, list):
+ return
+ return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
search_meta = (
lambda x: self._html_search_meta(x, webpage, default=None)) \
if not formats:
if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
- raise ExtractorError(
+ self.raise_no_formats(
'This video is DRM protected.', expected=True)
pemr = try_get(
playability_status,
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
- self.raise_geo_restricted(
- subreason, countries)
+ self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += '\n' + subreason
if reason:
- raise ExtractorError(reason, expected=True)
+ self.raise_no_formats(reason, expected=True)
self._sort_formats(formats)
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
- w, h = keyword.split('=')[1].split(':')
- w, h = int(w), int(h)
- if w > 0 and h > 0:
- ratio = w / h
- for f in formats:
- if f.get('vcodec') != 'none':
- f['stretched_ratio'] = ratio
+ mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
+ if mobj:
+ # NB: float is intentional for forcing float division
+ w, h = (float(v) for v in mobj.groups())
+ if w > 0 and h > 0:
+ ratio = w / h
+ for f in formats:
+ if f.get('vcodec') != 'none':
+ f['stretched_ratio'] = ratio
+ break
thumbnails = []
for container in (video_details, microformat):
thumbnail_url = thumbnail.get('url')
if not thumbnail_url:
continue
+ # Sometimes youtube gives a wrong thumbnail URL. See:
+ # https://github.com/yt-dlp/yt-dlp/issues/233
+ # https://github.com/ytdl-org/youtube-dl/issues/28023
+ if 'maxresdefault' in thumbnail_url:
+ thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
'height': int_or_none(thumbnail.get('height')),
'url': thumbnail_url,
'yt initial data')
if not initial_data:
initial_data = self._call_api(
- 'next', {'videoId': video_id}, video_id, fatal=False)
+ 'next', {'videoId': video_id}, video_id, fatal=False, api_key=self._extract_api_key(ytcfg))
if not is_live:
try:
is_private = bool_or_none(video_details.get('isPrivate'))
is_unlisted = bool_or_none(microformat.get('isUnlisted'))
is_membersonly = None
+ is_premium = None
if initial_data and is_private is not None:
is_membersonly = False
+ is_premium = False
contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
for content in contents or []:
badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
if label.lower() == 'members only':
is_membersonly = True
break
- if is_membersonly:
+ elif label.lower() == 'premium':
+ is_premium = True
+ break
+ if is_membersonly or is_premium:
break
# TODO: Add this for playlists
info['availability'] = self._availability(
is_private=is_private,
- needs_premium=False, # Youtube no longer have premium-only videos?
+ needs_premium=is_premium,
needs_subscription=is_membersonly,
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
'uploader': 'Игорь Клейнер',
},
+ }, {
+ # playlists, series
+ 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
+ 'playlist_mincount': 5,
+ 'info_dict': {
+ 'id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'title': '3Blue1Brown - Playlists',
+ 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
+ },
}, {
# playlists, singlepage
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
},
'playlist_mincount': 21,
+ }, {
+ 'note': 'Playlist with "show unavailable videos" button',
+ 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
+ 'info_dict': {
+ 'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
+ 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
+ 'uploader': 'Phim Siêu Nhân Nhật Bản',
+ 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
+ },
+ 'playlist_mincount': 1400,
+ 'expected_warnings': [
+ 'YouTube said: INFO - Unavailable videos are hidden',
+ ]
+ }, {
+ 'note': 'Playlist with unavailable videos in a later page',
+ 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
+ 'info_dict': {
+ 'title': 'Uploads from BlankTV',
+ 'id': 'UU8l9frL61Yl5KFOl87nIm2w',
+ 'uploader': 'BlankTV',
+ 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
+ },
+ 'playlist_mincount': 20000,
}, {
# https://github.com/ytdl-org/youtube-dl/issues/21844
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
+ }, {
+ 'url': 'https://www.youtube.com/hashtag/cctv9',
+ 'info_dict': {
+ 'id': 'cctv9',
+ 'title': '#cctv9',
+ },
+ 'playlist_mincount': 350,
+ }, {
+ 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
+ 'only_matching': True,
}]
@classmethod
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
- known_renderers = (
- 'playlistRenderer', 'videoRenderer', 'channelRenderer',
- 'gridPlaylistRenderer', 'gridVideoRenderer', 'gridChannelRenderer'
+ known_basic_renderers = (
+ 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
)
for key, renderer in item.items():
- if key not in known_renderers:
+ if not isinstance(renderer, dict):
continue
- return renderer
+ elif key in known_basic_renderers:
+ return renderer
+ elif key.startswith('grid') and key.endswith('Renderer'):
+ return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(renderer, dict):
continue
title = try_get(
- renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+ renderer, (lambda x: x['title']['runs'][0]['text'],
+ lambda x: x['title']['simpleText']), compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
+ continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
+ continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
+ continue
+ # generic endpoint URL support
+ ep_url = urljoin('https://www.youtube.com/', try_get(
+ renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
+ compat_str))
+ if ep_url:
+ for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
+ if ie.suitable(ep_url):
+ yield self.url_result(
+ ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
+ break
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
return
# video attachment
video_renderer = try_get(
- post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
- video_id = None
- if video_renderer:
- entry = self._video_entry(video_renderer)
+ post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
+ video_id = video_renderer.get('videoId')
+ if video_id:
+ entry = self._extract_video(video_renderer)
if entry:
yield entry
+ # playlist attachment
+ playlist_id = try_get(
+ post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
+ if playlist_id:
+ yield self.url_result(
+ 'https://www.youtube.com/playlist?list=%s' % playlist_id,
+ ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
- yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
+ yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
for entry in self._post_thread_entries(renderer):
yield entry
+ r''' # unused
+ def _rich_grid_entries(self, contents):
+ for content in contents:
+ video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
+ if video_renderer:
+ entry = self._video_entry(video_renderer)
+ if entry:
+ yield entry
+ '''
+
@staticmethod
def _build_continuation_query(continuation, ctp=None):
query = {
ctp = continuation_ep.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
- def _entries(self, tab, item_id, identity_token, account_syncid):
+ def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):
def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for entry in extract_entries(parent_renderer):
yield entry
continuation = continuation_list[0]
-
- headers = {
- 'x-youtube-client-name': '1',
- 'x-youtube-client-version': '2.20201112.04.01',
- }
- if identity_token:
- headers['x-youtube-identity-token'] = identity_token
-
- if account_syncid:
- headers['X-Goog-PageId'] = account_syncid
- headers['X-Goog-AuthUser'] = 0
+ context = self._extract_context(ytcfg)
+ visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
for page_num in itertools.count(1):
if not continuation:
break
- retries = self._downloader.params.get('extractor_retries', 3)
- count = -1
- last_error = None
- while count < retries:
- count += 1
- if last_error:
- self.report_warning('%s. Retrying ...' % last_error)
- try:
- response = self._call_api(
- ep="browse", fatal=True, headers=headers,
- video_id='%s page %s' % (item_id, page_num),
- query={
- 'continuation': continuation['continuation'],
- 'clickTracking': {'clickTrackingParams': continuation['itct']},
- },
- note='Downloading API JSON%s' % (' (retry #%d)' % count if count else ''))
- except ExtractorError as e:
- if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
- # Downloading page may result in intermittent 5xx HTTP error
- # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
- last_error = 'HTTP Error %s' % e.cause.code
- if count < retries:
- continue
- raise
- else:
- # Youtube sometimes sends incomplete data
- # See: https://github.com/ytdl-org/youtube-dl/issues/28194
- if dict_get(response,
- ('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints')):
- break
-
- # Youtube may send alerts if there was an issue with the continuation page
- self._extract_alerts(response, expected=False)
-
- last_error = 'Incomplete data received'
- if count >= retries:
- self._downloader.report_error(last_error)
+ query = {
+ 'continuation': continuation['continuation'],
+ 'clickTracking': {'clickTrackingParams': continuation['itct']}
+ }
+ headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+ response = self._extract_response(
+ item_id='%s page %s' % (item_id, page_num),
+ query=query, headers=headers, ytcfg=ytcfg,
+ check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
if not response:
break
+ visitor_data = try_get(
+ response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
-
- if not renderer:
+ else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
-
if playlist_id is None:
playlist_id = item_id
if title is None:
- title = playlist_id
+ title = (
+ try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
+ or playlist_id)
title += format_field(selected_tab, 'title', ' - %s')
metadata = {
self._entries(
selected_tab, playlist_id,
self._extract_identity_token(webpage, item_id),
- self._extract_account_syncid(data)),
+ self._extract_account_syncid(data),
+ self._extract_ytcfg(item_id, webpage)),
**metadata)
- def _extract_mix_playlist(self, playlist, playlist_id):
+ def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
first_id = last_id = None
+ ytcfg = self._extract_ytcfg(playlist_id, webpage)
+ headers = self._generate_api_headers(
+ ytcfg, account_syncid=self._extract_account_syncid(data),
+ identity_token=self._extract_identity_token(webpage, item_id=playlist_id),
+ visitor_data=try_get(self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
yield video
first_id = first_id or videos[0]['id']
last_id = videos[-1]['id']
-
- _, data = self._extract_webpage(
- 'https://www.youtube.com/watch?list=%s&v=%s' % (playlist_id, last_id),
- '%s page %d' % (playlist_id, page_num))
+ watch_endpoint = try_get(
+ playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+ query = {
+ 'playlistId': playlist_id,
+ 'videoId': watch_endpoint.get('videoId') or last_id,
+ 'index': watch_endpoint.get('index') or len(videos),
+ 'params': watch_endpoint.get('params') or 'OAE%3D'
+ }
+ response = self._extract_response(
+ item_id='%s page %d' % (playlist_id, page_num),
+ query=query,
+ ep='next',
+ headers=headers,
+ check_get_keys='contents'
+ )
playlist = try_get(
- data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+ response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
- def _extract_from_playlist(self, item_id, url, data, playlist):
+ def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
video_title=title)
return self.playlist_result(
- self._extract_mix_playlist(playlist, playlist_id),
+ self._extract_mix_playlist(playlist, playlist_id, data, webpage),
playlist_id=playlist_id, playlist_title=title)
def _extract_alerts(self, data, expected=False):
alert_type = alert.get('type')
if not alert_type:
continue
- message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
+ message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or ''
if message:
yield alert_type, message
for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
- message = try_get(run, lambda x: x['text'], compat_str)
- if message:
- yield alert_type, message
+ message += try_get(run, lambda x: x['text'], compat_str)
+ if message:
+ yield alert_type, message
- err_msg = None
+ errors = []
+ warnings = []
for alert_type, alert_message in _real_extract_alerts():
if alert_type.lower() == 'error':
- if err_msg:
- self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
- err_msg = alert_message
+ errors.append([alert_type, alert_message])
else:
- self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+ warnings.append([alert_type, alert_message])
+
+ for alert_type, alert_message in (warnings + errors[:-1]):
+ self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+ if errors:
+ raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
+
+ def _reload_with_unavailable_videos(self, item_id, data, webpage):
+ """
+ Get playlist with unavailable videos if the 'show unavailable videos' button exists.
+ """
+ sidebar_renderer = try_get(
+ data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
+ if not sidebar_renderer:
+ return
+ browse_id = params = None
+ for item in sidebar_renderer:
+ if not isinstance(item, dict):
+ continue
+ renderer = item.get('playlistSidebarPrimaryInfoRenderer')
+ menu_renderer = try_get(
+ renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
+ for menu_item in menu_renderer:
+ if not isinstance(menu_item, dict):
+ continue
+ nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
+ text = try_get(
+ nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
+ if not text or text.lower() != 'show unavailable videos':
+ continue
+ browse_endpoint = try_get(
+ nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
+ browse_id = browse_endpoint.get('browseId')
+ params = browse_endpoint.get('params')
+ break
+
+ ytcfg = self._extract_ytcfg(item_id, webpage)
+ headers = self._generate_api_headers(
+ ytcfg, account_syncid=self._extract_account_syncid(ytcfg),
+ identity_token=self._extract_identity_token(webpage, item_id=item_id),
+ visitor_data=try_get(
+ self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+ query = {
+ 'params': params or 'wgYCCAA=',
+ 'browseId': browse_id or 'VL%s' % item_id
+ }
+ return self._extract_response(
+ item_id=item_id, headers=headers, query=query,
+ check_get_keys='contents', fatal=False,
+ note='Downloading API JSON with unavailable videos')
+
+ def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
+ ytcfg=None, check_get_keys=None, ep='browse', fatal=True):
+ response = None
+ last_error = None
+ count = -1
+ retries = self._downloader.params.get('extractor_retries', 3)
+ if check_get_keys is None:
+ check_get_keys = []
+ while count < retries:
+ count += 1
+ if last_error:
+ self.report_warning('%s. Retrying ...' % last_error)
+ try:
+ response = self._call_api(
+ ep=ep, fatal=True, headers=headers,
+ video_id=item_id, query=query,
+ context=self._extract_context(ytcfg),
+ api_key=self._extract_api_key(ytcfg),
+ note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
+ # Downloading page may result in intermittent 5xx HTTP error
+ # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+ last_error = 'HTTP Error %s' % e.cause.code
+ if count < retries:
+ continue
+ if fatal:
+ raise
+ else:
+ self.report_warning(error_to_compat_str(e))
+ return
- if err_msg:
- raise ExtractorError('YouTube said: %s' % err_msg, expected=expected)
+ else:
+ # YouTube may send alerts if there was an issue with the continuation page
+ self._extract_alerts(response, expected=False)
+ if not check_get_keys or dict_get(response, check_get_keys):
+ break
+ # YouTube sometimes sends incomplete data
+ # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+ last_error = 'Incomplete data received'
+ if count >= retries:
+ if fatal:
+ raise ExtractorError(last_error)
+ else:
+ self.report_warning(last_error)
+ return
+ return response
def _extract_webpage(self, url, item_id):
retries = self._downloader.params.get('extractor_retries', 3)
if data.get('contents') or data.get('currentVideoEndpoint'):
break
if count >= retries:
- self._downloader.report_error(last_error)
+ raise ExtractorError(last_error)
return webpage, data
def _real_extract(self, url):
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
+ compat_opts = self._downloader.params.get('compat_opts', [])
# This is not matched in a channel page with a tab selected
mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
mobj = mobj.groupdict() if mobj else {}
- if mobj and not mobj.get('not_channel'):
- self._downloader.report_warning(
+ if mobj and not mobj.get('not_channel') and 'no-youtube-channel-redirect' not in compat_opts:
+ self.report_warning(
'A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
# Handle both video/playlist URLs
- qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ qs = parse_qs(url)
video_id = qs.get('v', [None])[0]
playlist_id = qs.get('list', [None])[0]
# If there is neither video or playlist ids,
# youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
- self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+ self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
if video_id and playlist_id:
webpage, data = self._extract_webpage(url, item_id)
+ # YouTube sometimes provides a button to reload playlist with unavailable videos.
+ if 'no-youtube-unavailable-videos' not in compat_opts:
+ data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
+
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
if tabs:
playlist = try_get(
data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
if playlist:
- return self._extract_from_playlist(item_id, url, data, playlist)
+ return self._extract_from_playlist(item_id, url, data, playlist, webpage)
video_id = try_get(
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
compat_str) or video_id
if video_id:
- self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
+ self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
@classmethod
def suitable(cls, url):
- return False if YoutubeTabIE.suitable(url) else super(
- YoutubePlaylistIE, cls).suitable(url)
+ if YoutubeTabIE.suitable(url):
+ return False
+ # Hack for lazy extractors until more generic solution is implemented
+ # (see #28780)
+ from .youtube import parse_qs
+ qs = parse_qs(url)
+ if qs.get('v', [None])[0]:
+ return False
+ return super(YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
- qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ qs = parse_qs(url)
if not qs:
qs = {'list': playlist_id}
return self.url_result(
ie=YoutubeTabIE.ie_key())
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
- search = self._call_api(
- ep='search', video_id='query "%s"' % query, fatal=False,
- note='Downloading page %s' % page_num, query=data)
+ search = self._extract_response(
+ item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
+ check_get_keys=('contents', 'onResponseReceivedCommands')
+ )
if not search:
break
slr_contents = try_get(