from __future__ import unicode_literals
+import calendar
+import hashlib
import itertools
import json
import os.path
from ..compat import (
compat_chr,
compat_HTTPError,
- compat_kwargs,
compat_parse_qs,
compat_str,
compat_urllib_parse_unquote_plus,
)
from ..jsinterp import JSInterpreter
from ..utils import (
+ bool_or_none,
clean_html,
+ dict_get,
+ datetime_from_str,
+ error_to_compat_str,
ExtractorError,
format_field,
float_or_none,
update_url_query,
url_or_none,
urlencode_postdata,
- urljoin,
+ urljoin
)
+def parse_qs(url):
+ return compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+
+
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
_RESERVED_NAMES = (
- r'embed|e|watch_popup|channel|c|user|playlist|watch|w|v|movies|results|shared|hashtag|'
- r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout|'
- r'feed/(?:watch_later|history|subscriptions|library|trending|recommended)')
+ r'channel|c|user|browse|playlist|watch|w|v|embed|e|watch_popup|'
+ r'movies|results|shared|hashtag|trending|feed|feeds|oembed|get_video_info|'
+ r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
- def _ids_to_results(self, ids):
- return [
- self.url_result(vid_id, 'Youtube', video_id=vid_id)
- for vid_id in ids]
-
def _login(self):
"""
Attempt to log in to YouTube.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
+
+ def warn(message):
+ self.report_warning(message)
+
+ # username+password login is broken
+ if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
+ self.raise_login_required(
+ 'Login details are needed to download this content', method='cookies')
username, password = self._get_login_info()
+ if username:
+ warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies'])
+ return
+ # Everything below this is broken!
+
# No authentication to be performed
if username is None:
- if self._LOGIN_REQUIRED and self._downloader.params.get('cookiefile') is None:
+ if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
- # if self._downloader.params.get('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
+ # if self.get_param('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
# self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
return True
'Google-Accounts-XSRF': 1,
})
- def warn(message):
- self._downloader.report_warning(message)
-
lookup_req = [
username,
None, [], None, 'US', None, None, 2, False, True,
return True
- def _download_webpage_handle(self, *args, **kwargs):
- query = kwargs.get('query', {}).copy()
- kwargs['query'] = query
- return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
- *args, **compat_kwargs(kwargs))
+ def _initialize_consent(self):
+ cookies = self._get_cookies('https://www.youtube.com/')
+ if cookies.get('__Secure-3PSID'):
+ return
+ consent_id = None
+ consent = cookies.get('CONSENT')
+ if consent:
+ if 'YES' in consent.value:
+ return
+ consent_id = self._search_regex(
+ r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+ if not consent_id:
+ consent_id = random.randint(100, 999)
+ self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
def _real_initialize(self):
+ self._initialize_consent()
if self._downloader is None:
return
if not self._login():
return
- _DEFAULT_API_DATA = {
- 'context': {
- 'client': {
- 'clientName': 'WEB',
- 'clientVersion': '2.20201021.03.00',
- }
- },
- }
-
+ _YT_WEB_CLIENT_VERSION = '2.20210407.08.00'
+ _YT_INNERTUBE_API_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
- def _call_api(self, ep, query, video_id, fatal=True):
- data = self._DEFAULT_API_DATA.copy()
- data.update(query)
+ def _generate_sapisidhash_header(self):
+ sapisid_cookie = self._get_cookies('https://www.youtube.com').get('SAPISID')
+ if sapisid_cookie is None:
+ return
+ time_now = round(time.time())
+ sapisidhash = hashlib.sha1((str(time_now) + " " + sapisid_cookie.value + " " + "https://www.youtube.com").encode("utf-8")).hexdigest()
+ return "SAPISIDHASH %s_%s" % (time_now, sapisidhash)
+
+ def _call_api(self, ep, query, video_id, fatal=True, headers=None,
+ note='Downloading API JSON', errnote='Unable to download API page',
+ context=None, api_key=None):
+ data = {'context': context} if context else {'context': self._extract_context()}
+ data.update(query)
+ real_headers = self._generate_api_headers()
+ real_headers.update({'content-type': 'application/json'})
+ if headers:
+ real_headers.update(headers)
return self._download_json(
- 'https://www.youtube.com/youtubei/v1/%s' % ep, video_id=video_id,
- note='Downloading API JSON', errnote='Unable to download API page',
- data=json.dumps(data).encode('utf8'), fatal=fatal,
- headers={'content-type': 'application/json'},
- query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
+ 'https://www.youtube.com/youtubei/v1/%s' % ep,
+ video_id=video_id, fatal=fatal, note=note, errnote=errnote,
+ data=json.dumps(data).encode('utf8'), headers=real_headers,
+ query={'key': api_key or self._extract_api_key()})
+
+ def _extract_api_key(self, ytcfg=None):
+ return try_get(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str) or self._YT_INNERTUBE_API_KEY
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
video_id)
+ def _extract_identity_token(self, webpage, item_id):
+ ytcfg = self._extract_ytcfg(item_id, webpage)
+ if ytcfg:
+ token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
+ if token:
+ return token
+ return self._search_regex(
+ r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+ 'identity token', default=None)
+
+ @staticmethod
+ def _extract_account_syncid(data):
+ """
+ Extract syncId required to download private playlists of secondary channels
+ @param data Either response or ytcfg
+ """
+ sync_ids = (try_get(
+ data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
+ lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
+ if len(sync_ids) >= 2 and sync_ids[1]:
+ # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
+ # and just "user_syncid||" for primary channel. We only want the channel_syncid
+ return sync_ids[0]
+ # ytcfg includes channel_syncid if on secondary channel
+ return data.get('DELEGATED_SESSION_ID')
+
def _extract_ytcfg(self, video_id, webpage):
+ if not webpage:
+ return {}
return self._parse_json(
self._search_regex(
r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
- default='{}'), video_id, fatal=False)
+ default='{}'), video_id, fatal=False) or {}
+
+ def __extract_client_version(self, ytcfg):
+ return try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or self._YT_WEB_CLIENT_VERSION
+
+ def _extract_context(self, ytcfg=None):
+ context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict)
+ if context:
+ return context
+
+ # Recreate the client context (required)
+ client_version = self.__extract_client_version(ytcfg)
+ client_name = try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_NAME'], compat_str) or 'WEB'
+ context = {
+ 'client': {
+ 'clientName': client_name,
+ 'clientVersion': client_version,
+ }
+ }
+ visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
+ if visitor_data:
+ context['client']['visitorData'] = visitor_data
+ return context
+
+ def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None, visitor_data=None):
+ headers = {
+ 'X-YouTube-Client-Name': '1',
+ 'X-YouTube-Client-Version': self.__extract_client_version(ytcfg),
+ }
+ if identity_token:
+ headers['x-youtube-identity-token'] = identity_token
+ if account_syncid:
+ headers['X-Goog-PageId'] = account_syncid
+ headers['X-Goog-AuthUser'] = 0
+ if visitor_data:
+ headers['x-goog-visitor-id'] = visitor_data
+ auth = self._generate_sapisidhash_header()
+ if auth is not None:
+ headers['Authorization'] = auth
+ headers['X-Origin'] = 'https://www.youtube.com'
+ return headers
+
+ @staticmethod
+ def is_music_url(url):
+ return re.match(r'https?://music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
(lambda x: x['ownerText']['runs'][0]['text'],
lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
return {
- '_type': 'url_transparent',
+ '_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
'url': video_id,
r'(?:(?:www|dev)\.)?invidio\.us',
# Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
r'(?:www\.)?invidious\.pussthecat\.org',
- r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.zee\.li',
- r'(?:www\.)?vid\.puffyan\.us',
r'(?:(?:www|au)\.)?ytprivate\.com',
r'(?:www\.)?invidious\.namazso\.eu',
r'(?:www\.)?invidious\.ethibox\.fr',
- r'(?:www\.)?inv\.skyn3t\.in',
- r'(?:www\.)?invidious\.himiko\.cloud',
r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:(?:www|no)\.)?invidiou\.sh',
r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
r'(?:www\.)?invidious\.kabi\.tk',
- r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.mastodon\.host',
r'(?:www\.)?invidious\.zapashcanon\.fr',
r'(?:www\.)?invidious\.kavin\.rocks',
+ r'(?:www\.)?invidious\.tinfoil-hat\.net',
+ r'(?:www\.)?invidious\.himiko\.cloud',
+ r'(?:www\.)?invidious\.reallyancient\.tech',
r'(?:www\.)?invidious\.tube',
r'(?:www\.)?invidiou\.site',
r'(?:www\.)?invidious\.site',
r'(?:www\.)?invidious\.xyz',
r'(?:www\.)?invidious\.nixnet\.xyz',
+ r'(?:www\.)?invidious\.048596\.xyz',
r'(?:www\.)?invidious\.drycat\.fr',
+ r'(?:www\.)?inv\.skyn3t\.in',
r'(?:www\.)?tube\.poal\.co',
r'(?:www\.)?tube\.connect\.cafe',
r'(?:www\.)?vid\.wxzm\.sx',
r'(?:www\.)?vid\.mint\.lgbt',
+ r'(?:www\.)?vid\.puffyan\.us',
r'(?:www\.)?yewtu\.be',
r'(?:www\.)?yt\.elukerio\.org',
r'(?:www\.)?yt\.lelux\.fi',
r'(?:www\.)?invidious\.ggc-project\.de',
r'(?:www\.)?yt\.maisputain\.ovh',
+ r'(?:www\.)?ytprivate\.com',
+ r'(?:www\.)?invidious\.13ad\.de',
r'(?:www\.)?invidious\.toot\.koeln',
r'(?:www\.)?invidious\.fdn\.fr',
r'(?:www\.)?watch\.nettohikari\.com',
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
- (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
- (?!.*?\blist=
- (?:
- %(playlist_id)s| # combined list/video URLs are handled by the playlist IE
- WL # WL are handled by the watch later IE
- )
- )
+ (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
- $""" % {
- 'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
+ (?:\#|$)""" % {
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
},
'skip': 'This video does not exist.',
},
+ {
+ # Video with incomplete 'yt:stretch=16:'
+ 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
+ 'only_matching': True,
+ },
{
# Video licensed under Creative Commons
'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
'only_matching': True,
},
+ {
+ # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
+ 'url': 'cBvYw8_A0vQ',
+ 'info_dict': {
+ 'id': 'cBvYw8_A0vQ',
+ 'ext': 'mp4',
+ 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
+ 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
+ 'upload_date': '20201120',
+ 'uploader': 'Walk around Japan',
+ 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
+ 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
+ }, {
+ # Has multiple audio streams
+ 'url': 'WaOKSUlf4TM',
+ 'only_matching': True
+ }, {
+ # Requires Premium: has format 141 when requested using YTM url
+ 'url': 'https://music.youtube.com/watch?v=XclachpHxis',
+ 'only_matching': True
+ }, {
+ # multiple subtitles with same lang_code
+ 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
+ 'only_matching': True,
+ },
]
+ @classmethod
+ def suitable(cls, url):
+ # Hack for lazy extractors until more generic solution is implemented
+ # (see #28780)
+ from .youtube import parse_qs
+ qs = parse_qs(url)
+ if qs.get('list', [None])[0]:
+ return False
+ return super(YoutubeIE, cls).suitable(url)
+
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._code_cache = {}
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
- if self._downloader.params.get('youtube_print_sig_code'):
+ if self.get_param('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
(r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
regex), webpage, name, default='{}'), video_id, fatal=False)
+ @staticmethod
+ def parse_time_text(time_text):
+ """
+ Parse the comment time text
+ time_text is in the format 'X units ago (edited)'
+ """
+ time_text_split = time_text.split(' ')
+ if len(time_text_split) >= 3:
+ return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
+
+ @staticmethod
+ def _join_text_entries(runs):
+ text = None
+ for run in runs:
+ if not isinstance(run, dict):
+ continue
+ sub_text = try_get(run, lambda x: x['text'], compat_str)
+ if sub_text:
+ if not text:
+ text = sub_text
+ continue
+ text += sub_text
+ return text
+
+ def _extract_comment(self, comment_renderer, parent=None):
+ comment_id = comment_renderer.get('commentId')
+ if not comment_id:
+ return
+ comment_text_runs = try_get(comment_renderer, lambda x: x['contentText']['runs']) or []
+ text = self._join_text_entries(comment_text_runs) or ''
+ comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
+ time_text = self._join_text_entries(comment_time_text)
+ timestamp = calendar.timegm(self.parse_time_text(time_text).timetuple())
+ author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
+ author_id = try_get(comment_renderer,
+ lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
+ votes = str_to_int(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
+ lambda x: x['likeCount']), compat_str)) or 0
+ author_thumbnail = try_get(comment_renderer,
+ lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
+
+ author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
+ is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
+ return {
+ 'id': comment_id,
+ 'text': text,
+ 'timestamp': timestamp,
+ 'time_text': time_text,
+ 'like_count': votes,
+ 'is_favorited': is_liked,
+ 'author': author,
+ 'author_id': author_id,
+ 'author_thumbnail': author_thumbnail,
+ 'author_is_uploader': author_is_uploader,
+ 'parent': parent or 'root'
+ }
+
+ def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
+ ytcfg, session_token_list, parent=None, comment_counts=None):
+
+ def extract_thread(parent_renderer):
+ contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
+ if not parent:
+ comment_counts[2] = 0
+ for content in contents:
+ comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
+ comment_renderer = try_get(
+ comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get(
+ content, (lambda x: x['commentRenderer'], dict))
+
+ if not comment_renderer:
+ continue
+ comment = self._extract_comment(comment_renderer, parent)
+ if not comment:
+ continue
+ comment_counts[0] += 1
+ yield comment
+ # Attempt to get the replies
+ comment_replies_renderer = try_get(
+ comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
+
+ if comment_replies_renderer:
+ comment_counts[2] += 1
+ comment_entries_iter = self._comment_entries(
+ comment_replies_renderer, identity_token, account_syncid, ytcfg,
+ parent=comment.get('id'), session_token_list=session_token_list,
+ comment_counts=comment_counts)
+
+ for reply_comment in comment_entries_iter:
+ yield reply_comment
+
+ if not comment_counts:
+ # comment so far, est. total comments, current comment thread #
+ comment_counts = [0, 0, 0]
+
+ # TODO: Generalize the download code with TabIE
+ context = self._extract_context(ytcfg)
+ visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
+ continuation = YoutubeTabIE._extract_continuation(root_continuation_data) # TODO
+ first_continuation = False
+ if parent is None:
+ first_continuation = True
+
+ for page_num in itertools.count(0):
+ if not continuation:
+ break
+ headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+ retries = self.get_param('extractor_retries', 3)
+ count = -1
+ last_error = None
+
+ while count < retries:
+ count += 1
+ if last_error:
+ self.report_warning('%s. Retrying ...' % last_error)
+ try:
+ query = {
+ 'ctoken': continuation['ctoken'],
+ 'pbj': 1,
+ 'type': 'next',
+ }
+ if parent:
+ query['action_get_comment_replies'] = 1
+ else:
+ query['action_get_comments'] = 1
+
+ comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
+ if page_num == 0:
+ if first_continuation:
+ note_prefix = 'Downloading initial comment continuation page'
+ else:
+ note_prefix = ' Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str)
+ else:
+ note_prefix = '%sDownloading comment%s page %d %s' % (
+ ' ' if parent else '',
+ ' replies' if parent else '',
+ page_num,
+ comment_prog_str)
+
+ browse = self._download_json(
+ 'https://www.youtube.com/comment_service_ajax', None,
+ '%s %s' % (note_prefix, '(retry #%d)' % count if count else ''),
+ headers=headers, query=query,
+ data=urlencode_postdata({
+ 'session_token': session_token_list[0]
+ }))
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
+ if e.cause.code == 413:
+ self.report_warning('Assumed end of comments (received HTTP Error 413)')
+ return
+ # Downloading page may result in intermittent 5xx HTTP error
+                        # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+ last_error = 'HTTP Error %s' % e.cause.code
+ if e.cause.code == 404:
+ last_error = last_error + ' (this API is probably deprecated)'
+ if count < retries:
+ continue
+ raise
+ else:
+ session_token = try_get(browse, lambda x: x['xsrf_token'], compat_str)
+ if session_token:
+ session_token_list[0] = session_token
+
+ response = try_get(browse,
+ (lambda x: x['response'],
+ lambda x: x[1]['response'])) or {}
+
+ if response.get('continuationContents'):
+ break
+
+ # YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
+ if browse.get('reload'):
+ raise ExtractorError('Invalid or missing params in continuation request', expected=False)
+
+ # TODO: not tested, merged from old extractor
+ err_msg = browse.get('externalErrorMessage')
+ if err_msg:
+ raise ExtractorError('YouTube said: %s' % err_msg, expected=False)
+
+ # Youtube sometimes sends incomplete data
+ # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+ last_error = 'Incomplete data received'
+ if count >= retries:
+ raise ExtractorError(last_error)
+
+ if not response:
+ break
+ visitor_data = try_get(
+ response,
+ lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
+ compat_str) or visitor_data
+
+ known_continuation_renderers = {
+ 'itemSectionContinuation': extract_thread,
+ 'commentRepliesContinuation': extract_thread
+ }
+
+ # extract next root continuation from the results
+ continuation_contents = try_get(
+ response, lambda x: x['continuationContents'], dict) or {}
+
+ for key, value in continuation_contents.items():
+ if key not in known_continuation_renderers:
+ continue
+ continuation_renderer = value
+
+ if first_continuation:
+ first_continuation = False
+ expected_comment_count = try_get(
+ continuation_renderer,
+ (lambda x: x['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'],
+ lambda x: x['header']['commentsHeaderRenderer']['commentsCount']['runs'][0]['text']),
+ compat_str)
+
+ if expected_comment_count:
+ comment_counts[1] = str_to_int(expected_comment_count)
+ self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
+ yield comment_counts[1]
+
+ # TODO: cli arg.
+ # 1/True for newest, 0/False for popular (default)
+ comment_sort_index = int(True)
+ sort_continuation_renderer = try_get(
+ continuation_renderer,
+ lambda x: x['header']['commentsHeaderRenderer']['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems']
+ [comment_sort_index]['continuation']['reloadContinuationData'], dict)
+ # If this fails, the initial continuation page
+ # starts off with popular anyways.
+ if sort_continuation_renderer:
+ continuation = YoutubeTabIE._build_continuation_query(
+ continuation=sort_continuation_renderer.get('continuation'),
+ ctp=sort_continuation_renderer.get('clickTrackingParams'))
+ self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest'))
+ break
+
+ for entry in known_continuation_renderers[key](continuation_renderer):
+ yield entry
+
+ continuation = YoutubeTabIE._extract_continuation(continuation_renderer) # TODO
+ break
+
+ def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
+ """Entry for comment extraction"""
+ comments = []
+ known_entry_comment_renderers = (
+ 'itemSectionRenderer',
+ )
+ estimated_total = 0
+ for entry in contents:
+ for key, renderer in entry.items():
+ if key not in known_entry_comment_renderers:
+ continue
+
+ comment_iter = self._comment_entries(
+ renderer,
+ identity_token=self._extract_identity_token(webpage, item_id=video_id),
+ account_syncid=self._extract_account_syncid(ytcfg),
+ ytcfg=ytcfg,
+ session_token_list=[xsrf_token])
+
+ for comment in comment_iter:
+ if isinstance(comment, int):
+ estimated_total = comment
+ continue
+ comments.append(comment)
+ break
+ self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total))
+ return {
+ 'comments': comments,
+ 'comment_count': len(comments),
+ }
+
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
+
+ is_music_url = smuggled_data.get('is_music_url') or self.is_music_url(url)
+
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
webpage = self._download_webpage(
- webpage_url + '&has_verified=1&bpctr=9999999999',
- video_id, fatal=False)
+ webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
+
+ def get_text(x):
+ if not x:
+ return
+ text = x.get('simpleText')
+ if text and isinstance(text, compat_str):
+ return text
+ runs = x.get('runs')
+ if not isinstance(runs, list):
+ return
+ return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
+
+ ytm_streaming_data = {}
+ if is_music_url:
+            # we are forced to use parse_json because 141 only appeared in get_video_info.
+ # el, c, cver, cplayer field required for 141(aac 256kbps) codec
+ # maybe paramter of youtube music player?
+ ytm_player_response = self._parse_json(try_get(compat_parse_qs(
+ self._download_webpage(
+ base_url + 'get_video_info', video_id,
+ 'Fetching youtube music info webpage',
+ 'unable to download youtube music info webpage', query={
+ 'video_id': video_id,
+ 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
+ 'el': 'detailpage',
+ 'c': 'WEB_REMIX',
+ 'cver': '0.1',
+ 'cplayer': 'UNIPLAYER',
+ 'html5': '1',
+ }, fatal=False)),
+ lambda x: x['player_response'][0],
+ compat_str) or '{}', video_id)
+ ytm_streaming_data = ytm_player_response.get('streamingData') or {}
player_response = None
if webpage:
player_response = self._extract_yt_initial_variable(
webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
video_id, 'initial player response')
+
+ ytcfg = self._extract_ytcfg(video_id, webpage)
if not player_response:
player_response = self._call_api(
- 'player', {'videoId': video_id}, video_id)
+ 'player', {'videoId': video_id}, video_id, api_key=self._extract_api_key(ytcfg))
playability_status = player_response.get('playabilityStatus') or {}
if playability_status.get('reason') == 'Sign in to confirm your age':
'unable to download video info webpage', query={
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
+ 'html5': '1',
}, fatal=False)),
lambda x: x['player_response'][0],
compat_str) or '{}', video_id)
return self.url_result(
trailer_video_id, self.ie_key(), trailer_video_id)
- def get_text(x):
- if not x:
- return
- return x.get('simpleText') or ''.join([r['text'] for r in x['runs']])
-
search_meta = (
lambda x: self._html_search_meta(x, webpage, default=None)) \
if webpage else lambda x: None
video_description = video_details.get('shortDescription')
if not smuggled_data.get('force_singlefeed', False):
- if not self._downloader.params.get('noplaylist'):
+ if not self.get_param('noplaylist'):
multifeed_metadata_list = try_get(
player_response,
lambda x: x['multicamera']['playerLegacyMulticameraRenderer']['metadataList'],
else:
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
- formats = []
- itags = []
+ formats, itags, stream_ids = [], [], []
itag_qualities = {}
player_url = None
- q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
+ q = qualities([
+ 'tiny', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
+ 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
+ ])
+
streaming_data = player_response.get('streamingData') or {}
streaming_formats = streaming_data.get('formats') or []
streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
+ streaming_formats.extend(ytm_streaming_data.get('formats') or [])
+ streaming_formats.extend(ytm_streaming_data.get('adaptiveFormats') or [])
+
for fmt in streaming_formats:
if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
continue
itag = str_or_none(fmt.get('itag'))
+ audio_track = fmt.get('audioTrack') or {}
+ stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
+ if stream_id in stream_ids:
+ continue
+
quality = fmt.get('quality')
+ if quality == 'tiny' or not quality:
+ quality = fmt.get('audioQuality', '').lower() or quality
if itag and quality:
itag_qualities[itag] = quality
# FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
if itag:
itags.append(itag)
+ stream_ids.append(stream_id)
+
tbr = float_or_none(
fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
- 'format_note': fmt.get('qualityLabel') or quality,
+ 'format_note': audio_track.get('displayName') or fmt.get('qualityLabel') or quality,
'fps': int_or_none(fmt.get('fps')),
'height': int_or_none(fmt.get('height')),
'quality': q(quality),
'tbr': tbr,
'url': fmt_url,
'width': fmt.get('width'),
+ 'language': audio_track.get('id', '').split('.')[0],
}
mimetype = fmt.get('mimeType')
if mimetype:
dct['container'] = dct['ext'] + '_dash'
formats.append(dct)
- hls_manifest_url = streaming_data.get('hlsManifestUrl')
- if hls_manifest_url:
- for f in self._extract_m3u8_formats(
- hls_manifest_url, video_id, 'mp4', fatal=False):
- itag = self._search_regex(
- r'/itag/(\d+)', f['url'], 'itag', default=None)
- if itag:
- f['format_id'] = itag
- formats.append(f)
-
- if self._downloader.params.get('youtube_include_dash_manifest'):
- dash_manifest_url = streaming_data.get('dashManifestUrl')
- if dash_manifest_url:
- for f in self._extract_mpd_formats(
- dash_manifest_url, video_id, fatal=False):
- itag = f['format_id']
- if itag in itags:
- continue
- if itag in itag_qualities:
- # Not actually usefull since the sorting is already done with "quality,res,fps,codec"
- # but kept to maintain feature parity (and code similarity) with youtube-dl
- # Remove if this causes any issues with sorting in future
- f['quality'] = q(itag_qualities[itag])
- filesize = int_or_none(self._search_regex(
- r'/clen/(\d+)', f.get('fragment_base_url')
- or f['url'], 'file size', default=None))
- if filesize:
- f['filesize'] = filesize
+ for sd in (streaming_data, ytm_streaming_data):
+ hls_manifest_url = sd.get('hlsManifestUrl')
+ if hls_manifest_url:
+ for f in self._extract_m3u8_formats(
+ hls_manifest_url, video_id, 'mp4', fatal=False):
+ itag = self._search_regex(
+ r'/itag/(\d+)', f['url'], 'itag', default=None)
+ if itag:
+ f['format_id'] = itag
formats.append(f)
+ if self.get_param('youtube_include_dash_manifest', True):
+ for sd in (streaming_data, ytm_streaming_data):
+ dash_manifest_url = sd.get('dashManifestUrl')
+ if dash_manifest_url:
+ for f in self._extract_mpd_formats(
+ dash_manifest_url, video_id, fatal=False):
+ itag = f['format_id']
+ if itag in itags:
+ continue
+ if itag in itag_qualities:
+ f['quality'] = q(itag_qualities[itag])
+ filesize = int_or_none(self._search_regex(
+ r'/clen/(\d+)', f.get('fragment_base_url')
+ or f['url'], 'file size', default=None))
+ if filesize:
+ f['filesize'] = filesize
+ formats.append(f)
+
if not formats:
- if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
- raise ExtractorError(
+ if not self.get_param('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
+ self.raise_no_formats(
'This video is DRM protected.', expected=True)
pemr = try_get(
playability_status,
if not countries:
regions_allowed = search_meta('regionsAllowed')
countries = regions_allowed.split(',') if regions_allowed else None
- self.raise_geo_restricted(
- subreason, countries)
+ self.raise_geo_restricted(subreason, countries, metadata_available=True)
reason += '\n' + subreason
if reason:
- raise ExtractorError(reason, expected=True)
+ self.raise_no_formats(reason, expected=True)
self._sort_formats(formats)
for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
for keyword in keywords:
if keyword.startswith('yt:stretch='):
- w, h = keyword.split('=')[1].split(':')
- w, h = int(w), int(h)
- if w > 0 and h > 0:
- ratio = w / h
- for f in formats:
- if f.get('vcodec') != 'none':
- f['stretched_ratio'] = ratio
+ mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
+ if mobj:
+ # NB: float is intentional for forcing float division
+ w, h = (float(v) for v in mobj.groups())
+ if w > 0 and h > 0:
+ ratio = w / h
+ for f in formats:
+ if f.get('vcodec') != 'none':
+ f['stretched_ratio'] = ratio
+ break
thumbnails = []
for container in (video_details, microformat):
thumbnail_url = thumbnail.get('url')
if not thumbnail_url:
continue
+ # Sometimes youtube gives a wrong thumbnail URL. See:
+ # https://github.com/yt-dlp/yt-dlp/issues/233
+ # https://github.com/ytdl-org/youtube-dl/issues/28023
+ if 'maxresdefault' in thumbnail_url:
+ thumbnail_url = thumbnail_url.split('?')[0]
thumbnails.append({
- 'height': int_or_none(thumbnail.get('height')),
'url': thumbnail_url,
+ 'height': int_or_none(thumbnail.get('height')),
'width': int_or_none(thumbnail.get('width')),
+ 'preference': 1 if 'maxresdefault' in thumbnail_url else -1
})
- if thumbnails:
- break
- else:
- thumbnail = search_meta(['og:image', 'twitter:image'])
- if thumbnail:
- thumbnails = [{'url': thumbnail}]
+ thumbnail_url = search_meta(['og:image', 'twitter:image'])
+ if thumbnail_url:
+ thumbnails.append({
+ 'url': thumbnail_url,
+ 'preference': 1 if 'maxresdefault' in thumbnail_url else -1
+ })
+ # All videos have a maxresdefault thumbnail, but sometimes it does not appear in the webpage
+ # See: https://github.com/ytdl-org/youtube-dl/issues/29049
+ thumbnails.append({
+ 'url': 'https://i.ytimg.com/vi/%s/maxresdefault.jpg' % video_id,
+ 'preference': 1,
+ })
+ self._remove_duplicate_formats(thumbnails)
category = microformat.get('category') or search_meta('genre')
channel_id = video_details.get('channelId') \
'tags': keywords,
'is_live': is_live,
'playable_in_embed': playability_status.get('playableInEmbed'),
- 'was_live': video_details.get('isLiveContent')
+ 'was_live': video_details.get('isLiveContent'),
}
pctr = try_get(
lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
subtitles = {}
if pctr:
- def process_language(container, base_url, lang_code, query):
- lang_subs = []
+ def process_language(container, base_url, lang_code, sub_name, query):
+ lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
query.update({
'fmt': fmt,
lang_subs.append({
'ext': fmt,
'url': update_url_query(base_url, query),
+ 'name': sub_name,
})
- container[lang_code] = lang_subs
for caption_track in (pctr.get('captionTracks') or []):
base_url = caption_track.get('baseUrl')
if not base_url:
continue
if caption_track.get('kind') != 'asr':
- lang_code = caption_track.get('languageCode')
+ lang_code = (
+ remove_start(caption_track.get('vssId') or '', '.').replace('.', '-')
+ or caption_track.get('languageCode'))
if not lang_code:
continue
process_language(
- subtitles, base_url, lang_code, {})
+ subtitles, base_url, lang_code,
+ try_get(caption_track, lambda x: x.get('name').get('simpleText')),
+ {})
continue
automatic_captions = {}
for translation_language in (pctr.get('translationLanguages') or []):
continue
process_language(
automatic_captions, base_url, translation_language_code,
+ try_get(translation_language, lambda x: x['languageName']['simpleText']),
{'tlang': translation_language_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
'yt initial data')
if not initial_data:
initial_data = self._call_api(
- 'next', {'videoId': video_id}, video_id, fatal=False)
+ 'next', {'videoId': video_id}, video_id, fatal=False, api_key=self._extract_api_key(ytcfg))
if not is_live:
try:
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
info['subtitles']['live_chat'] = [{
+ 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
'protocol': 'youtube_live_chat_replay',
info['channel'] = get_text(try_get(
vsir,
lambda x: x['owner']['videoOwnerRenderer']['title'],
- compat_str))
+ dict))
rows = try_get(
vsir,
lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
if v:
info[d_k] = v
+ is_private = bool_or_none(video_details.get('isPrivate'))
+ is_unlisted = bool_or_none(microformat.get('isUnlisted'))
+ is_membersonly = None
+ is_premium = None
+ if initial_data and is_private is not None:
+ is_membersonly = False
+ is_premium = False
+ contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
+ for content in contents or []:
+ badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
+ for badge in badges or []:
+ label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label']) or ''
+ if label.lower() == 'members only':
+ is_membersonly = True
+ break
+ elif label.lower() == 'premium':
+ is_premium = True
+ break
+ if is_membersonly or is_premium:
+ break
+
+ # TODO: Add this for playlists
+ info['availability'] = self._availability(
+ is_private=is_private,
+ needs_premium=is_premium,
+ needs_subscription=is_membersonly,
+ needs_auth=info['age_limit'] >= 18,
+ is_unlisted=None if is_private is None else is_unlisted)
+
# get xsrf for annotations or comments
- get_annotations = self._downloader.params.get('writeannotations', False)
- get_comments = self._downloader.params.get('getcomments', False)
+ get_annotations = self.get_param('writeannotations', False)
+ get_comments = self.get_param('getcomments', False)
if get_annotations or get_comments:
xsrf_token = None
ytcfg = self._extract_ytcfg(video_id, webpage)
errnote='Unable to download video annotations', fatal=False,
data=urlencode_postdata({xsrf_field_name: xsrf_token}))
- # Get comments
- # TODO: Refactor and move to seperate function
- def extract_comments():
- expected_video_comment_count = 0
- video_comments = []
- comment_xsrf = xsrf_token
-
- def find_value(html, key, num_chars=2, separator='"'):
- pos_begin = html.find(key) + len(key) + num_chars
- pos_end = html.find(separator, pos_begin)
- return html[pos_begin: pos_end]
-
- def search_dict(partial, key):
- if isinstance(partial, dict):
- for k, v in partial.items():
- if k == key:
- yield v
- else:
- for o in search_dict(v, key):
- yield o
- elif isinstance(partial, list):
- for i in partial:
- for o in search_dict(i, key):
- yield o
-
- continuations = []
- if initial_data:
- try:
- ncd = next(search_dict(initial_data, 'nextContinuationData'))
- continuations = [ncd['continuation']]
- # Handle videos where comments have been disabled entirely
- except StopIteration:
- pass
-
- def get_continuation(continuation, session_token, replies=False):
- query = {
- 'pbj': 1,
- 'ctoken': continuation,
- }
- if replies:
- query['action_get_comment_replies'] = 1
- else:
- query['action_get_comments'] = 1
-
- while True:
- content, handle = self._download_webpage_handle(
- 'https://www.youtube.com/comment_service_ajax',
- video_id,
- note=False,
- expected_status=[413],
- data=urlencode_postdata({
- 'session_token': session_token
- }),
- query=query,
- headers={
- 'Accept': '*/*',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
- 'X-YouTube-Client-Name': '1',
- 'X-YouTube-Client-Version': '2.20201202.06.01'
- }
- )
-
- response_code = handle.getcode()
- if (response_code == 200):
- return self._parse_json(content, video_id)
- if (response_code == 413):
- return None
- raise ExtractorError('Unexpected HTTP error code: %s' % response_code)
-
- first_continuation = True
- chain_msg = ''
- self.to_screen('Downloading comments')
- while continuations:
- continuation = continuations.pop()
- comment_response = get_continuation(continuation, comment_xsrf)
- if not comment_response:
- continue
- if list(search_dict(comment_response, 'externalErrorMessage')):
- raise ExtractorError('Error returned from server: ' + next(search_dict(comment_response, 'externalErrorMessage')))
-
- if 'continuationContents' not in comment_response['response']:
- # Something is wrong here. Youtube won't accept this continuation token for some reason and responds with a user satisfaction dialog (error?)
- continue
- # not sure if this actually helps
- if 'xsrf_token' in comment_response:
- comment_xsrf = comment_response['xsrf_token']
-
- item_section = comment_response['response']['continuationContents']['itemSectionContinuation']
- if first_continuation:
- expected_video_comment_count = int(item_section['header']['commentsHeaderRenderer']['countText']['runs'][0]['text'].replace(' Comments', '').replace('1 Comment', '1').replace(',', ''))
- first_continuation = False
- if 'contents' not in item_section:
- # continuation returned no comments?
- # set an empty array as to not break the for loop
- item_section['contents'] = []
-
- for meta_comment in item_section['contents']:
- comment = meta_comment['commentThreadRenderer']['comment']['commentRenderer']
- video_comments.append({
- 'id': comment['commentId'],
- 'text': ''.join([c['text'] for c in try_get(comment, lambda x: x['contentText']['runs'], list) or []]),
- 'time_text': ''.join([c['text'] for c in comment['publishedTimeText']['runs']]),
- 'author': comment.get('authorText', {}).get('simpleText', ''),
- 'votes': comment.get('voteCount', {}).get('simpleText', '0'),
- 'author_thumbnail': comment['authorThumbnail']['thumbnails'][-1]['url'],
- 'parent': 'root'
- })
- if 'replies' not in meta_comment['commentThreadRenderer']:
- continue
-
- reply_continuations = [rcn['nextContinuationData']['continuation'] for rcn in meta_comment['commentThreadRenderer']['replies']['commentRepliesRenderer']['continuations']]
- while reply_continuations:
- time.sleep(1)
- continuation = reply_continuations.pop()
- replies_data = get_continuation(continuation, comment_xsrf, True)
- if not replies_data or 'continuationContents' not in replies_data[1]['response']:
- continue
-
- if self._downloader.params.get('verbose', False):
- chain_msg = ' (chain %s)' % comment['commentId']
- self.to_screen('Comments downloaded: %d of ~%d%s' % (len(video_comments), expected_video_comment_count, chain_msg))
- reply_comment_meta = replies_data[1]['response']['continuationContents']['commentRepliesContinuation']
- for reply_meta in reply_comment_meta.get('contents', {}):
- reply_comment = reply_meta['commentRenderer']
- video_comments.append({
- 'id': reply_comment['commentId'],
- 'text': ''.join([c['text'] for c in reply_comment['contentText']['runs']]),
- 'time_text': ''.join([c['text'] for c in reply_comment['publishedTimeText']['runs']]),
- 'author': reply_comment.get('authorText', {}).get('simpleText', ''),
- 'votes': reply_comment.get('voteCount', {}).get('simpleText', '0'),
- 'author_thumbnail': reply_comment['authorThumbnail']['thumbnails'][-1]['url'],
- 'parent': comment['commentId']
- })
- if 'continuations' not in reply_comment_meta or len(reply_comment_meta['continuations']) == 0:
- continue
- reply_continuations += [rcn['nextContinuationData']['continuation'] for rcn in reply_comment_meta['continuations']]
-
- self.to_screen('Comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
- if 'continuations' in item_section:
- continuations += [ncd['nextContinuationData']['continuation'] for ncd in item_section['continuations']]
- time.sleep(1)
-
- self.to_screen('Total comments downloaded: %d of ~%d' % (len(video_comments), expected_video_comment_count))
- return {
- 'comments': video_comments,
- 'comment_count': expected_video_comment_count
- }
-
if get_comments:
- info['__post_extractor'] = extract_comments
+ info['__post_extractor'] = lambda: self._extract_comments(ytcfg, video_id, contents, webpage, xsrf_token)
self.mark_watched(video_id, player_response)
invidio\.us
)/
(?:
- (?:channel|c|user)/|
+ (?P<channel_type>channel|c|user|browse)/|
(?P<not_channel>
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
IE_NAME = 'youtube:tab'
_TESTS = [{
- # playlists, multipage
+ 'note': 'playlists, multipage',
'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
'playlist_mincount': 94,
'info_dict': {
'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
},
}, {
- # playlists, multipage, different order
+ 'note': 'playlists, multipage, different order',
'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
'playlist_mincount': 94,
'info_dict': {
'uploader': 'Игорь Клейнер',
},
}, {
- # playlists, singlepage
+ 'note': 'playlists, series',
+ 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
+ 'playlist_mincount': 5,
+ 'info_dict': {
+ 'id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'title': '3Blue1Brown - Playlists',
+ 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
+ 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'uploader': '3Blue1Brown',
+ },
+ }, {
+ 'note': 'playlists, singlepage',
'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
'playlist_mincount': 4,
'info_dict': {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
}, {
- # basic, single video playlist
+ 'note': 'basic, single video playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 1,
}, {
- # empty playlist
+ 'note': 'empty playlist',
'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
'info_dict': {
'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
},
'playlist_count': 0,
}, {
- # Home tab
+ 'note': 'Home tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 2,
}, {
- # Videos tab
+ 'note': 'Videos tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 975,
}, {
- # Videos tab, sorted by popular
+ 'note': 'Videos tab, sorted by popular',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 199,
}, {
- # Playlists tab
+ 'note': 'Playlists tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 17,
}, {
- # Community tab
+ 'note': 'Community tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 18,
}, {
- # Channels tab
+ 'note': 'Channels tab',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
},
'playlist_mincount': 12,
+ }, {
+ 'note': 'Search tab',
+ 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
+ 'playlist_mincount': 40,
+ 'info_dict': {
+ 'id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'title': '3Blue1Brown - Search - linear algebra',
+ 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
+ 'uploader': '3Blue1Brown',
+ 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
+ },
}, {
'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
'only_matching': True,
},
'playlist_mincount': 1123,
}, {
- # even larger playlist, 8832 videos
+ 'note': 'even larger playlist, 8832 videos',
'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
'only_matching': True,
}, {
},
'playlist_mincount': 21,
}, {
- # https://github.com/ytdl-org/youtube-dl/issues/21844
+ 'note': 'Playlist with "show unavailable videos" button',
+ 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
+ 'info_dict': {
+ 'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
+ 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
+ 'uploader': 'Phim Siêu Nhân Nhật Bản',
+ 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
+ },
+ 'playlist_mincount': 200,
+ }, {
+ 'note': 'Playlist with unavailable videos in page 7',
+ 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
+ 'info_dict': {
+ 'title': 'Uploads from BlankTV',
+ 'id': 'UU8l9frL61Yl5KFOl87nIm2w',
+ 'uploader': 'BlankTV',
+ 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
+ },
+ 'playlist_mincount': 1000,
+ }, {
+ 'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
'info_dict': {
'title': 'Data Analysis with Dr Mike Pound',
'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
'only_matching': True,
}, {
- # Playlist URL that does not actually serve a playlist
+ 'note': 'Playlist URL that does not actually serve a playlist',
'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
'info_dict': {
'id': 'FqZTN594JQw',
}, {
'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
'info_dict': {
- 'id': '9Auq9mYxFEE',
+ 'id': 'X1whbWASnNQ', # This will keep changing
'ext': 'mp4',
'title': compat_str,
'uploader': 'Sky News',
'uploader_id': 'skynews',
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
- 'upload_date': '20191102',
- 'description': 'md5:85ddd75d888674631aaf9599a9a0b0ae',
+ 'upload_date': r're:\d{8}',
+ 'description': compat_str,
'categories': ['News & Politics'],
'tags': list,
'like_count': int,
'params': {
'skip_download': True,
},
+ 'expected_warnings': ['Downloading just video ', 'Ignoring subtitle tracks found in '],
}, {
'url': 'https://www.youtube.com/user/TheYoungTurks/live',
'info_dict': {
}, {
'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
'only_matching': True,
+ }, {
+ 'note': 'A channel that is not live. Should raise error',
+ 'url': 'https://www.youtube.com/user/numberphile/live',
+ 'only_matching': True,
}, {
'url': 'https://www.youtube.com/feed/trending',
'only_matching': True,
}, {
- # needs auth
'url': 'https://www.youtube.com/feed/library',
'only_matching': True,
}, {
- # needs auth
'url': 'https://www.youtube.com/feed/history',
'only_matching': True,
}, {
- # needs auth
'url': 'https://www.youtube.com/feed/subscriptions',
'only_matching': True,
}, {
- # needs auth
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
- # no longer available?
+ 'note': 'Recommended - redirects to home page',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
- # inline playlist with not always working continuations
+ 'note': 'inline playlist with not always working continuations',
'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
'only_matching': True,
}, {
}, {
'url': 'https://www.youtube.com/TheYoungTurks/live',
'only_matching': True,
+ }, {
+ 'url': 'https://www.youtube.com/hashtag/cctv9',
+ 'info_dict': {
+ 'id': 'cctv9',
+ 'title': '#cctv9',
+ },
+ 'playlist_mincount': 350,
+ }, {
+ 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
+ 'only_matching': True,
+ }, {
+ 'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
+ 'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
+ 'only_matching': True
+ }, {
+ 'note': '/browse/ should redirect to /channel/',
+ 'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
+ 'only_matching': True
+ }, {
+ 'note': 'VLPL, should redirect to playlist?list=PL...',
+ 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
+ 'info_dict': {
+ 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
+ 'uploader': 'NoCopyrightSounds',
+ 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
+ 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+ 'title': 'NCS Releases',
+ },
+ 'playlist_mincount': 166,
+ }, {
+ 'note': 'Topic, should redirect to playlist?list=UU...',
+ 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
+ 'info_dict': {
+ 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
+ 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
+ 'title': 'Uploads from Royalty Free Music - Topic',
+ 'uploader': 'Royalty Free Music - Topic',
+ },
+ 'expected_warnings': [
+ 'A channel/user page was given',
+ 'The URL does not have a videos tab',
+ ],
+ 'playlist_mincount': 101,
+ }, {
+ 'note': 'Topic without a UU playlist',
+ 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
+ 'info_dict': {
+ 'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
+ 'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
+ },
+ 'expected_warnings': [
+ 'A channel/user page was given',
+ 'The URL does not have a videos tab',
+ 'Falling back to channel URL',
+ ],
+ 'playlist_mincount': 9,
+ }, {
+ 'note': 'Youtube music Album',
+ 'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
+ 'info_dict': {
+ 'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
+ 'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
+ },
+ 'playlist_count': 50,
}]
@classmethod
channel_url, 'channel id')
@staticmethod
- def _extract_grid_item_renderer(item):
- for item_kind in ('Playlist', 'Video', 'Channel'):
- renderer = item.get('grid%sRenderer' % item_kind)
- if renderer:
+ def _extract_basic_item_renderer(item):
+ # Return the renderer dict contained in `item`: either one of the known
+ # "basic" (non-grid) renderer keys, or any key shaped like 'grid*Renderer'.
+ # Falls through (returns None implicitly) when no renderer is found.
+ # Modified from _extract_grid_item_renderer
+ known_basic_renderers = (
+ 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
+ )
+ for key, renderer in item.items():
+ if not isinstance(renderer, dict):
+ continue
+ elif key in known_basic_renderers:
+ return renderer
+ elif key.startswith('grid') and key.endswith('Renderer'):
return renderer
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
continue
- renderer = self._extract_grid_item_renderer(item)
+ renderer = self._extract_basic_item_renderer(item)
if not isinstance(renderer, dict):
continue
title = try_get(
- renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+ renderer, (lambda x: x['title']['runs'][0]['text'],
+ lambda x: x['title']['simpleText']), compat_str)
# playlist
playlist_id = renderer.get('playlistId')
if playlist_id:
'https://www.youtube.com/playlist?list=%s' % playlist_id,
ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
+ continue
# video
video_id = renderer.get('videoId')
if video_id:
yield self._extract_video(renderer)
+ continue
# channel
channel_id = renderer.get('channelId')
if channel_id:
yield self.url_result(
'https://www.youtube.com/channel/%s' % channel_id,
ie=YoutubeTabIE.ie_key(), video_title=title)
+ continue
+ # generic endpoint URL support
+ ep_url = urljoin('https://www.youtube.com/', try_get(
+ renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
+ compat_str))
+ if ep_url:
+ for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
+ if ie.suitable(ep_url):
+ yield self.url_result(
+ ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
+ break
def _shelf_entries_from_content(self, shelf_renderer):
content = shelf_renderer.get('content')
if not isinstance(content, dict):
return
- renderer = content.get('gridRenderer')
+ renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
if renderer:
# TODO: add support for nested playlists so each shelf is processed
# as separate playlist
continue
yield self._extract_video(renderer)
- r""" # Not needed in the new implementation
- def _itemSection_entries(self, item_sect_renderer):
- for content in item_sect_renderer['contents']:
- if not isinstance(content, dict):
- continue
- renderer = content.get('videoRenderer', {})
- if not isinstance(renderer, dict):
- continue
- video_id = renderer.get('videoId')
- if not video_id:
- continue
- yield self._extract_video(renderer)
- """
-
def _rich_entries(self, rich_grid_renderer):
renderer = try_get(
rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
return
# video attachment
video_renderer = try_get(
- post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict)
- video_id = None
- if video_renderer:
- entry = self._video_entry(video_renderer)
+ post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
+ video_id = video_renderer.get('videoId')
+ if video_id:
+ entry = self._extract_video(video_renderer)
if entry:
yield entry
+ # playlist attachment
+ playlist_id = try_get(
+ post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
+ if playlist_id:
+ yield self.url_result(
+ 'https://www.youtube.com/playlist?list=%s' % playlist_id,
+ ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
# inline video links
runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
for run in runs:
ep_video_id = YoutubeIE._match_id(ep_url)
if video_id == ep_video_id:
continue
- yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=video_id)
+ yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
def _post_thread_continuation_entries(self, post_thread_continuation):
contents = post_thread_continuation.get('contents')
for entry in self._post_thread_entries(renderer):
yield entry
+ r''' # unused
+ def _rich_grid_entries(self, contents):
+ for content in contents:
+ video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
+ if video_renderer:
+ entry = self._video_entry(video_renderer)
+ if entry:
+ yield entry
+ '''
+
@staticmethod
def _build_continuation_query(continuation, ctp=None):
query = {
ctp = continuation_ep.get('clickTrackingParams')
return YoutubeTabIE._build_continuation_query(continuation, ctp)
- def _entries(self, tab, identity_token):
+ def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):
def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
for entry in extract_entries(parent_renderer):
yield entry
continuation = continuation_list[0]
-
- headers = {
- 'x-youtube-client-name': '1',
- 'x-youtube-client-version': '2.20201112.04.01',
- }
- if identity_token:
- headers['x-youtube-identity-token'] = identity_token
+ context = self._extract_context(ytcfg)
+ visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
for page_num in itertools.count(1):
if not continuation:
break
- retries = self._downloader.params.get('extractor_retries', 3)
- count = -1
- last_error = None
- while count < retries:
- count += 1
- if last_error:
- self.report_warning('%s. Retrying ...' % last_error)
- try:
- browse = self._download_json(
- 'https://www.youtube.com/browse_ajax', None,
- 'Downloading page %d%s'
- % (page_num, ' (retry #%d)' % count if count else ''),
- headers=headers, query=continuation)
- except ExtractorError as e:
- if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
- # Downloading page may result in intermittent 5xx HTTP error
- # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
- last_error = 'HTTP Error %s' % e.cause.code
- if count < retries:
- continue
- raise
- else:
- response = try_get(browse, lambda x: x[1]['response'], dict)
+ query = {
+ 'continuation': continuation['continuation'],
+ 'clickTracking': {'clickTrackingParams': continuation['itct']}
+ }
+ headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+ response = self._extract_response(
+ item_id='%s page %s' % (item_id, page_num),
+ query=query, headers=headers, ytcfg=ytcfg,
+ check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
- # Youtube sometimes sends incomplete data
- # See: https://github.com/ytdl-org/youtube-dl/issues/28194
- if response.get('continuationContents') or response.get('onResponseReceivedActions'):
- break
- last_error = 'Incomplete data recieved'
- if not browse or not response:
+ if not response:
break
+ visitor_data = try_get(
+ response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
'gridPlaylistRenderer': (self._grid_entries, 'items'),
'gridVideoRenderer': (self._grid_entries, 'items'),
'playlistVideoRenderer': (self._playlist_entries, 'contents'),
- 'itemSectionRenderer': (self._playlist_entries, 'contents'),
+ 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
'richItemRenderer': (extract_entries, 'contents'), # for hashtag
+ 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
}
+ on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
continuation_items = try_get(
- response, lambda x: x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'], list)
+ on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
video_items_renderer = None
for key, value in continuation_item.items():
@staticmethod
def _extract_selected_tab(tabs):
for tab in tabs:
- if try_get(tab, lambda x: x['tabRenderer']['selected'], bool):
- return tab['tabRenderer']
+ # A tab may be a plain or an expandable renderer; accept either container
+ renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
+ if renderer.get('selected') is True:
+ return renderer
else:
+ # for-else: reached only when no tab was marked selected
raise ExtractorError('Unable to find selected tab')
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
- def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
+ def _extract_from_tabs(self, item_id, webpage, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
thumbnails_list = tags = []
channel_name = renderer.get('title')
channel_url = renderer.get('channelUrl')
channel_id = renderer.get('externalId')
-
- if not renderer:
+ else:
renderer = try_get(
data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+
if renderer:
title = renderer.get('title')
description = renderer.get('description', '')
'width': int_or_none(t.get('width')),
'height': int_or_none(t.get('height')),
})
-
if playlist_id is None:
playlist_id = item_id
if title is None:
- title = playlist_id
+ title = (
+ try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
+ or playlist_id)
title += format_field(selected_tab, 'title', ' - %s')
+ title += format_field(selected_tab, 'expandedText', ' - %s')
metadata = {
'playlist_id': playlist_id,
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
return self.playlist_result(
- self._entries(selected_tab, identity_token),
+ self._entries(
+ selected_tab, playlist_id,
+ self._extract_identity_token(webpage, item_id),
+ self._extract_account_syncid(data),
+ self._extract_ytcfg(item_id, webpage)),
**metadata)
- def _extract_from_playlist(self, item_id, url, data, playlist):
+ def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
+ # Mixes are endless playlists served one panel at a time. Yield entries
+ # page by page, stopping when a page is empty, adds nothing new, or the
+ # very first video comes around again (the mix has wrapped).
+ first_id = last_id = None
+ ytcfg = self._extract_ytcfg(playlist_id, webpage)
+ headers = self._generate_api_headers(
+ ytcfg, account_syncid=self._extract_account_syncid(data),
+ identity_token=self._extract_identity_token(webpage, item_id=playlist_id),
+ visitor_data=try_get(self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+ for page_num in itertools.count(1):
+ videos = list(self._playlist_entries(playlist))
+ if not videos:
+ return
+ # Consecutive pages overlap: resume just after the last id yielded
+ start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
+ if start >= len(videos):
+ return
+ for video in videos[start:]:
+ if video['id'] == first_id:
+ self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
+ return
+ yield video
+ first_id = first_id or videos[0]['id']
+ last_id = videos[-1]['id']
+ # NOTE(review): watch_endpoint is None if the last entry carries no
+ # navigationEndpoint, making the .get() calls below raise — confirm
+ # the panel always provides one
+ watch_endpoint = try_get(
+ playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+ query = {
+ 'playlistId': playlist_id,
+ 'videoId': watch_endpoint.get('videoId') or last_id,
+ 'index': watch_endpoint.get('index') or len(videos),
+ # NOTE(review): 'OAE%3D' presumably is the default mix-continuation
+ # params blob — confirm
+ 'params': watch_endpoint.get('params') or 'OAE%3D'
+ }
+ # Ask the 'next' (watch-page) endpoint for the following panel
+ response = self._extract_response(
+ item_id='%s page %d' % (playlist_id, page_num),
+ query=query,
+ ep='next',
+ headers=headers,
+ check_get_keys='contents'
+ )
+ playlist = try_get(
+ response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+
+ def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
- # Inline playlist rendition continuation does not always work
- # at Youtube side, so delegating regular tab-based playlist URL
- # processing whenever possible.
+
+ # Delegating everything except mix playlists to regular tab-based playlist URL
playlist_url = urljoin(url, try_get(
playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
compat_str))
return self.url_result(
playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
video_title=title)
+
return self.playlist_result(
- self._playlist_entries(playlist), playlist_id=playlist_id,
- playlist_title=title)
+ self._extract_mix_playlist(playlist, playlist_id, data, webpage),
+ playlist_id=playlist_id, playlist_title=title)
@staticmethod
def _extract_alerts(data):
+ # Yield (alert_type, message) pairs from the response's 'alerts' list.
for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
if not isinstance(alert_dict, dict):
continue
- for renderer in alert_dict:
- alert = alert_dict[renderer]
+ for alert in alert_dict.values():
alert_type = alert.get('type')
if not alert_type:
continue
- message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
+ message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or ''
if message:
yield alert_type, message
for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
- message = try_get(run, lambda x: x['text'], compat_str)
- if message:
- yield alert_type, message
+ # NOTE(review): if a run lacks 'text', try_get returns None and
+ # `message += None` raises TypeError; also an alert carrying both
+ # simpleText and runs is yielded twice — confirm both are intended.
+ message += try_get(run, lambda x: x['text'], compat_str)
+ if message:
+ yield alert_type, message
- def _extract_identity_token(self, webpage, item_id):
- ytcfg = self._extract_ytcfg(item_id, webpage)
- if ytcfg:
- token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
- if token:
- return token
- return self._search_regex(
- r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
- 'identity token', default=None)
+ def _report_alerts(self, alerts, expected=True):
+ # Split alerts into errors and warnings; warn for everything except
+ # the last error, which is raised as an ExtractorError.
+ errors = []
+ warnings = []
+ for alert_type, alert_message in alerts:
+ if alert_type.lower() == 'error':
+ errors.append([alert_type, alert_message])
+ else:
+ warnings.append([alert_type, alert_message])
+
+ for alert_type, alert_message in (warnings + errors[:-1]):
+ self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+ if errors:
+ raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
+
+ def _extract_and_report_alerts(self, data, *args, **kwargs):
+ # Convenience wrapper: extract alerts from a response and report them.
+ return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
+
+ def _reload_with_unavailable_videos(self, item_id, data, webpage):
+ """
+ Get playlist with unavailable videos if the 'show unavailable videos' button exists.
+ """
+ sidebar_renderer = try_get(
+ data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
+ if not sidebar_renderer:
+ return
+ # Look for the 'show unavailable videos' menu entry to obtain its
+ # browse endpoint (browseId + params).
+ browse_id = params = None
+ for item in sidebar_renderer:
+ if not isinstance(item, dict):
+ continue
+ renderer = item.get('playlistSidebarPrimaryInfoRenderer')
+ menu_renderer = try_get(
+ renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
+ for menu_item in menu_renderer:
+ if not isinstance(menu_item, dict):
+ continue
+ nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
+ text = try_get(
+ nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
+ if not text or text.lower() != 'show unavailable videos':
+ continue
+ browse_endpoint = try_get(
+ nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
+ browse_id = browse_endpoint.get('browseId')
+ params = browse_endpoint.get('params')
+ break
+
+ # NOTE(review): the API request below is made even when the button was
+ # not found (browse_id/params fall back to defaults) — confirm intended.
+ ytcfg = self._extract_ytcfg(item_id, webpage)
+ headers = self._generate_api_headers(
+ ytcfg, account_syncid=self._extract_account_syncid(ytcfg),
+ # NOTE(review): _extract_identity_token is removed from this class
+ # elsewhere in this patch — confirm an equivalent still exists
+ # (e.g. on the base class).
+ identity_token=self._extract_identity_token(webpage, item_id=item_id),
+ visitor_data=try_get(
+ self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+ query = {
+ 'params': params or 'wgYCCAA=',
+ 'browseId': browse_id or 'VL%s' % item_id
+ }
+ return self._extract_response(
+ item_id=item_id, headers=headers, query=query,
+ check_get_keys='contents', fatal=False,
+ note='Downloading API JSON with unavailable videos')
+
+ def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
+ ytcfg=None, check_get_keys=None, ep='browse', fatal=True):
+ # Call the innertube API endpoint `ep`, retrying up to
+ # --extractor-retries times on intermittent HTTP 500/503/404 errors and
+ # on responses that are missing all of `check_get_keys`. Returns the
+ # parsed JSON response, or None when not fatal and retries exhausted.
+ response = None
+ last_error = None
+ count = -1
+ retries = self.get_param('extractor_retries', 3)
+ if check_get_keys is None:
+ check_get_keys = []
+ while count < retries:
+ count += 1
+ if last_error:
+ self.report_warning('%s. Retrying ...' % last_error)
+ try:
+ response = self._call_api(
+ ep=ep, fatal=True, headers=headers,
+ video_id=item_id, query=query,
+ context=self._extract_context(ytcfg),
+ api_key=self._extract_api_key(ytcfg),
+ note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
+ except ExtractorError as e:
+ if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
+ # Downloading page may result in intermittent 5xx HTTP error
+ # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+ last_error = 'HTTP Error %s' % e.cause.code
+ if count < retries:
+ continue
+ if fatal:
+ raise
+ else:
+ self.report_warning(error_to_compat_str(e))
+ return
+
+ else:
+ # Youtube may send alerts if there was an issue with the continuation page
+ self._extract_and_report_alerts(response, expected=False)
+ if not check_get_keys or dict_get(response, check_get_keys):
+ break
+ # Youtube sometimes sends incomplete data
+ # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+ last_error = 'Incomplete data received'
+ if count >= retries:
+ if fatal:
+ raise ExtractorError(last_error)
+ else:
+ self.report_warning(last_error)
+ return
+ return response
+
+ def _extract_webpage(self, url, item_id):
+ # Download the page and parse ytInitialData, retrying when the initial
+ # data lacks both 'contents' and 'currentVideoEndpoint'.
+ # Returns (webpage, data); raises after retries are exhausted.
+ retries = self.get_param('extractor_retries', 3)
+ count = -1
+ last_error = 'Incomplete yt initial data recieved'
+ while count < retries:
+ count += 1
+ # Sometimes youtube returns a webpage with incomplete ytInitialData
+ # See: https://github.com/yt-dlp/yt-dlp/issues/116
+ if count:
+ self.report_warning('%s. Retrying ...' % last_error)
+ webpage = self._download_webpage(
+ url, item_id,
+ 'Downloading webpage%s' % (' (retry #%d)' % count if count else ''))
+ data = self._extract_yt_initial_data(item_id, webpage)
+ if data.get('contents') or data.get('currentVideoEndpoint'):
+ break
+ # Extract alerts here only when there is error
+ self._extract_and_report_alerts(data)
+ if count >= retries:
+ raise ExtractorError(last_error)
+ return webpage, data
+
+ @staticmethod
+ def _smuggle_data(entries, data):
+ # Propagate smuggled data (e.g. is_music_url) onto every entry URL.
+ for entry in entries:
+ if data:
+ entry['url'] = smuggle_url(entry['url'], data)
+ yield entry
def _real_extract(self, url):
+ # Unsmuggle, mark music.youtube.com URLs, delegate to __real_extract,
+ # and re-smuggle the data into any resulting playlist entries.
+ url, smuggled_data = unsmuggle_url(url, {})
+ if self.is_music_url(url):
+ smuggled_data['is_music_url'] = True
+ info_dict = self.__real_extract(url, smuggled_data)
+ if info_dict.get('entries'):
+ info_dict['entries'] = self._smuggle_data(info_dict['entries'], smuggled_data)
+ return info_dict
+
+ # Splits a matching URL into pre / tab / post; the (?(channel_type)...)
+ # conditional group only allows a tab component after a channel-type URL.
+ _url_re = re.compile(r'(?P<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _VALID_URL)
+
+ def __real_extract(self, url, smuggled_data):
+ # Core of _real_extract: normalise the URL (music redirects, channel
+ # home -> /videos, watch?list= -> playlist), download the tab page and
+ # dispatch to tab / inline-playlist / single-video extraction.
item_id = self._match_id(url)
url = compat_urlparse.urlunparse(
compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
- is_home = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
- if is_home is not None and is_home.group('not_channel') is None and item_id != 'feed':
- self._downloader.report_warning(
+ compat_opts = self.get_param('compat_opts', [])
+
+ def get_mobj(url):
+ # Match against _url_re, replacing unmatched groups with ''.
+ mobj = self._url_re.match(url).groupdict()
+ mobj.update((k, '') for k, v in mobj.items() if v is None)
+ return mobj
+
+ mobj = get_mobj(url)
+ # Youtube returns incomplete data if tabname is not lower case
+ pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
+
+ if is_channel:
+ if smuggled_data.get('is_music_url'):
+ if item_id[:2] == 'VL':
+ # Youtube music VL channels have an equivalent playlist
+ item_id = item_id[2:]
+ pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+ elif item_id[:2] == 'MP':
+ # Youtube music albums (/channel/MP...) have a OLAK playlist that can be extracted from the webpage
+ item_id = self._search_regex(
+ r'\\x22audioPlaylistId\\x22:\\x22([0-9A-Za-z_-]+)\\x22',
+ self._download_webpage('https://music.youtube.com/channel/%s' % item_id, item_id),
+ 'playlist id')
+ pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+ elif mobj['channel_type'] == 'browse':
+ # Youtube music /browse/ should be changed to /channel/
+ pre = 'https://www.youtube.com/channel/%s' % item_id
+ if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
+ # Home URLs should redirect to /videos/
+ self.report_warning(
'A channel/user page was given. All the channel\'s videos will be downloaded. '
'To download only the videos in the home page, add a "/featured" to the URL')
- url = '%s/videos%s' % (is_home.group('pre'), is_home.group('post') or '')
+ tab = '/videos'
+
+ # Re-match after any rewrite above so mobj reflects the final URL.
+ url = ''.join((pre, tab, post))
+ mobj = get_mobj(url)
# Handle both video/playlist URLs
- qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+ qs = parse_qs(url)
video_id = qs.get('v', [None])[0]
playlist_id = qs.get('list', [None])[0]
- if is_home is not None and is_home.group('not_channel') is not None and is_home.group('not_channel').startswith('watch') and not video_id:
- if playlist_id:
- self._downloader.report_warning('%s is not a valid Youtube URL. Trying to download playlist %s' % (url, playlist_id))
- url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
- # return self.url_result(playlist_id, ie=YoutubePlaylistIE.ie_key())
- else:
+ if not video_id and mobj['not_channel'].startswith('watch'):
+ if not playlist_id:
+ # If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
raise ExtractorError('Unable to recognize tab page')
+ # Common mistake: https://www.youtube.com/watch?list=playlist_id
+ self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+ url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
+ mobj = get_mobj(url)
+
if video_id and playlist_id:
- if self._downloader.params.get('noplaylist'):
+ if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
- self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+ self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
- retries = self._downloader.params.get('extractor_retries', 3)
- count = -1
- while count < retries:
- count += 1
- # Sometimes youtube returns a webpage with incomplete ytInitialData
- # See: https://github.com/yt-dlp/yt-dlp/issues/116
- if count:
- self.report_warning('Incomplete yt initial data recieved. Retrying ...')
- webpage = self._download_webpage(url, item_id,
- 'Downloading webpage%s' % ' (retry #%d)' % count if count else '')
- identity_token = self._extract_identity_token(webpage, item_id)
- data = self._extract_yt_initial_data(item_id, webpage)
- err_msg = None
- for alert_type, alert_message in self._extract_alerts(data):
- if alert_type.lower() == 'error':
- if err_msg:
- self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
- err_msg = alert_message
- else:
- self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
- if err_msg:
- raise ExtractorError('YouTube said: %s' % err_msg, expected=True)
- if data.get('contents') or data.get('currentVideoEndpoint'):
- break
+ webpage, data = self._extract_webpage(url, item_id)
+
+ tabs = try_get(
+ data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
+ if tabs:
+ selected_tab = self._extract_selected_tab(tabs)
+ tab_name = selected_tab.get('title', '')
+ if 'no-youtube-channel-redirect' not in compat_opts:
+ if mobj['tab'] == '/live':
+ # Live tab should have redirected to the video
+ raise ExtractorError('The channel is not currently live', expected=True)
+ if mobj['tab'] == '/videos' and tab_name.lower() != mobj['tab'][1:]:
+ if not mobj['not_channel'] and item_id[:2] == 'UC':
+ # Topic channels don't have /videos. Use the equivalent playlist instead
+ self.report_warning('The URL does not have a %s tab. Trying to redirect to playlist UU%s instead' % (mobj['tab'][1:], item_id[2:]))
+ pl_id = 'UU%s' % item_id[2:]
+ pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
+ try:
+ pl_webpage, pl_data = self._extract_webpage(pl_url, pl_id)
+ # NOTE(review): case-sensitive comparison to 'error' here,
+ # unlike _report_alerts which lowercases alert_type —
+ # confirm YouTube's alert types are lowercase.
+ for alert_type, alert_message in self._extract_alerts(pl_data):
+ if alert_type == 'error':
+ raise ExtractorError('Youtube said: %s' % alert_message)
+ item_id, url, webpage, data = pl_id, pl_url, pl_webpage, pl_data
+ except ExtractorError:
+ self.report_warning('The playlist gave error. Falling back to channel URL')
+ else:
+ self.report_warning('The URL does not have a %s tab. %s is being downloaded instead' % (mobj['tab'][1:], tab_name))
+
+ self.write_debug('Final URL: %s' % url)
+
+ # YouTube sometimes provides a button to reload playlist with unavailable videos.
+ if 'no-youtube-unavailable-videos' not in compat_opts:
+ data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
+ self._extract_and_report_alerts(data)
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
if tabs:
- return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+ return self._extract_from_tabs(item_id, webpage, data, tabs)
+
playlist = try_get(
data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
if playlist:
- return self._extract_from_playlist(item_id, url, data, playlist)
- # Fallback to video extraction if no playlist alike page is recognized.
- # First check for the current video then try the v attribute of URL query.
+ return self._extract_from_playlist(item_id, url, data, playlist, webpage)
+
video_id = try_get(
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
compat_str) or video_id
if video_id:
+ if mobj['tab'] != '/live': # live tab is expected to redirect to video
+ self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
-# Failed to recognize
+
raise ExtractorError('Unable to recognize tab page')
@classmethod
def suitable(cls, url):
+ # Reject URLs that YoutubeTabIE handles, and (as a lazy-extractor
+ # workaround) URLs carrying a 'v' query arg, i.e. single videos.
- return False if YoutubeTabIE.suitable(url) else super(
- YoutubePlaylistIE, cls).suitable(url)
+ if YoutubeTabIE.suitable(url):
+ return False
+ # Hack for lazy extractors until more generic solution is implemented
+ # (see #28780)
+ from .youtube import parse_qs
+ qs = parse_qs(url)
+ if qs.get('v', [None])[0]:
+ return False
+ return super(YoutubePlaylistIE, cls).suitable(url)
def _real_extract(self, url):
playlist_id = self._match_id(url)
+ # Delegate to YoutubeTabIE via the canonical playlist URL, smuggling a
+ # flag for music.youtube.com URLs so tab extraction can special-case them.
- qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
- if not qs:
- qs = {'list': playlist_id}
- return self.url_result(
- update_url_query('https://www.youtube.com/playlist', qs),
- ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
+ is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
+ url = update_url_query(
+ 'https://www.youtube.com/playlist',
+ parse_qs(url) or {'list': playlist_id})
+ if is_music_url:
+ url = smuggle_url(url, {'is_music_url': True})
+ return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
class YoutubeYtBeIE(InfoExtractor):
ie=YoutubeTabIE.ie_key())
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_TESTS = []
def _entries(self, query, n):
- data = {
- 'context': {
- 'client': {
- 'clientName': 'WEB',
- 'clientVersion': '2.20201021.03.00',
- }
- },
- 'query': query,
- }
+ data = {'query': query}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
total = 0
for page_num in itertools.count(1):
- search = self._download_json(
- 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
- video_id='query "%s"' % query,
- note='Downloading page %s' % page_num,
- errnote='Unable to download API page', fatal=False,
- data=json.dumps(data).encode('utf8'),
- headers={'content-type': 'application/json'})
+ search = self._extract_response(
+ item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
+ check_get_keys=('contents', 'onResponseReceivedCommands')
+ )
if not search:
break
slr_contents = try_get(
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True
- # _MAX_PAGES = 5
_TESTS = []
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
- def _real_initialize(self):
- self._login()
-
def _real_extract(self, url):
return self.url_result(
'https://www.youtube.com/feed/%s' % self._FEED_NAME,
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
- IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
- _VALID_URL = r':ythistory'
+ IE_DESC = 'Youtube watch history, ":ythis" for short (requires authentication)'
+ _VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
'url': ':ythistory',