int_or_none,
intlist_to_bytes,
is_html,
+ join_nonempty,
mimetype2ext,
network_exceptions,
orderedSet,
parse_iso8601,
parse_qs,
qualities,
+ remove_end,
remove_start,
smuggle_url,
str_or_none,
unsmuggle_url,
update_url_query,
url_or_none,
- urlencode_postdata,
urljoin,
variadic,
)
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
+ 'REQUIRE_JS_PLAYER': False
},
'android_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
'clientVersion': '16.20',
},
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 55
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
+ 'REQUIRE_JS_PLAYER': False
},
'android_music': {
'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
}
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
+ 'REQUIRE_JS_PLAYER': False
},
'android_creator': {
'INNERTUBE_CONTEXT': {
'clientVersion': '21.24.100',
},
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 14
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
+ 'REQUIRE_JS_PLAYER': False
},
# ios has HLS live streams
# See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680
'clientVersion': '16.20',
}
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 5
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
+ 'REQUIRE_JS_PLAYER': False
},
'ios_embedded': {
'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
'clientVersion': '16.20',
},
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 66
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
+ 'REQUIRE_JS_PLAYER': False
},
'ios_music': {
'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og',
'clientVersion': '4.32',
},
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 26
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
+ 'REQUIRE_JS_PLAYER': False
},
'ios_creator': {
'INNERTUBE_CONTEXT': {
'clientVersion': '21.24.100',
},
},
- 'INNERTUBE_CONTEXT_CLIENT_NAME': 15
+ 'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
+ 'REQUIRE_JS_PLAYER': False
},
# mweb has 'ultralow' formats
# See: https://github.com/yt-dlp/yt-dlp/pull/557
for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
+ ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
ytcfg['priority'] = 10 * priority(client.split('_', 1)[0])
"""Provide base functions for Youtube extractors"""
_RESERVED_NAMES = (
- r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|'
+ r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
r'shorts|movies|results|shared|hashtag|trending|feed|feeds|'
r'browse|oembed|get_video_info|iframe_api|s/player|'
r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
- r''' # Unused since login is broken
- _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
- _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
-
- _LOOKUP_URL = 'https://accounts.google.com/_/signin/sl/lookup'
- _CHALLENGE_URL = 'https://accounts.google.com/_/signin/sl/challenge'
- _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
- '''
+ _INVIDIOUS_SITES = (
+ # invidious-redirect websites
+ r'(?:www\.)?redirect\.invidious\.io',
+ r'(?:(?:www|dev)\.)?invidio\.us',
+ # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
+ r'(?:www\.)?invidious\.pussthecat\.org',
+ r'(?:www\.)?invidious\.zee\.li',
+ r'(?:www\.)?invidious\.ethibox\.fr',
+ r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
+ # youtube-dl invidious instances list
+ r'(?:(?:www|no)\.)?invidiou\.sh',
+ r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
+ r'(?:www\.)?invidious\.kabi\.tk',
+ r'(?:www\.)?invidious\.mastodon\.host',
+ r'(?:www\.)?invidious\.zapashcanon\.fr',
+ r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
+ r'(?:www\.)?invidious\.tinfoil-hat\.net',
+ r'(?:www\.)?invidious\.himiko\.cloud',
+ r'(?:www\.)?invidious\.reallyancient\.tech',
+ r'(?:www\.)?invidious\.tube',
+ r'(?:www\.)?invidiou\.site',
+ r'(?:www\.)?invidious\.site',
+ r'(?:www\.)?invidious\.xyz',
+ r'(?:www\.)?invidious\.nixnet\.xyz',
+ r'(?:www\.)?invidious\.048596\.xyz',
+ r'(?:www\.)?invidious\.drycat\.fr',
+ r'(?:www\.)?inv\.skyn3t\.in',
+ r'(?:www\.)?tube\.poal\.co',
+ r'(?:www\.)?tube\.connect\.cafe',
+ r'(?:www\.)?vid\.wxzm\.sx',
+ r'(?:www\.)?vid\.mint\.lgbt',
+ r'(?:www\.)?vid\.puffyan\.us',
+ r'(?:www\.)?yewtu\.be',
+ r'(?:www\.)?yt\.elukerio\.org',
+ r'(?:www\.)?yt\.lelux\.fi',
+ r'(?:www\.)?invidious\.ggc-project\.de',
+ r'(?:www\.)?yt\.maisputain\.ovh',
+ r'(?:www\.)?ytprivate\.com',
+ r'(?:www\.)?invidious\.13ad\.de',
+ r'(?:www\.)?invidious\.toot\.koeln',
+ r'(?:www\.)?invidious\.fdn\.fr',
+ r'(?:www\.)?watch\.nettohikari\.com',
+ r'(?:www\.)?invidious\.namazso\.eu',
+ r'(?:www\.)?invidious\.silkky\.cloud',
+ r'(?:www\.)?invidious\.exonip\.de',
+ r'(?:www\.)?invidious\.riverside\.rocks',
+ r'(?:www\.)?invidious\.blamefran\.net',
+ r'(?:www\.)?invidious\.moomoo\.de',
+ r'(?:www\.)?ytb\.trom\.tf',
+ r'(?:www\.)?yt\.cyberhost\.uk',
+ r'(?:www\.)?kgg2m7yk5aybusll\.onion',
+ r'(?:www\.)?qklhadlycap4cnod\.onion',
+ r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
+ r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
+ r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
+ r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
+ r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
+ r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
+ r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
+ r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
+ r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
+ r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
+ )
def _login(self):
"""
Attempt to log in to YouTube.
- True is returned if successful or skipped.
- False is returned if login failed.
-
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
- def warn(message):
- self.report_warning(message)
-
- # username+password login is broken
if (self._LOGIN_REQUIRED
and self.get_param('cookiefile') is None
and self.get_param('cookiesfrombrowser') is None):
'Login details are needed to download this content', method='cookies')
username, password = self._get_login_info()
if username:
- warn('Logging in using username and password is broken. %s' % self._LOGIN_HINTS['cookies'])
- return
-
- # Everything below this is broken!
- r'''
- # No authentication to be performed
- if username is None:
- if self._LOGIN_REQUIRED and self.get_param('cookiefile') is None:
- raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
- # if self.get_param('cookiefile'): # TODO remove 'and False' later - too many people using outdated cookies and open issues, remind them.
- # self.to_screen('[Cookies] Reminder - Make sure to always use up to date cookies!')
- return True
-
- login_page = self._download_webpage(
- self._LOGIN_URL, None,
- note='Downloading login page',
- errnote='unable to fetch login page', fatal=False)
- if login_page is False:
- return
-
- login_form = self._hidden_inputs(login_page)
-
- def req(url, f_req, note, errnote):
- data = login_form.copy()
- data.update({
- 'pstMsg': 1,
- 'checkConnection': 'youtube',
- 'checkedDomains': 'youtube',
- 'hl': 'en',
- 'deviceinfo': '[null,null,null,[],null,"US",null,null,[],"GlifWebSignIn",null,[null,null,[]]]',
- 'f.req': json.dumps(f_req),
- 'flowName': 'GlifWebSignIn',
- 'flowEntry': 'ServiceLogin',
- # TODO: reverse actual botguard identifier generation algo
- 'bgRequest': '["identifier",""]',
- })
- return self._download_json(
- url, None, note=note, errnote=errnote,
- transform_source=lambda s: re.sub(r'^[^[]*', '', s),
- fatal=False,
- data=urlencode_postdata(data), headers={
- 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',
- 'Google-Accounts-XSRF': 1,
- })
-
- lookup_req = [
- username,
- None, [], None, 'US', None, None, 2, False, True,
- [
- None, None,
- [2, 1, None, 1,
- 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
- None, [], 4],
- 1, [None, None, []], None, None, None, True
- ],
- username,
- ]
-
- lookup_results = req(
- self._LOOKUP_URL, lookup_req,
- 'Looking up account info', 'Unable to look up account info')
-
- if lookup_results is False:
- return False
-
- user_hash = try_get(lookup_results, lambda x: x[0][2], compat_str)
- if not user_hash:
- warn('Unable to extract user hash')
- return False
-
- challenge_req = [
- user_hash,
- None, 1, None, [1, None, None, None, [password, None, True]],
- [
- None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
- 1, [None, None, []], None, None, None, True
- ]]
-
- challenge_results = req(
- self._CHALLENGE_URL, challenge_req,
- 'Logging in', 'Unable to log in')
-
- if challenge_results is False:
- return
-
- login_res = try_get(challenge_results, lambda x: x[0][5], list)
- if login_res:
- login_msg = try_get(login_res, lambda x: x[5], compat_str)
- warn(
- 'Unable to login: %s' % 'Invalid password'
- if login_msg == 'INCORRECT_ANSWER_ENTERED' else login_msg)
- return False
-
- res = try_get(challenge_results, lambda x: x[0][-1], list)
- if not res:
- warn('Unable to extract result entry')
- return False
-
- login_challenge = try_get(res, lambda x: x[0][0], list)
- if login_challenge:
- challenge_str = try_get(login_challenge, lambda x: x[2], compat_str)
- if challenge_str == 'TWO_STEP_VERIFICATION':
- # SEND_SUCCESS - TFA code has been successfully sent to phone
- # QUOTA_EXCEEDED - reached the limit of TFA codes
- status = try_get(login_challenge, lambda x: x[5], compat_str)
- if status == 'QUOTA_EXCEEDED':
- warn('Exceeded the limit of TFA codes, try later')
- return False
-
- tl = try_get(challenge_results, lambda x: x[1][2], compat_str)
- if not tl:
- warn('Unable to extract TL')
- return False
-
- tfa_code = self._get_tfa_info('2-step verification code')
-
- if not tfa_code:
- warn(
- 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
- '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
- return False
-
- tfa_code = remove_start(tfa_code, 'G-')
-
- tfa_req = [
- user_hash, None, 2, None,
- [
- 9, None, None, None, None, None, None, None,
- [None, tfa_code, True, 2]
- ]]
-
- tfa_results = req(
- self._TFA_URL.format(tl), tfa_req,
- 'Submitting TFA code', 'Unable to submit TFA code')
-
- if tfa_results is False:
- return False
-
- tfa_res = try_get(tfa_results, lambda x: x[0][5], list)
- if tfa_res:
- tfa_msg = try_get(tfa_res, lambda x: x[5], compat_str)
- warn(
- 'Unable to finish TFA: %s' % 'Invalid TFA code'
- if tfa_msg == 'INCORRECT_ANSWER_ENTERED' else tfa_msg)
- return False
-
- check_cookie_url = try_get(
- tfa_results, lambda x: x[0][-1][2], compat_str)
- else:
- CHALLENGES = {
- 'LOGIN_CHALLENGE': "This device isn't recognized. For your security, Google wants to make sure it's really you.",
- 'USERNAME_RECOVERY': 'Please provide additional information to aid in the recovery process.',
- 'REAUTH': "There is something unusual about your activity. For your security, Google wants to make sure it's really you.",
- }
- challenge = CHALLENGES.get(
- challenge_str,
- '%s returned error %s.' % (self.IE_NAME, challenge_str))
- warn('%s\nGo to https://accounts.google.com/, login and solve a challenge.' % challenge)
- return False
- else:
- check_cookie_url = try_get(res, lambda x: x[2], compat_str)
-
- if not check_cookie_url:
- warn('Unable to extract CheckCookie URL')
- return False
-
- check_cookie_results = self._download_webpage(
- check_cookie_url, None, 'Checking cookie', fatal=False)
-
- if check_cookie_results is False:
- return False
-
- if 'https://myaccount.google.com/' not in check_cookie_results:
- warn('Unable to log in')
- return False
-
- return True
- '''
+ self.report_warning(f'Cannot login to YouTube using username and password. {self._LOGIN_HINTS["cookies"]}')
def _initialize_consent(self):
cookies = self._get_cookies('https://www.youtube.com/')
    def _real_initialize(self):
        """Extractor setup hook: accept the consent cookie wall, then attempt login.

        Login is best-effort — _login() only warns when username/password are
        supplied (that method is broken) — so its result is intentionally ignored.
        """
        # Consent must be initialized before any page/API request is made.
        self._initialize_consent()
        self._login()
_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
- @staticmethod
- def _extract_session_index(*data):
- for ytcfg in data:
- session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
- if session_index is not None:
- return session_index
-
def _extract_client_version(self, ytcfg, default_client='web'):
return self._ytcfg_get_safe(
ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
data=json.dumps(data).encode('utf8'), headers=real_headers,
query={'key': api_key or self._extract_api_key()})
- def extract_yt_initial_data(self, video_id, webpage):
- return self._parse_json(
- self._search_regex(
- (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
- self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
- video_id)
+ def extract_yt_initial_data(self, item_id, webpage, fatal=True):
+ data = self._search_regex(
+ (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
+ self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
+ if data:
+ return self._parse_json(data, item_id, fatal=fatal)
- def _extract_identity_token(self, webpage, item_id):
- if not webpage:
- return None
- ytcfg = self.extract_ytcfg(item_id, webpage)
+ @staticmethod
+ def _extract_session_index(*data):
+ """
+ Index of current account in account list.
+ See: https://github.com/yt-dlp/yt-dlp/pull/519
+ """
+ for ytcfg in data:
+ session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
+ if session_index is not None:
+ return session_index
+
+ # Deprecated?
+ def _extract_identity_token(self, ytcfg=None, webpage=None):
if ytcfg:
token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
if token:
return token
- return self._search_regex(
- r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
- 'identity token', default=None)
+ if webpage:
+ return self._search_regex(
+ r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
+ 'identity token', default=None, fatal=False)
@staticmethod
def _extract_account_syncid(*args):
return delegated_sid
sync_ids = (try_get(
data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
- lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
+ lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
if len(sync_ids) >= 2 and sync_ids[1]:
# datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
# and just "user_syncid||" for primary channel. We only want the channel_syncid
return sync_ids[0]
+ @staticmethod
+ def _extract_visitor_data(*args):
+ """
+ Extracts visitorData from an API response or ytcfg
+ Appears to be used to track session state
+ """
+ return traverse_obj(
+ args, (..., ('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))),
+ expected_type=compat_str, get_all=False)
+
+ @property
+ def is_authenticated(self):
+ return bool(self._generate_sapisidhash_header())
+
def extract_ytcfg(self, video_id, webpage):
if not webpage:
return {}
default='{}'), video_id, fatal=False) or {}
    def generate_api_headers(
            self, *, ytcfg=None, account_syncid=None, session_index=None,
            visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
        """Build the HTTP headers for an InnerTube API request.

        Any value not passed explicitly is extracted from *ytcfg*; headers
        whose value resolves to None are dropped from the result.
        """
        origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
        headers = {
            'X-YouTube-Client-Name': compat_str(
                self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
            'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
            'Origin': origin,
            'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
            'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
            'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
        }
        if session_index is None:
            session_index = self._extract_session_index(ytcfg)
        if account_syncid or session_index is not None:
            # When only a syncid is known, default to the first account (index 0).
            headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0

        auth = self._generate_sapisidhash_header(origin)
        if auth is not None:
            headers['Authorization'] = auth
            headers['X-Origin'] = origin
        # Strip unset entries so we never send an empty token/syncid header.
        return {h: v for h, v in headers.items() if v is not None}
@staticmethod
def _build_api_continuation_query(continuation, ctp=None):
if message:
yield alert_type, message
- def _report_alerts(self, alerts, expected=True, fatal=True):
+ def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
errors = []
warnings = []
for alert_type, alert_message in alerts:
warnings.append([alert_type, alert_message])
for alert_type, alert_message in (warnings + errors[:-1]):
- self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+ self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
if errors:
raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
while count < retries:
count += 1
if last_error:
- self.report_warning('%s. Retrying ...' % last_error)
+ self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
try:
response = self._call_api(
ep=ep, fatal=True, headers=headers,
# We also want to catch all other network exceptions since errors in later pages can be troublesome
# See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
- last_error = error_to_compat_str(e.cause or e)
+ last_error = error_to_compat_str(e.cause or e.msg)
if count < retries:
continue
if fatal:
return
else:
- # Youtube may send alerts if there was an issue with the continuation page
try:
- self._extract_and_report_alerts(response, expected=False)
+ self._extract_and_report_alerts(response, only_once=True)
except ExtractorError as e:
+ # YouTube servers may return errors we want to retry on in a 200 OK response
+ # See: https://github.com/yt-dlp/yt-dlp/issues/839
+ if 'unknown error' in e.msg.lower():
+ last_error = e.msg
+ continue
if fatal:
raise
self.report_warning(error_to_compat_str(e))
'_type': 'url',
'ie_key': YoutubeIE.ie_key(),
'id': video_id,
- 'url': video_id,
+ 'url': f'https://www.youtube.com/watch?v={video_id}',
'title': title,
'description': description,
'duration': duration,
class YoutubeIE(YoutubeBaseInfoExtractor):
- IE_DESC = 'YouTube.com'
- _INVIDIOUS_SITES = (
- # invidious-redirect websites
- r'(?:www\.)?redirect\.invidious\.io',
- r'(?:(?:www|dev)\.)?invidio\.us',
- # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
- r'(?:www\.)?invidious\.pussthecat\.org',
- r'(?:www\.)?invidious\.zee\.li',
- r'(?:www\.)?invidious\.ethibox\.fr',
- r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
- # youtube-dl invidious instances list
- r'(?:(?:www|no)\.)?invidiou\.sh',
- r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
- r'(?:www\.)?invidious\.kabi\.tk',
- r'(?:www\.)?invidious\.mastodon\.host',
- r'(?:www\.)?invidious\.zapashcanon\.fr',
- r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
- r'(?:www\.)?invidious\.tinfoil-hat\.net',
- r'(?:www\.)?invidious\.himiko\.cloud',
- r'(?:www\.)?invidious\.reallyancient\.tech',
- r'(?:www\.)?invidious\.tube',
- r'(?:www\.)?invidiou\.site',
- r'(?:www\.)?invidious\.site',
- r'(?:www\.)?invidious\.xyz',
- r'(?:www\.)?invidious\.nixnet\.xyz',
- r'(?:www\.)?invidious\.048596\.xyz',
- r'(?:www\.)?invidious\.drycat\.fr',
- r'(?:www\.)?inv\.skyn3t\.in',
- r'(?:www\.)?tube\.poal\.co',
- r'(?:www\.)?tube\.connect\.cafe',
- r'(?:www\.)?vid\.wxzm\.sx',
- r'(?:www\.)?vid\.mint\.lgbt',
- r'(?:www\.)?vid\.puffyan\.us',
- r'(?:www\.)?yewtu\.be',
- r'(?:www\.)?yt\.elukerio\.org',
- r'(?:www\.)?yt\.lelux\.fi',
- r'(?:www\.)?invidious\.ggc-project\.de',
- r'(?:www\.)?yt\.maisputain\.ovh',
- r'(?:www\.)?ytprivate\.com',
- r'(?:www\.)?invidious\.13ad\.de',
- r'(?:www\.)?invidious\.toot\.koeln',
- r'(?:www\.)?invidious\.fdn\.fr',
- r'(?:www\.)?watch\.nettohikari\.com',
- r'(?:www\.)?invidious\.namazso\.eu',
- r'(?:www\.)?invidious\.silkky\.cloud',
- r'(?:www\.)?invidious\.exonip\.de',
- r'(?:www\.)?invidious\.riverside\.rocks',
- r'(?:www\.)?invidious\.blamefran\.net',
- r'(?:www\.)?invidious\.moomoo\.de',
- r'(?:www\.)?ytb\.trom\.tf',
- r'(?:www\.)?yt\.cyberhost\.uk',
- r'(?:www\.)?kgg2m7yk5aybusll\.onion',
- r'(?:www\.)?qklhadlycap4cnod\.onion',
- r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
- r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
- r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
- r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
- r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
- r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
- r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
- r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
- r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
- r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
- )
+ IE_DESC = 'YouTube'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?(1).+)? # if we found the ID, everything can follow
(?:\#|$)""" % {
- 'invidious': '|'.join(_INVIDIOUS_SITES),
+ 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
self._code_cache = {}
self._player_cache = {}
- def _extract_player_url(self, ytcfg=None, webpage=None):
- player_url = try_get(ytcfg, (lambda x: x['PLAYER_JS_URL']), str)
- if not player_url and webpage:
- player_url = self._search_regex(
- r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
- webpage, 'player URL', fatal=False)
+ def _extract_player_url(self, *ytcfgs, webpage=None):
+ player_url = traverse_obj(
+ ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
+ get_all=False, expected_type=compat_str)
if not player_url:
- return None
+ return
if player_url.startswith('//'):
player_url = 'https:' + player_url
elif not re.match(r'https?://', player_url):
'https://www.youtube.com', player_url)
return player_url
+ def _download_player_url(self, video_id, fatal=False):
+ res = self._download_webpage(
+ 'https://www.youtube.com/iframe_api',
+ note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
+ if res:
+ player_version = self._search_regex(
+ r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
+ if player_version:
+ return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
+
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
- def _load_player(self, video_id, player_url, fatal=True) -> bool:
+ def _load_player(self, video_id, player_url, fatal=True):
player_id = self._extract_player_info(player_url)
if player_id not in self._code_cache:
- self._code_cache[player_id] = self._download_webpage(
+ code = self._download_webpage(
player_url, video_id, fatal=fatal,
note='Downloading player ' + player_id,
errnote='Download of %s failed' % player_url)
- return player_id in self._code_cache
+ if code:
+ self._code_cache[player_id] = code
+ return self._code_cache.get(player_id)
def _extract_signature_function(self, video_id, player_url, example_sig):
player_id = self._extract_player_info(player_url)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
- if self._load_player(video_id, player_url):
- code = self._code_cache[player_id]
+ code = self._load_player(video_id, player_url)
+ if code:
res = self._parse_sig_js(code)
test_string = ''.join(map(compat_chr, range(len(example_sig))))
return res
def _print_sig_code(self, func, example_sig):
+ if not self.get_param('youtube_print_sig_code'):
+ return
+
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
- if self.get_param('youtube_print_sig_code'):
- self._print_sig_code(func, s)
+ self._print_sig_code(func, s)
return func(s)
except Exception as e:
- tb = traceback.format_exc()
- raise ExtractorError(
- 'Signature extraction failed: ' + tb, cause=e)
+ raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
+
    def _decrypt_nsig(self, s, video_id, player_url):
        """Turn the encrypted n field into a working signature"""
        if player_url is None:
            raise ExtractorError('Cannot decrypt nsig without player_url')
        # Normalize scheme-relative / path-relative player URLs to absolute https.
        if player_url.startswith('//'):
            player_url = 'https:' + player_url
        elif not re.match(r'https?://', player_url):
            player_url = compat_urlparse.urljoin(
                'https://www.youtube.com', player_url)

        # Per-value memoization: identical n parameters decrypt identically.
        sig_id = ('nsig_value', s)
        if sig_id in self._player_cache:
            return self._player_cache[sig_id]

        try:
            # Second cache level: the extracted JS transform function per player URL.
            player_id = ('nsig', player_url)
            if player_id not in self._player_cache:
                self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
            func = self._player_cache[player_id]
            self._player_cache[sig_id] = func(s)
            self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
            return self._player_cache[sig_id]
        except Exception as e:
            raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
+
+ def _extract_n_function_name(self, jscode):
+ return self._search_regex(
+ (r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]{3})\([a-zA-Z0-9]\)',),
+ jscode, 'Initial JS player n function name', group='nfunc')
+
    def _extract_n_function(self, video_id, player_url):
        """Return a callable that maps an n value to its decrypted form."""
        player_id = self._extract_player_info(player_url)
        # func_code is (argnames, code) as produced by JSInterpreter.extract_function_code.
        func_code = self._downloader.cache.load('youtube-nsig', player_id)

        if func_code:
            # NOTE(review): on this cached path JSInterpreter is constructed from the
            # (argnames, code) tuple rather than the full player JS; the actual code
            # is supplied again via extract_function_from_code below — confirm the
            # constructor argument is unused in that flow.
            jsi = JSInterpreter(func_code)
        else:
            jscode = self._load_player(video_id, player_url)
            funcname = self._extract_n_function_name(jscode)
            jsi = JSInterpreter(jscode)
            func_code = jsi.extract_function_code(funcname)
            # Persist the extracted function so later runs skip the player download.
            self._downloader.cache.store('youtube-nsig', player_id, func_code)

        if self.get_param('youtube_print_sig_code'):
            self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')

        return lambda s: jsi.extract_function_from_code(*func_code)([s])
def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
"""
raise ExtractorError(error_msg)
self.report_warning(error_msg)
return
- if self._load_player(video_id, player_url, fatal=fatal):
- player_id = self._extract_player_info(player_url)
- code = self._code_cache[player_id]
+ code = self._load_player(video_id, player_url, fatal=fatal)
+ if code:
sts = int_or_none(self._search_regex(
r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
'JS player signature timestamp', group='sts', fatal=fatal))
'parent': parent or 'root'
}
- def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
- ytcfg, video_id, parent=None, comment_counts=None):
+ def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, comment_counts=None):
def extract_header(contents):
- _total_comments = 0
_continuation = None
for content in contents:
comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
if expected_comment_count:
comment_counts[1] = expected_comment_count
self.to_screen('Downloading ~%d comments' % expected_comment_count)
- _total_comments = comment_counts[1]
sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top
sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
self.to_screen('Sorting comments by %s' % sort_text)
break
- return _total_comments, _continuation
+ return _continuation
def extract_thread(contents):
if not parent:
if comment_replies_renderer:
comment_counts[2] += 1
comment_entries_iter = self._comment_entries(
- comment_replies_renderer, identity_token, account_syncid, ytcfg,
- video_id, parent=comment.get('id'), comment_counts=comment_counts)
+ comment_replies_renderer, ytcfg, video_id,
+ parent=comment.get('id'), comment_counts=comment_counts)
for reply_comment in comment_entries_iter:
yield reply_comment
continuation_token = self._generate_comment_continuation(video_id)
continuation = self._build_api_continuation_query(continuation_token, None)
+ message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
+ if message and not parent:
+ self.report_warning(message, video_id=video_id)
+
visitor_data = None
is_first_continuation = parent is None
for page_num in itertools.count(0):
if not continuation:
break
- headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+ headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
if page_num == 0:
if is_first_continuation:
lambda x: x['appendContinuationItemsAction']['continuationItems']),
list) or []
if is_first_continuation:
- total_comments, continuation = extract_header(continuation_items)
- if total_comments:
- yield total_comments
+ continuation = extract_header(continuation_items)
is_first_continuation = False
if continuation:
break
continue
if is_first_continuation:
header_continuation_items = [continuation_renderer.get('header') or {}]
- total_comments, continuation = extract_header(header_continuation_items)
- if total_comments:
- yield total_comments
+ continuation = extract_header(header_continuation_items)
is_first_continuation = False
if continuation:
break
[bytes_to_intlist(base64.b64decode(part)) for part in parts]))
return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8')
    def _get_comments(self, ytcfg, video_id, contents, webpage):
        """Entry for comment extraction"""
        def _real_comment_extract(contents):
            # Locate the item section holding the comment thread; other
            # itemSectionRenderers (e.g. related videos) are skipped.
            renderer = next((
                item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
                if item.get('sectionIdentifier') == 'comment-item-section'), None)
            yield from self._comment_entries(renderer, ytcfg, video_id)

        # None means no limit: islice(..., 0, None) yields everything.
        max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
        # Force English regardless of account setting to prevent parsing issues
        # See: https://github.com/yt-dlp/yt-dlp/issues/532
        ytcfg = copy.deepcopy(ytcfg)
        traverse_obj(
            ytcfg, ('INNERTUBE_CONTEXT', 'client'), expected_type=dict, default={})['hl'] = 'en'
        return itertools.islice(_real_comment_extract(contents), 0, max_comments)
@staticmethod
- def _generate_player_context(sts=None):
+ def _get_checkok_params():
+ return {'contentCheckOk': True, 'racyCheckOk': True}
+
+ @classmethod
+ def _generate_player_context(cls, sts=None):
context = {
'html5Preference': 'HTML5_PREF_WANTS',
}
'playbackContext': {
'contentPlaybackContext': context
},
- 'contentCheckOk': True,
- 'racyCheckOk': True
+ **cls._get_checkok_params()
}
@staticmethod
def _is_unplayable(player_response):
return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
- def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr):
+ def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
- sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False)
+ sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
headers = self.generate_api_headers(
- player_ytcfg, identity_token, syncid,
- default_client=client, session_index=session_index)
+ ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
yt_query = {'videoId': video_id}
yt_query.update(self._generate_player_context(sts))
webpage = self._download_webpage(url, video_id, fatal=False, note=f'Downloading {client} config')
return self.extract_ytcfg(video_id, webpage) or {}
- def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg, player_url, identity_token):
+ def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
initial_pr = None
if webpage:
initial_pr = self._extract_yt_initial_variable(
original_clients = clients
clients = clients[::-1]
+ prs = []
def append_client(client_name):
if client_name in INNERTUBE_CLIENTS and client_name not in original_clients:
# extraction of some data. So we return the initial_pr with formats
# stripped out even if not requested by the user
# See: https://github.com/yt-dlp/yt-dlp/issues/501
- yielded_pr = False
if initial_pr:
pr = dict(initial_pr)
pr['streamingData'] = None
- yielded_pr = True
- yield pr
+ prs.append(pr)
last_error = None
+ tried_iframe_fallback = False
+ player_url = None
while clients:
client = clients.pop()
player_ytcfg = master_ytcfg if client == 'web' else {}
if 'configs' not in self._configuration_arg('player_skip'):
player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
+ player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
+ require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
+ if 'js' in self._configuration_arg('player_skip'):
+ require_js_player = False
+ player_url = None
+
+ if not player_url and not tried_iframe_fallback and require_js_player:
+ player_url = self._download_player_url(video_id)
+ tried_iframe_fallback = True
+
try:
pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
- client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, identity_token, player_url, initial_pr)
+ client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
except ExtractorError as e:
if last_error:
self.report_warning(last_error)
continue
if pr:
- yielded_pr = True
- yield pr
+ prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
- if client.endswith('_agegate') and self._is_unplayable(pr) and self._generate_sapisidhash_header():
+ if client.endswith('_agegate') and self._is_unplayable(pr) and self.is_authenticated:
append_client(client.replace('_agegate', '_creator'))
elif self._is_agegated(pr):
append_client(f'{client}_agegate')
if last_error:
- if not yielded_pr:
+ if not len(prs):
raise last_error
self.report_warning(last_error)
+ return prs, player_url
def _extract_formats(self, streaming_data, video_id, player_url, is_live):
- itags, stream_ids = [], []
+ itags, stream_ids = {}, []
itag_qualities, res_qualities = {}, {}
q = qualities([
# Normally tiny is the smallest video-only formats. But
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
fmt_url += '&' + sp + '=' + signature
+ query = parse_qs(fmt_url)
+ throttled = False
+ if query.get('ratebypass') != ['yes'] and query.get('n'):
+ try:
+ fmt_url = update_url_query(fmt_url, {
+ 'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
+ except ExtractorError as e:
+ self.report_warning(
+ f'nsig extraction failed: You may experience throttling for some formats\n'
+ f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
+ throttled = True
+
if itag:
- itags.append(itag)
+ itags[itag] = 'https'
stream_ids.append(stream_id)
tbr = float_or_none(
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': itag,
- 'format_note': ', '.join(filter(None, (
- audio_track.get('displayName'),
- fmt.get('qualityLabel') or quality.replace('audio_quality_', '')))),
- 'fps': int_or_none(fmt.get('fps')),
+ 'format_note': join_nonempty(
+ '%s%s' % (audio_track.get('displayName') or '',
+ ' (default)' if audio_track.get('audioIsDefault') else ''),
+ fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
+ throttled and 'THROTTLED', delim=', '),
+ 'source_preference': -10 if throttled else -1,
+ 'fps': int_or_none(fmt.get('fps')) or None,
'height': height,
'quality': q(quality),
'tbr': tbr,
'url': fmt_url,
'width': int_or_none(fmt.get('width')),
'language': audio_track.get('id', '').split('.')[0],
+ 'language_preference': 1 if audio_track.get('audioIsDefault') else -1,
}
mime_mobj = re.match(
r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True))
get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True)
- def guess_quality(f):
- for val, qdict in ((f.get('format_id'), itag_qualities), (f.get('height'), res_qualities)):
- if val in qdict:
- return q(qdict[val])
- return -1
+ def process_manifest_format(f, proto, itag):
+ if itag in itags:
+ if itags[itag] == proto or f'{itag}-{proto}' in itags:
+ return False
+ itag = f'{itag}-{proto}'
+ if itag:
+ f['format_id'] = itag
+ itags[itag] = proto
+
+ f['quality'] = next((
+ q(qdict[val])
+ for val, qdict in ((f.get('format_id'), itag_qualities), (f.get('height'), res_qualities))
+ if val in qdict), -1)
+ return True
for sd in streaming_data:
hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
if hls_manifest_url:
for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
- itag = self._search_regex(
- r'/itag/(\d+)', f['url'], 'itag', default=None)
- if itag in itags:
- continue
- if itag:
- f['format_id'] = itag
- itags.append(itag)
- f['quality'] = guess_quality(f)
- yield f
+ if process_manifest_format(f, 'hls', self._search_regex(
+ r'/itag/(\d+)', f['url'], 'itag', default=None)):
+ yield f
dash_manifest_url = get_dash and sd.get('dashManifestUrl')
if dash_manifest_url:
for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
- itag = f['format_id']
- if itag in itags:
- continue
- if itag:
- itags.append(itag)
- f['quality'] = guess_quality(f)
- filesize = int_or_none(self._search_regex(
- r'/clen/(\d+)', f.get('fragment_base_url')
- or f['url'], 'file size', default=None))
- if filesize:
- f['filesize'] = filesize
- yield f
+ if process_manifest_format(f, 'dash', f['format_id']):
+ f['filesize'] = int_or_none(self._search_regex(
+ r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
+ yield f
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
base_url = self.http_scheme() + '//www.youtube.com/'
webpage_url = base_url + 'watch?v=' + video_id
- webpage = self._download_webpage(
- webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
+ webpage = None
+ if 'webpage' not in self._configuration_arg('player_skip'):
+ webpage = self._download_webpage(
+ webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
- player_url = self._extract_player_url(master_ytcfg, webpage)
- identity_token = self._extract_identity_token(webpage, video_id)
- player_responses = list(self._extract_player_responses(
+ player_responses, player_url = self._extract_player_responses(
self._get_requested_clients(url, smuggled_data),
- video_id, webpage, master_ytcfg, player_url, identity_token))
+ video_id, webpage, master_ytcfg)
get_first = lambda obj, keys, **kwargs: traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
or search_meta(['og:title', 'twitter:title', 'title']))
video_description = get_first(video_details, 'shortDescription')
- if not smuggled_data.get('force_singlefeed', False):
- if not self.get_param('noplaylist'):
- multifeed_metadata_list = get_first(
- player_responses,
- ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
- expected_type=str)
- if multifeed_metadata_list:
- entries = []
- feed_ids = []
- for feed in multifeed_metadata_list.split(','):
- # Unquote should take place before split on comma (,) since textual
- # fields may contain comma as well (see
- # https://github.com/ytdl-org/youtube-dl/issues/8536)
- feed_data = compat_parse_qs(
- compat_urllib_parse_unquote_plus(feed))
-
- def feed_entry(name):
- return try_get(
- feed_data, lambda x: x[name][0], compat_str)
-
- feed_id = feed_entry('id')
- if not feed_id:
- continue
- feed_title = feed_entry('title')
- title = video_title
- if feed_title:
- title += ' (%s)' % feed_title
- entries.append({
- '_type': 'url_transparent',
- 'ie_key': 'Youtube',
- 'url': smuggle_url(
- '%swatch?v=%s' % (base_url, feed_data['id'][0]),
- {'force_singlefeed': True}),
- 'title': title,
- })
- feed_ids.append(feed_id)
- self.to_screen(
- 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
- % (', '.join(feed_ids), video_id))
- return self.playlist_result(
- entries, video_id, video_title, video_description)
- else:
+ multifeed_metadata_list = get_first(
+ player_responses,
+ ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
+ expected_type=str)
+ if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
+ if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+ else:
+ entries = []
+ feed_ids = []
+ for feed in multifeed_metadata_list.split(','):
+ # Unquote should take place before split on comma (,) since textual
+ # fields may contain comma as well (see
+ # https://github.com/ytdl-org/youtube-dl/issues/8536)
+ feed_data = compat_parse_qs(
+ compat_urllib_parse_unquote_plus(feed))
+
+ def feed_entry(name):
+ return try_get(
+ feed_data, lambda x: x[name][0], compat_str)
+
+ feed_id = feed_entry('id')
+ if not feed_id:
+ continue
+ feed_title = feed_entry('title')
+ title = video_title
+ if feed_title:
+ title += ' (%s)' % feed_title
+ entries.append({
+ '_type': 'url_transparent',
+ 'ie_key': 'Youtube',
+ 'url': smuggle_url(
+ '%swatch?v=%s' % (base_url, feed_data['id'][0]),
+ {'force_singlefeed': True}),
+ 'title': title,
+ })
+ feed_ids.append(feed_id)
+ self.to_screen(
+ 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
+ % (', '.join(feed_ids), video_id))
+ return self.playlist_result(
+ entries, video_id, video_title, video_description)
live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
is_live = get_first(video_details, 'isLive')
if reason:
self.raise_no_formats(reason, expected=True)
- for f in formats:
- if '&c=WEB&' in f['url'] and '&ratebypass=yes&' not in f['url']: # throttled
- f['source_preference'] = -10
- # TODO: this method is not reliable
- f['format_note'] = format_field(f, 'format_note', '%s ') + '(maybe throttled)'
-
# Source is given priority since formats that throttle are given lower source_preference
# When throttling issue is fully fixed, remove this
- self._sort_formats(formats, ('quality', 'height', 'fps', 'source'))
+ self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
keywords = get_first(video_details, 'keywords', expected_type=list) or []
if not keywords and webpage:
thumbnails.append({
'url': thumbnail_url,
})
+ original_thumbnails = thumbnails.copy()
+
# The best resolution thumbnails sometimes does not appear in the webpage
# See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
# List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
- hq_thumbnail_names = ['maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3']
- # TODO: Test them also? - For some videos, even these don't exist
- guaranteed_thumbnail_names = [
+ thumbnail_names = [
+ 'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
'hqdefault', 'hq1', 'hq2', 'hq3', '0',
'mqdefault', 'mq1', 'mq2', 'mq3',
'default', '1', '2', '3'
]
- thumbnail_names = hq_thumbnail_names + guaranteed_thumbnail_names
n_thumbnail_names = len(thumbnail_names)
-
thumbnails.extend({
'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
video_id=video_id, name=name, ext=ext,
webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
- '_test_url': name in hq_thumbnail_names,
} for name in thumbnail_names for ext in ('webp', 'jpg'))
for thumb in thumbnails:
i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
self._remove_duplicate_formats(thumbnails)
+ self._downloader._sort_thumbnails(original_thumbnails)
category = get_first(microformats, 'category') or search_meta('genre')
channel_id = str_or_none(
'title': self._live_title(video_title) if is_live else video_title,
'formats': formats,
'thumbnails': thumbnails,
+ # The best thumbnail that we are sure exists. Prevents unnecessary
+ # URL checking if the user doesn't care about getting the best possible thumbnail
+ 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'upload_date': unified_strdate(
get_first(microformats, 'uploadDate')
}
pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
- # Converted into dicts to remove duplicates
- captions = {
- sub.get('baseUrl'): sub
- for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
- translation_languages = {
- lang.get('languageCode'): lang.get('languageName')
- for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
- subtitles = {}
if pctr:
+ def get_lang_code(track):
+ return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
+ or track.get('languageCode'))
+
+ # Converted into dicts to remove duplicates
+ captions = {
+ get_lang_code(sub): sub
+ for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
+ translation_languages = {
+ lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
+ for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
+
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
for fmt in self._SUBTITLE_FORMATS:
'name': sub_name,
})
- for base_url, caption_track in captions.items():
+ subtitles, automatic_captions = {}, {}
+ for lang_code, caption_track in captions.items():
+ base_url = caption_track.get('baseUrl')
if not base_url:
continue
+ lang_name = self._get_text(caption_track, 'name', max_runs=1)
if caption_track.get('kind') != 'asr':
- lang_code = (
- remove_start(caption_track.get('vssId') or '', '.').replace('.', '-')
- or caption_track.get('languageCode'))
if not lang_code:
continue
process_language(
- subtitles, base_url, lang_code,
- traverse_obj(caption_track, ('name', 'simpleText')),
- {})
- continue
- automatic_captions = {}
+ subtitles, base_url, lang_code, lang_name, {})
+ if not caption_track.get('isTranslatable'):
+ continue
for trans_code, trans_name in translation_languages.items():
if not trans_code:
continue
+ if caption_track.get('kind') != 'asr':
+ trans_code += f'-{lang_code}'
+ trans_name += format_field(lang_name, template=' from %s')
process_language(
- automatic_captions, base_url, trans_code,
- self._get_text(trans_name, max_runs=1),
- {'tlang': trans_code})
- info['automatic_captions'] = automatic_captions
- info['subtitles'] = subtitles
+ automatic_captions, base_url, trans_code, trans_name, {'tlang': trans_code})
+ info['automatic_captions'] = automatic_captions
+ info['subtitles'] = subtitles
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
webpage, self._YT_INITIAL_DATA_RE, video_id,
'yt initial data')
if not initial_data:
- headers = self.generate_api_headers(
- master_ytcfg, identity_token, self._extract_account_syncid(master_ytcfg),
- session_index=self._extract_session_index(master_ytcfg))
-
+ query = {'videoId': video_id}
+ query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
- ytcfg=master_ytcfg, headers=headers, query={'videoId': video_id},
+ ytcfg=master_ytcfg, query=query,
+ headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
try:
# This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
- info['subtitles']['live_chat'] = [{
+ info.setdefault('subtitles', {})['live_chat'] = [{
'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
'video_id': video_id,
'ext': 'json',
needs_auth=info['age_limit'] >= 18,
is_unlisted=None if is_private is None else is_unlisted)
- # get xsrf for annotations or comments
- get_annotations = self.get_param('writeannotations', False)
- get_comments = self.get_param('getcomments', False)
- if get_annotations or get_comments:
- xsrf_token = None
- if master_ytcfg:
- xsrf_token = try_get(master_ytcfg, lambda x: x['XSRF_TOKEN'], compat_str)
- if not xsrf_token:
- xsrf_token = self._search_regex(
- r'([\'"])XSRF_TOKEN\1\s*:\s*([\'"])(?P<xsrf_token>(?:(?!\2).)+)\2',
- webpage, 'xsrf token', group='xsrf_token', fatal=False)
-
- # annotations
- if get_annotations:
- invideo_url = get_first(
- player_responses,
- ('annotations', 0, 'playerAnnotationsUrlsRenderer', 'invideoUrl'),
- expected_type=str)
- if xsrf_token and invideo_url:
- xsrf_field_name = None
- if master_ytcfg:
- xsrf_field_name = try_get(master_ytcfg, lambda x: x['XSRF_FIELD_NAME'], compat_str)
- if not xsrf_field_name:
- xsrf_field_name = self._search_regex(
- r'([\'"])XSRF_FIELD_NAME\1\s*:\s*([\'"])(?P<xsrf_field_name>\w+)\2',
- webpage, 'xsrf field name',
- group='xsrf_field_name', default='session_token')
- info['annotations'] = self._download_webpage(
- self._proto_relative_url(invideo_url),
- video_id, note='Downloading annotations',
- errnote='Unable to download video annotations', fatal=False,
- data=urlencode_postdata({xsrf_field_name: xsrf_token}))
-
- if get_comments:
- info['__post_extractor'] = lambda: self._extract_comments(master_ytcfg, video_id, contents, webpage)
+ info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
self.mark_watched(video_id, player_responses)
class YoutubeTabIE(YoutubeBaseInfoExtractor):
- IE_DESC = 'YouTube.com tab'
+ IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x)
https?://
(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
- invidio\.us
+ %(invidious)s
)/
(?:
(?P<channel_type>channel|c|user|browse)/|
feed/|hashtag/|
(?:playlist|watch)\?.*?\blist=
)|
- (?!(?:%s)\b) # Direct URLs
+ (?!(?:%(reserved_names)s)\b) # Direct URLs
)
(?P<id>[^/?\#&]+)
- ''' % YoutubeBaseInfoExtractor._RESERVED_NAMES
+ ''' % {
+ 'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
+ 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
+ }
IE_NAME = 'youtube:tab'
_TESTS = [{
'url': 'https://www.youtube.com/feed/watch_later',
'only_matching': True,
}, {
- 'note': 'Recommended - redirects to home page',
+ 'note': 'Recommended - redirects to home page.',
'url': 'https://www.youtube.com/feed/recommended',
'only_matching': True,
}, {
'availability': 'unlisted'
},
'playlist_count': 1,
+ }, {
+ 'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
+ 'url': 'https://www.youtube.com/feed/recommended',
+ 'info_dict': {
+ 'id': 'recommended',
+ 'title': 'recommended',
+ },
+ 'playlist_mincount': 50,
+ 'params': {
+ 'skip_download': True,
+ 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
+ },
+ }, {
+ 'note': 'API Fallback: /videos tab, sorted by oldest first',
+ 'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
+ 'info_dict': {
+ 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
+ 'title': 'Cody\'sLab - Videos',
+ 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
+ 'uploader': 'Cody\'sLab',
+ 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
+ },
+ 'playlist_mincount': 650,
+ 'params': {
+ 'skip_download': True,
+ 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
+ },
+ }, {
+ 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
+ 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
+ 'info_dict': {
+ 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
+ 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
+ 'title': 'Uploads from Royalty Free Music - Topic',
+ 'uploader': 'Royalty Free Music - Topic',
+ },
+ 'expected_warnings': [
+ 'A channel/user page was given',
+ 'The URL does not have a videos tab',
+ ],
+ 'playlist_mincount': 101,
+ 'params': {
+ 'skip_download': True,
+ 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
+ },
}]
@classmethod
if entry:
yield entry
'''
- def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):
+ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
def extract_entries(parent_renderer): # this needs to called again for continuation to work with feeds
contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
if not continuation_list[0]:
continuation_list[0] = self._extract_continuation(parent_renderer)
- continuation_list = [None] # Python 2 doesnot support nonlocal
+ continuation_list = [None] # Python 2 does not support nonlocal
tab_content = try_get(tab, lambda x: x['content'], dict)
if not tab_content:
return
for entry in extract_entries(parent_renderer):
yield entry
continuation = continuation_list[0]
- visitor_data = None
for page_num in itertools.count(1):
if not continuation:
break
- headers = self.generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+ headers = self.generate_api_headers(
+ ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
response = self._extract_response(
item_id='%s page %s' % (item_id, page_num),
query=continuation, headers=headers, ytcfg=ytcfg,
if not response:
break
- visitor_data = try_get(
- response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
+ # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
+ # See: https://github.com/ytdl-org/youtube-dl/issues/28702
+ visitor_data = self._extract_visitor_data(response) or visitor_data
known_continuation_renderers = {
'playlistVideoListContinuation': self._playlist_entries,
try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
return {k: v for k, v in uploader.items() if v is not None}
- def _extract_from_tabs(self, item_id, webpage, data, tabs):
+ def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
playlist_id = title = description = channel_url = channel_name = channel_id = None
- thumbnails_list = tags = []
+ thumbnails_list = []
+ tags = []
selected_tab = self._extract_selected_tab(tabs)
renderer = try_get(
'channel': metadata['uploader'],
'channel_id': metadata['uploader_id'],
'channel_url': metadata['uploader_url']})
- ytcfg = self.extract_ytcfg(item_id, webpage)
return self.playlist_result(
self._entries(
- selected_tab, playlist_id,
- self._extract_identity_token(webpage, item_id),
- self._extract_account_syncid(ytcfg, data), ytcfg),
+ selected_tab, playlist_id, ytcfg,
+ self._extract_account_syncid(ytcfg, data),
+ self._extract_visitor_data(data, ytcfg)),
**metadata)
- def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
- first_id = last_id = None
- ytcfg = self.extract_ytcfg(playlist_id, webpage)
- headers = self.generate_api_headers(
- ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
- identity_token=self._extract_identity_token(webpage, item_id=playlist_id))
+ def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
+ first_id = last_id = response = None
for page_num in itertools.count(1):
videos = list(self._playlist_entries(playlist))
if not videos:
last_id = videos[-1]['id']
watch_endpoint = try_get(
playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+ headers = self.generate_api_headers(
+ ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
+ visitor_data=self._extract_visitor_data(response, data, ytcfg))
query = {
'playlistId': playlist_id,
'videoId': watch_endpoint.get('videoId') or last_id,
playlist = try_get(
response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
- def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
+ def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
title = playlist.get('title') or try_get(
data, lambda x: x['titleText']['simpleText'], compat_str)
playlist_id = playlist.get('playlistId') or item_id
video_title=title)
return self.playlist_result(
- self._extract_mix_playlist(playlist, playlist_id, data, webpage),
+ self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
playlist_id=playlist_id, playlist_title=title)
def _extract_availability(self, data):
if renderer:
return renderer
- def _reload_with_unavailable_videos(self, item_id, data, webpage):
+ def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
"""
Get playlist with unavailable videos if the 'show unavailable videos' button exists.
"""
params = browse_endpoint.get('params')
break
- ytcfg = self.extract_ytcfg(item_id, webpage)
headers = self.generate_api_headers(
- ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
- identity_token=self._extract_identity_token(webpage, item_id=item_id),
- visitor_data=try_get(
- self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+ ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
+ visitor_data=self._extract_visitor_data(data, ytcfg))
query = {
'params': params or 'wgYCCAA=',
'browseId': browse_id or 'VL%s' % item_id
check_get_keys='contents', fatal=False, ytcfg=ytcfg,
note='Downloading API JSON with unavailable videos')
- def _extract_webpage(self, url, item_id):
+ def _extract_webpage(self, url, item_id, fatal=True):
retries = self.get_param('extractor_retries', 3)
count = -1
- last_error = 'Incomplete yt initial data recieved'
+ webpage = data = last_error = None
while count < retries:
count += 1
# Sometimes youtube returns a webpage with incomplete ytInitialData
# See: https://github.com/yt-dlp/yt-dlp/issues/116
- if count:
+ if last_error:
self.report_warning('%s. Retrying ...' % last_error)
- webpage = self._download_webpage(
- url, item_id,
- 'Downloading webpage%s' % (' (retry #%d)' % count if count else ''))
- data = self.extract_yt_initial_data(item_id, webpage)
- if data.get('contents') or data.get('currentVideoEndpoint'):
+ try:
+ webpage = self._download_webpage(
+ url, item_id,
+ note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
+ data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
+ except ExtractorError as e:
+ if isinstance(e.cause, network_exceptions):
+ if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
+ last_error = error_to_compat_str(e.cause or e.msg)
+ if count < retries:
+ continue
+ if fatal:
+ raise
+ self.report_warning(error_to_compat_str(e))
break
- # Extract alerts here only when there is error
- self._extract_and_report_alerts(data)
- if count >= retries:
- raise ExtractorError(last_error)
+ else:
+ try:
+ self._extract_and_report_alerts(data)
+ except ExtractorError as e:
+ if fatal:
+ raise
+ self.report_warning(error_to_compat_str(e))
+ break
+
+ if dict_get(data, ('contents', 'currentVideoEndpoint')):
+ break
+
+ last_error = 'Incomplete yt initial data received'
+ if count >= retries:
+ if fatal:
+ raise ExtractorError(last_error)
+ self.report_warning(last_error)
+ break
+
return webpage, data
+ def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
+ data = None
+ if 'webpage' not in self._configuration_arg('skip'):
+ webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
+ ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
+ if not data:
+ if not ytcfg and self.is_authenticated:
+ msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
+ if 'authcheck' not in self._configuration_arg('skip') and fatal:
+ raise ExtractorError(
+ msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
+ ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
+ expected=True)
+ self.report_warning(msg, only_once=True)
+ data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
+ return data, ytcfg
+
+ def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
+ headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
+ resolve_response = self._extract_response(
+ item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
+ ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
+ endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
+ for ep_key, ep in endpoints.items():
+ params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
+ if params:
+ return self._extract_response(
+ item_id=item_id, query=params, ep=ep, headers=headers,
+ ytcfg=ytcfg, fatal=fatal, default_client=default_client,
+ check_get_keys=('contents', 'currentVideoEndpoint'))
+ err_note = 'Failed to resolve url (does the playlist exist?)'
+ if fatal:
+ raise ExtractorError(err_note, expected=True)
+ self.report_warning(err_note, item_id)
+
@staticmethod
def _smuggle_data(entries, data):
for entry in entries:
mobj = get_mobj(url)
# Youtube returns incomplete data if tabname is not lower case
pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
-
if is_channel:
if smuggled_data.get('is_music_url'):
if item_id[:2] == 'VL':
item_id = item_id[2:]
pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
elif item_id[:2] == 'MP':
- # Youtube music albums (/channel/MP...) have a OLAK playlist that can be extracted from the webpage
- item_id = self._search_regex(
- r'\\x22audioPlaylistId\\x22:\\x22([0-9A-Za-z_-]+)\\x22',
- self._download_webpage('https://music.youtube.com/channel/%s' % item_id, item_id),
- 'playlist id')
- pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
+ # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
+ mdata = self._extract_tab_endpoint(
+ 'https://music.youtube.com/channel/%s' % item_id, item_id, default_client='web_music')
+ murl = traverse_obj(
+ mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'), get_all=False, expected_type=compat_str)
+ if not murl:
+ raise ExtractorError('Failed to resolve album to playlist.')
+ return self.url_result(murl, ie=YoutubeTabIE.ie_key())
elif mobj['channel_type'] == 'browse':
# Youtube music /browse/ should be changed to /channel/
pre = 'https://www.youtube.com/channel/%s' % item_id
if video_id and playlist_id:
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
- return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+ return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
- webpage, data = self._extract_webpage(url, item_id)
+ data, ytcfg = self._extract_data(url, item_id)
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
pl_id = 'UU%s' % item_id[2:]
pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
try:
- pl_webpage, pl_data = self._extract_webpage(pl_url, pl_id)
- for alert_type, alert_message in self._extract_alerts(pl_data):
- if alert_type == 'error':
- raise ExtractorError('Youtube said: %s' % alert_message)
- item_id, url, webpage, data = pl_id, pl_url, pl_webpage, pl_data
+ data, ytcfg, item_id, url = *self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True), pl_id, pl_url
except ExtractorError:
self.report_warning('The playlist gave error. Falling back to channel URL')
else:
# YouTube sometimes provides a button to reload playlist with unavailable videos.
if 'no-youtube-unavailable-videos' not in compat_opts:
- data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
- self._extract_and_report_alerts(data)
+ data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
+ self._extract_and_report_alerts(data, only_once=True)
tabs = try_get(
data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
if tabs:
- return self._extract_from_tabs(item_id, webpage, data, tabs)
+ return self._extract_from_tabs(item_id, ytcfg, data, tabs)
playlist = try_get(
data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
if playlist:
- return self._extract_from_playlist(item_id, url, data, playlist, webpage)
+ return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
video_id = try_get(
data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
if video_id:
if mobj['tab'] != '/live': # live tab is expected to redirect to video
self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
- return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+ return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
raise ExtractorError('Unable to recognize tab page')
class YoutubePlaylistIE(InfoExtractor):
- IE_DESC = 'YouTube.com playlists'
+ IE_DESC = 'YouTube playlists'
_VALID_URL = r'''(?x)(?:
(?:https?://)?
(?:\w+\.)?
(?:
(?:
youtube(?:kids)?\.com|
- invidio\.us
+ %(invidious)s
)
/.*?\?.*?\blist=
)?
(?P<id>%(playlist_id)s)
- )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
+ )''' % {
+ 'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
+ 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
+ }
IE_NAME = 'youtube:playlist'
_TESTS = [{
'note': 'issue #673',
def suitable(cls, url):
if YoutubeTabIE.suitable(url):
return False
- # Hack for lazy extractors until more generic solution is implemented
- # (see #28780)
- from .youtube import parse_qs
+ from ..utils import parse_qs
qs = parse_qs(url)
if qs.get('v', [None])[0]:
return False
class YoutubeYtUserIE(InfoExtractor):
- IE_DESC = 'YouTube.com user videos, URL or "ytuser" keyword'
+ IE_DESC = 'YouTube user videos; "ytuser:" prefix'
_VALID_URL = r'ytuser:(?P<id>.+)'
_TESTS = [{
'url': 'ytuser:phihag',
def _real_extract(self, url):
user_id = self._match_id(url)
return self.url_result(
- 'https://www.youtube.com/user/%s' % user_id,
+ 'https://www.youtube.com/user/%s/videos' % user_id,
ie=YoutubeTabIE.ie_key(), video_id=user_id)
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
- IE_DESC = 'YouTube.com liked videos, ":ytfav" for short (requires authentication)'
+ IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
_VALID_URL = r':ytfav(?:ou?rite)?s?'
_LOGIN_REQUIRED = True
_TESTS = [{
class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
- IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
- # there doesn't appear to be a real limit, for example if you search for
- # 'python' you get more than 8.000.000 results
- _MAX_RESULTS = float('inf')
+ IE_DESC = 'YouTube searches'
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_SEARCH_PARAMS = None
_TESTS = []
- def _entries(self, query, n):
+ def _search_results(self, query):
data = {'query': query}
if self._SEARCH_PARAMS:
data['params'] = self._SEARCH_PARAMS
- total = 0
continuation = {}
for page_num in itertools.count(1):
data.update(continuation)
continue
yield self._extract_video(video)
- total += 1
- if total == n:
- return
if not continuation:
break
- def _get_n_results(self, query, n):
- """Get a specified number of results for a query"""
- return self.playlist_result(self._entries(query, n), query, query)
-
class YoutubeSearchDateIE(YoutubeSearchIE):
+ # Same as youtube:search but with results ordered newest-first via the
+ # search params blob ('CAI%3D' is the URL-encoded form of 'CAI=').
 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
 _SEARCH_KEY = 'ytsearchdate'
- IE_DESC = 'YouTube.com searches, newest videos first, "ytsearchdate" keyword'
+ IE_DESC = 'YouTube searches, newest videos first'
 _SEARCH_PARAMS = 'CAI%3D'
class YoutubeSearchURLIE(YoutubeSearchIE):
- IE_DESC = 'YouTube.com search URLs'
+ IE_DESC = 'YouTube search URLs with sorting and filter support'
IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
+ _SEARCH_KEY = None
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
# _MAX_RESULTS = 100
_TESTS = [{
class YoutubeWatchLaterIE(InfoExtractor):
IE_NAME = 'youtube:watchlater'
- IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
+ IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
_VALID_URL = r':ytwatchlater'
_TESTS = [{
'url': ':ytwatchlater',
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
- IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
+ IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_LOGIN_REQUIRED = False
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
- IE_DESC = 'YouTube.com subscriptions feed, ":ytsubs" for short (requires authentication)'
+ IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
_VALID_URL = r':ytsub(?:scription)?s?'
_FEED_NAME = 'subscriptions'
_TESTS = [{
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
- IE_DESC = 'Youtube watch history, ":ythis" for short (requires authentication)'
+ IE_DESC = 'Youtube watch history; ":ythis" keyword (requires cookies)'
_VALID_URL = r':ythis(?:tory)?'
_FEED_NAME = 'history'
_TESTS = [{
expected=True)
+class YoutubeClipIE(InfoExtractor):
+ # Stub extractor for youtube.com/clip/ URLs: clips are not supported, so
+ # the URL is handed to the generic extractor (which ends up downloading
+ # the full source video) after warning the user.
+ IE_NAME = 'youtube:clip'
+ IE_DESC = False # Do not list
+ _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
+
+ def _real_extract(self, url):
+ self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
+ return self.url_result(url, 'Generic')
+
+
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list