variadic,
)
+
+STREAMING_DATA_CLIENT_NAME = '__yt_dlp_client'
# any clients starting with _ cannot be explicitly requested by the user
INNERTUBE_CLIENTS = {
'web': {
return client_name, base, variant[0] if variant else None
+def short_client_name(client_name):
+    """Abbreviate an innertube client name for display: first word truncated
+    to 4 chars plus the initials of the remaining '_'-separated words, upper-cased
+    (with 'embedscreen' first expanded to 'e_s' so it contributes two initials)."""
+    main, *parts = _split_innertube_client(client_name)[0].replace('embedscreen', 'e_s').split('_')
+    return join_nonempty(main[:4], ''.join(x[0] for x in parts)).upper()
+
+
def build_innertube_clients():
THIRD_PARTY = {
'embedUrl': 'https://www.youtube.com/', # Can be any valid URL
r'(?:www\.)?invidious\.pussthecat\.org',
r'(?:www\.)?invidious\.zee\.li',
r'(?:www\.)?invidious\.ethibox\.fr',
+ r'(?:www\.)?iv\.ggtyler\.dev',
+ r'(?:www\.)?inv\.vern\.i2p',
+ r'(?:www\.)?am74vkcrjp2d5v36lcdqgsj2m6x36tbrkhsruoegwfcizzabnfgf5zyd\.onion',
+ r'(?:www\.)?inv\.riverside\.rocks',
+ r'(?:www\.)?invidious\.silur\.me',
+ r'(?:www\.)?inv\.bp\.projectsegfau\.lt',
+ r'(?:www\.)?invidious\.g4c3eya4clenolymqbpgwz3q3tawoxw56yhzk4vugqrl6dtu3ejvhjid\.onion',
+ r'(?:www\.)?invidious\.slipfox\.xyz',
+ r'(?:www\.)?invidious\.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd\.onion',
+ r'(?:www\.)?inv\.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad\.onion',
+ r'(?:www\.)?invidious\.tiekoetter\.com',
+ r'(?:www\.)?iv\.odysfvr23q5wgt7i456o5t3trw2cw5dgn56vbjfbq2m7xsc5vqbqpcyd\.onion',
+ r'(?:www\.)?invidious\.nerdvpn\.de',
+ r'(?:www\.)?invidious\.weblibre\.org',
+ r'(?:www\.)?inv\.odyssey346\.dev',
+ r'(?:www\.)?invidious\.dhusch\.de',
+ r'(?:www\.)?iv\.melmac\.space',
+ r'(?:www\.)?watch\.thekitty\.zone',
+ r'(?:www\.)?invidious\.privacydev\.net',
+ r'(?:www\.)?ng27owmagn5amdm7l5s3rsqxwscl5ynppnis5dqcasogkyxcfqn7psid\.onion',
+ r'(?:www\.)?invidious\.drivet\.xyz',
+ r'(?:www\.)?vid\.priv\.au',
+ r'(?:www\.)?euxxcnhsynwmfidvhjf6uzptsmh4dipkmgdmcmxxuo7tunp3ad2jrwyd\.onion',
+ r'(?:www\.)?inv\.vern\.cc',
+ r'(?:www\.)?invidious\.esmailelbob\.xyz',
+ r'(?:www\.)?invidious\.sethforprivacy\.com',
+ r'(?:www\.)?yt\.oelrichsgarcia\.de',
+ r'(?:www\.)?yt\.artemislena\.eu',
+ r'(?:www\.)?invidious\.flokinet\.to',
+ r'(?:www\.)?invidious\.baczek\.me',
+ r'(?:www\.)?y\.com\.sb',
+ r'(?:www\.)?invidious\.epicsite\.xyz',
+ r'(?:www\.)?invidious\.lidarshield\.cloud',
+ r'(?:www\.)?yt\.funami\.tech',
r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
r'(?:www\.)?piped\.qdi\.fi',
r'(?:www\.)?piped\.video',
r'(?:www\.)?piped\.aeong\.one',
+ r'(?:www\.)?piped\.moomoo\.me',
+ r'(?:www\.)?piped\.chauvet\.pro',
+ r'(?:www\.)?watch\.leptons\.xyz',
+ r'(?:www\.)?pd\.vern\.cc',
+ r'(?:www\.)?piped\.hostux\.net',
+ r'(?:www\.)?piped\.lunar\.icu',
+ # Hyperpipe instances from https://hyperpipe.codeberg.page/
+ r'(?:www\.)?hyperpipe\.surge\.sh',
+ r'(?:www\.)?hyperpipe\.esmailelbob\.xyz',
+ r'(?:www\.)?listen\.whatever\.social',
+ r'(?:www\.)?music\.adminforge\.de',
)
# extracted from account/account_menu ep
}
badges = []
- for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer'), default=[]):
+ for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer')):
badge_type = (
privacy_icon_map.get(traverse_obj(badge, ('icon', 'iconType'), expected_type=str))
or badge_style_map.get(traverse_obj(badge, 'style'))
runs = item
runs = runs[:min(len(runs), max_runs or len(runs))]
- text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
+ text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str))
if text:
return text
"""
thumbnails = []
for path in path_list or [()]:
- for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
+ for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)):
thumbnail_url = url_or_none(thumbnail.get('url'))
if not thumbnail_url:
continue
@staticmethod
def is_music_url(url):
- return re.match(r'https?://music\.youtube\.com/', url) is not None
+ return re.match(r'(https?://)?music\.youtube\.com/', url) is not None
def _extract_video(self, renderer):
video_id = renderer.get('videoId')
youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
- (?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
+ (?:(?:v|embed|e|shorts|live)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
'duration': 106,
},
'params': {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}, 'format': '251-drc'},
- }
+ },
+ {
+ 'url': 'https://www.youtube.com/live/qVv6vCqciTM',
+ 'info_dict': {
+ 'id': 'qVv6vCqciTM',
+ 'ext': 'mp4',
+ 'age_limit': 0,
+ 'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+ 'comment_count': int,
+ 'chapters': 'count:13',
+ 'upload_date': '20221223',
+ 'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+ 'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+ 'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+ 'like_count': int,
+ 'release_date': '20221223',
+ 'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+ 'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+ 'view_count': int,
+ 'playable_in_embed': True,
+ 'duration': 4438,
+ 'availability': 'public',
+ 'channel_follower_count': int,
+ 'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+ 'categories': ['Entertainment'],
+ 'live_status': 'was_live',
+ 'release_timestamp': 1671793345,
+ 'channel': 'さなちゃんねる',
+ 'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+ 'uploader': 'さなちゃんねる',
+ },
+ },
]
_WEBPAGE_TESTS = [
return
_, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
- video_details = traverse_obj(
- prs, (..., 'videoDetails'), expected_type=dict, default=[])
+ video_details = traverse_obj(prs, (..., 'videoDetails'), expected_type=dict)
microformats = traverse_obj(
prs, (..., 'microformat', 'playerMicroformatRenderer'),
- expected_type=dict, default=[])
+ expected_type=dict)
_, live_status, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url)
is_live = live_status == 'is_live'
start_time = time.time()
'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
), expected_type=list)
- return self._extract_chapters(
+ return self._extract_chapters_helper(
chapter_list,
- chapter_time=lambda chapter: float_or_none(
+ start_function=lambda chapter: float_or_none(
traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
- chapter_title=lambda chapter: traverse_obj(
+ title_function=lambda chapter: traverse_obj(
chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
duration=duration)
content_list = traverse_obj(
data,
('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
- expected_type=list, default=[])
+ expected_type=list)
chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
chapter_title = lambda chapter: self._get_text(chapter, 'title')
return next(filter(None, (
- self._extract_chapters(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
- chapter_time, chapter_title, duration)
+ self._extract_chapters_helper(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
+ chapter_time, chapter_title, duration)
for contents in content_list)), [])
- def _extract_chapters_from_description(self, description, duration):
- duration_re = r'(?:\d+:)?\d{1,2}:\d{2}'
- sep_re = r'(?m)^\s*(%s)\b\W*\s(%s)\s*$'
- return self._extract_chapters(
- re.findall(sep_re % (duration_re, r'.+?'), description or ''),
- chapter_time=lambda x: parse_duration(x[0]), chapter_title=lambda x: x[1],
- duration=duration, strict=False) or self._extract_chapters(
- re.findall(sep_re % (r'.+?', duration_re), description or ''),
- chapter_time=lambda x: parse_duration(x[1]), chapter_title=lambda x: x[0],
- duration=duration, strict=False)
-
- def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration, strict=True):
- if not duration:
- return
- chapter_list = [{
- 'start_time': chapter_time(chapter),
- 'title': chapter_title(chapter),
- } for chapter in chapter_list or []]
- if not strict:
- chapter_list.sort(key=lambda c: c['start_time'] or 0)
-
- chapters = [{'start_time': 0}]
- for idx, chapter in enumerate(chapter_list):
- if chapter['start_time'] is None:
- self.report_warning(f'Incomplete chapter {idx}')
- elif chapters[-1]['start_time'] <= chapter['start_time'] <= duration:
- chapters.append(chapter)
- elif chapter not in chapters:
- self.report_warning(
- f'Invalid start time ({chapter["start_time"]} < {chapters[-1]["start_time"]}) for chapter "{chapter["title"]}"')
- return chapters[1:]
-
def _extract_comment(self, comment_renderer, parent=None):
comment_id = comment_renderer.get('commentId')
if not comment_id:
comment = self._extract_comment(comment_renderer, parent)
if not comment:
continue
+ # Sometimes YouTube may break and give us infinite looping comments.
+ # See: https://github.com/yt-dlp/yt-dlp/issues/6290
+ if comment['id'] in tracker['seen_comment_ids']:
+ self.report_warning('Detected YouTube comments looping. Stopping comment extraction as we probably cannot get any more.')
+ yield
+ else:
+ tracker['seen_comment_ids'].add(comment['id'])
tracker['running_total'] += 1
tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1
est_total=0,
current_page_thread=0,
total_parent_comments=0,
- total_reply_comments=0)
+ total_reply_comments=0,
+ seen_comment_ids=set())
# TODO: Deprecated
# YouTube comments have a max depth of 2
if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
return True
- reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
+ reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')))
AGE_GATE_REASONS = (
'confirm your age', 'age-restricted', 'inappropriate', # reason
'age_verification_required', 'age_check_required', # status
self.report_warning(
f'Skipping player response from {client} client (got player response for video "{pr_video_id}" instead of "{video_id}")' + bug_reports_message())
else:
+ # Save client name for introspection later
+ name = short_client_name(client)
+ sd = traverse_obj(pr, ('streamingData', {dict})) or {}
+ sd[STREAMING_DATA_CLIENT_NAME] = name
+ for f in traverse_obj(sd, (('formats', 'adaptiveFormats'), ..., {dict})):
+ f[STREAMING_DATA_CLIENT_NAME] = name
prs.append(pr)
# creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
return live_status
def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration):
+ CHUNK_SIZE = 10 << 20
itags, stream_ids = collections.defaultdict(set), []
itag_qualities, res_qualities = {}, {0: None}
q = qualities([
'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
])
- streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
+ streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...))
+ all_formats = self._configuration_arg('include_duplicate_formats')
+
+    def build_fragments(f):
+        # Split a progressive format of known size into CHUNK_SIZE-d ranged
+        # fragments (YouTube throttles chunks >~10M — see the comment this
+        # replaces in the old downloader_options). LazyList keeps the
+        # fragment dicts from being materialized until actually downloaded.
+        # NOTE(review): the range end is range_start + CHUNK_SIZE - 1 clamped
+        # to f['filesize'] (not filesize - 1); servers clamp the inclusive
+        # end themselves — confirm before "fixing".
+        return LazyList({
+            'url': update_url_query(f['url'], {
+                'range': f'{range_start}-{min(range_start + CHUNK_SIZE - 1, f["filesize"])}'
+            })
+        } for range_start in range(0, f['filesize'], CHUNK_SIZE))
for fmt in streaming_formats:
if fmt.get('targetDurationSec'):
itag = str_or_none(fmt.get('itag'))
audio_track = fmt.get('audioTrack') or {}
stream_id = (itag, audio_track.get('id'), fmt.get('isDrc'))
- if stream_id in stream_ids:
- continue
+ if not all_formats:
+ if stream_id in stream_ids:
+ continue
quality = fmt.get('quality')
height = int_or_none(fmt.get('height'))
if is_damaged:
self.report_warning(
f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True)
+
+ client_name = fmt.get(STREAMING_DATA_CLIENT_NAME)
dct = {
'asr': int_or_none(fmt.get('audioSampleRate')),
'filesize': int_or_none(fmt.get('contentLength')),
'format_id': f'{itag}{"-drc" if fmt.get("isDrc") else ""}',
'format_note': join_nonempty(
- '%s%s' % (audio_track.get('displayName') or '',
- ' (default)' if language_preference > 0 else ''),
+ join_nonempty(audio_track.get('displayName'),
+ language_preference > 0 and ' (default)', delim=''),
fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
- 'DRC' if fmt.get('isDrc') else None,
+ fmt.get('isDrc') and 'DRC',
try_get(fmt, lambda x: x['projectionType'].replace('RECTANGULAR', '').lower()),
try_get(fmt, lambda x: x['spatialAudioType'].replace('SPATIAL_AUDIO_TYPE_', '').lower()),
- throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
+ throttled and 'THROTTLED', is_damaged and 'DAMAGED',
+ (self.get_param('verbose') or all_formats) and client_name,
+ delim=', '),
# Format 22 is likely to be damaged. See https://github.com/yt-dlp/yt-dlp/issues/3372
'source_preference': -10 if throttled else -5 if itag == '22' else -1,
'fps': int_or_none(fmt.get('fps')) or None,
if mime_mobj:
dct['ext'] = mimetype2ext(mime_mobj.group(1))
dct.update(parse_codecs(mime_mobj.group(2)))
- no_audio = dct.get('acodec') == 'none'
- no_video = dct.get('vcodec') == 'none'
- if no_audio:
- dct['vbr'] = tbr
- if no_video:
- dct['abr'] = tbr
- if no_audio or no_video:
- dct['downloader_options'] = {
- # Youtube throttles chunks >~10M
- 'http_chunk_size': 10485760,
- }
- if dct.get('ext'):
- dct['container'] = dct['ext'] + '_dash'
-
if itag:
itags[itag].add(('https', dct.get('language')))
stream_ids.append(stream_id)
+ single_stream = 'none' in (dct.get('acodec'), dct.get('vcodec'))
+ if single_stream and dct.get('ext'):
+ dct['container'] = dct['ext'] + '_dash'
+
+ if dct['filesize']:
+ yield {
+ **dct,
+ 'format_id': f'{dct["format_id"]}-dashy' if all_formats else dct['format_id'],
+ 'protocol': 'http_dash_segments',
+ 'fragments': build_fragments(dct),
+ }
+ if not all_formats:
+ continue
+ dct['downloader_options'] = {'http_chunk_size': CHUNK_SIZE}
yield dct
needs_live_processing = self._needs_live_processing(live_status, duration)
elif skip_bad_formats and live_status == 'is_live' and needs_live_processing != 'is_live':
skip_manifests.add('dash')
- def process_manifest_format(f, proto, itag):
+ def process_manifest_format(f, proto, client_name, itag):
key = (proto, f.get('language'))
- if key in itags[itag]:
+ if not all_formats and key in itags[itag]:
return False
itags[itag].add(key)
- if any(p != proto for p, _ in itags[itag]):
+ if itag and all_formats:
+ f['format_id'] = f'{itag}-{proto}'
+ elif any(p != proto for p, _ in itags[itag]):
f['format_id'] = f'{itag}-{proto}'
elif itag:
f['format_id'] = itag
f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1))
if f['quality'] == -1 and f.get('height'):
f['quality'] = q(res_qualities[min(res_qualities, key=lambda x: abs(x - f['height']))])
+ if self.get_param('verbose'):
+ f['format_note'] = join_nonempty(f.get('format_note'), client_name, delim=', ')
return True
subtitles = {}
for sd in streaming_data:
+ client_name = sd.get(STREAMING_DATA_CLIENT_NAME)
+
hls_manifest_url = 'hls' not in skip_manifests and sd.get('hlsManifestUrl')
if hls_manifest_url:
fmts, subs = self._extract_m3u8_formats_and_subtitles(
hls_manifest_url, video_id, 'mp4', fatal=False, live=live_status == 'is_live')
subtitles = self._merge_subtitles(subs, subtitles)
for f in fmts:
- if process_manifest_format(f, 'hls', self._search_regex(
+ if process_manifest_format(f, 'hls', client_name, self._search_regex(
r'/itag/(\d+)', f['url'], 'itag', default=None)):
yield f
formats, subs = self._extract_mpd_formats_and_subtitles(dash_manifest_url, video_id, fatal=False)
subtitles = self._merge_subtitles(subs, subtitles) # Prioritize HLS subs over DASH
for f in formats:
- if process_manifest_format(f, 'dash', f['format_id']):
+ if process_manifest_format(f, 'dash', client_name, f['format_id']):
f['filesize'] = int_or_none(self._search_regex(
r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
if needs_live_processing:
else 'was_live' if live_content
else 'not_live' if False in (is_live, live_content)
else None)
- streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
+ streaming_data = traverse_obj(player_responses, (..., 'streamingData'))
*formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, live_status, duration)
return live_broadcast_details, live_status, streaming_data, formats, subtitles
webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
playability_statuses = traverse_obj(
- player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
+ player_responses, (..., 'playabilityStatus'), expected_type=dict)
trailer_video_id = get_first(
playability_statuses,
search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else (lambda x: None))
- video_details = traverse_obj(
- player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
+ video_details = traverse_obj(player_responses, (..., 'videoDetails'), expected_type=dict)
microformats = traverse_obj(
player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
- expected_type=dict, default=[])
+ expected_type=dict)
translated_title = self._get_text(microformats, (..., 'title'))
video_title = (self._preferred_lang and translated_title
'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
'description': video_description,
'uploader': get_first(video_details, 'author'),
- 'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
+ 'uploader_id': self._search_regex(r'/(?:channel/|user/|(?=@))([^/?&#]+)', owner_profile_url, 'uploader id', default=None),
'uploader_url': owner_profile_url,
'channel_id': channel_id,
'channel_url': format_field(channel_id, None, 'https://www.youtube.com/channel/%s'),
# Converted into dicts to remove duplicates
captions = {
get_lang_code(sub): sub
- for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
+ for sub in traverse_obj(pctr, (..., 'captionTracks', ...))}
translation_languages = {
lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
- for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
+ for lang in traverse_obj(pctr, (..., 'translationLanguages', ...))}
def process_language(container, base_url, lang_code, sub_name, query):
lang_subs = container.setdefault(lang_code, [])
initial_data = None
if webpage:
initial_data = self.extract_yt_initial_data(video_id, webpage, fatal=False)
+ if not traverse_obj(initial_data, 'contents'):
+ self.report_warning('Incomplete data received in embedded initial data; re-fetching using API.')
+ initial_data = None
if not initial_data:
query = {'videoId': video_id}
query.update(self._get_checkok_params())
initial_data = self._extract_response(
item_id=video_id, ep='next', fatal=False,
- ytcfg=master_ytcfg, query=query,
+ ytcfg=master_ytcfg, query=query, check_get_keys='contents',
headers=self.generate_api_headers(ytcfg=master_ytcfg),
note='Downloading initial data API JSON')
info['comment_count'] = traverse_obj(initial_data, (
'contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'itemSectionRenderer',
- 'contents', ..., 'commentsEntryPointHeaderRenderer', 'commentCount', 'simpleText'
+ 'contents', ..., 'commentsEntryPointHeaderRenderer', 'commentCount'
), (
'engagementPanels', lambda _, v: v['engagementPanelSectionListRenderer']['panelIdentifier'] == 'comment-item-section',
- 'engagementPanelSectionListRenderer', 'header', 'engagementPanelTitleHeaderRenderer', 'contextualInfo', 'runs', ..., 'text'
- ), expected_type=int_or_none, get_all=False)
+ 'engagementPanelSectionListRenderer', 'header', 'engagementPanelTitleHeaderRenderer', 'contextualInfo'
+ ), expected_type=self._get_count, get_all=False)
try: # This will error if there is no livechat
initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
list) or []):
tbrs = variadic(
traverse_obj(
- tlb, 'toggleButtonRenderer',
- ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'),
- default=[]))
+ tlb, ('toggleButtonRenderer', ...),
+ ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer')))
for tbr in tbrs:
for getter, regex in [(
lambda x: x['defaultText']['accessibility']['accessibilityData'],
return info_dict
return wrapper
- def _extract_channel_id(self, webpage):
- channel_id = self._html_search_meta(
- 'channelId', webpage, 'channel id', default=None)
- if channel_id:
- return channel_id
- channel_url = self._html_search_meta(
- ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
- 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
- 'twitter:app:url:googleplay'), webpage, 'channel url')
- return self._search_regex(
- r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+',
- channel_url, 'channel id')
-
@staticmethod
def _extract_basic_item_renderer(item):
# Modified from _extract_grid_item_renderer
info['view_count'] = self._get_count(playlist_stats, 1)
if info['view_count'] is None: # 0 is allowed
info['view_count'] = self._get_count(playlist_header_renderer, 'viewCountText')
+ if info['view_count'] is None:
+ info['view_count'] = self._get_count(data, (
+ 'contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., 'tabRenderer', 'content', 'sectionListRenderer',
+ 'contents', ..., 'itemSectionRenderer', 'contents', ..., 'channelAboutFullMetadataRenderer', 'viewCountText'))
info['playlist_count'] = self._get_count(playlist_stats, 0)
if info['playlist_count'] is None: # 0 is allowed
IE_DESC = 'YouTube Tabs'
_VALID_URL = r'''(?x:
https?://
- (?:\w+\.)?
+ (?!consent\.)(?:\w+\.)?
(?:
youtube(?:kids)?\.com|
%(invidious)s
}
}],
'params': {'extract_flat': True},
+ }, {
+ 'url': 'https://www.youtube.com/@3blue1brown/about',
+ 'info_dict': {
+ 'id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'tags': ['Mathematics'],
+ 'title': '3Blue1Brown - About',
+ 'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
+ 'channel_follower_count': int,
+ 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
+ 'channel': '3Blue1Brown',
+ 'uploader': '3Blue1Brown',
+ 'view_count': int,
+ 'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw',
+ 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
+ },
+ 'playlist_count': 0,
}]
@classmethod
original_tab_id, display_id = tab[1:], f'{item_id}{tab}'
if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
url = f'{pre}/videos{post}'
+ if smuggled_data.get('is_music_url'):
+ self.report_warning(f'YouTube Music is not directly supported. Redirecting to {url}')
# Handle both video/playlist URLs
qs = parse_qs(url)
}
+class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
+    # Unwraps consent.youtube.com cookie-consent interstitial URLs and
+    # re-dispatches the embedded 'continue' URL to the regular extractors.
+    IE_NAME = 'youtube:consent'
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://consent\.youtube\.com/m\?'
+    _TESTS = [{
+        'url': 'https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1',
+        'info_dict': {
+            'id': 'qVv6vCqciTM',
+            'ext': 'mp4',
+            'age_limit': 0,
+            'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'comment_count': int,
+            'chapters': 'count:13',
+            'upload_date': '20221223',
+            'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+            'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'like_count': int,
+            'release_date': '20221223',
+            'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+            'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+            'view_count': int,
+            'playable_in_embed': True,
+            'duration': 4438,
+            'availability': 'public',
+            'channel_follower_count': int,
+            'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'categories': ['Entertainment'],
+            'live_status': 'was_live',
+            'release_timestamp': 1671793345,
+            'channel': 'さなちゃんねる',
+            'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+            'uploader': 'さなちゃんねる',
+        },
+        'add_ie': ['Youtube'],
+        'params': {'skip_download': 'Youtube'},
+    }]
+
+    def _real_extract(self, url):
+        # 'continue' holds the percent-encoded destination URL; take the last
+        # value in case the parameter is repeated, and validate it is a URL
+        # before delegating extraction to whichever IE matches it.
+        redirect_url = url_or_none(parse_qs(url).get('continue', [None])[-1])
+        if not redirect_url:
+            raise ExtractorError('Invalid cookie consent redirect URL', expected=True)
+        return self.url_result(redirect_url)
+
+
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list