X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/2e30b46fe4a04e82d1ec1a21f8d387e5f96405be..c9abebb851e6188cb34b9eb744c1863dd46af919:/yt_dlp/extractor/youtube.py

diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 555c94f97..6e6abd65b 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -1,5 +1,6 @@
 import base64
 import calendar
+import collections
 import copy
 import datetime
 import enum
@@ -291,7 +292,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
 
     _RESERVED_NAMES = (
-        r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
+        r'channel|c|user|playlist|watch|w|v|embed|e|live|watch_popup|clip|'
         r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
        r'browse|oembed|get_video_info|iframe_api|s/player|source|'
         r'storefront|oops|index|account|t/terms|about|upload|signin|logout')
@@ -311,6 +312,40 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         r'(?:www\.)?invidious\.pussthecat\.org',
         r'(?:www\.)?invidious\.zee\.li',
         r'(?:www\.)?invidious\.ethibox\.fr',
+        r'(?:www\.)?iv\.ggtyler\.dev',
+        r'(?:www\.)?inv\.vern\.i2p',
+        r'(?:www\.)?am74vkcrjp2d5v36lcdqgsj2m6x36tbrkhsruoegwfcizzabnfgf5zyd\.onion',
+        r'(?:www\.)?inv\.riverside\.rocks',
+        r'(?:www\.)?invidious\.silur\.me',
+        r'(?:www\.)?inv\.bp\.projectsegfau\.lt',
+        r'(?:www\.)?invidious\.g4c3eya4clenolymqbpgwz3q3tawoxw56yhzk4vugqrl6dtu3ejvhjid\.onion',
+        r'(?:www\.)?invidious\.slipfox\.xyz',
+        r'(?:www\.)?invidious\.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd\.onion',
+        r'(?:www\.)?inv\.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad\.onion',
+        r'(?:www\.)?invidious\.tiekoetter\.com',
+        r'(?:www\.)?iv\.odysfvr23q5wgt7i456o5t3trw2cw5dgn56vbjfbq2m7xsc5vqbqpcyd\.onion',
+        r'(?:www\.)?invidious\.nerdvpn\.de',
+        r'(?:www\.)?invidious\.weblibre\.org',
+        r'(?:www\.)?inv\.odyssey346\.dev',
+        r'(?:www\.)?invidious\.dhusch\.de',
+        r'(?:www\.)?iv\.melmac\.space',
+        r'(?:www\.)?watch\.thekitty\.zone',
+        r'(?:www\.)?invidious\.privacydev\.net',
+        r'(?:www\.)?ng27owmagn5amdm7l5s3rsqxwscl5ynppnis5dqcasogkyxcfqn7psid\.onion',
+        r'(?:www\.)?invidious\.drivet\.xyz',
+        r'(?:www\.)?vid\.priv\.au',
+        r'(?:www\.)?euxxcnhsynwmfidvhjf6uzptsmh4dipkmgdmcmxxuo7tunp3ad2jrwyd\.onion',
+        r'(?:www\.)?inv\.vern\.cc',
+        r'(?:www\.)?invidious\.esmailelbob\.xyz',
+        r'(?:www\.)?invidious\.sethforprivacy\.com',
+        r'(?:www\.)?yt\.oelrichsgarcia\.de',
+        r'(?:www\.)?yt\.artemislena\.eu',
+        r'(?:www\.)?invidious\.flokinet\.to',
+        r'(?:www\.)?invidious\.baczek\.me',
+        r'(?:www\.)?y\.com\.sb',
+        r'(?:www\.)?invidious\.epicsite\.xyz',
+        r'(?:www\.)?invidious\.lidarshield\.cloud',
+        r'(?:www\.)?yt\.funami\.tech',
         r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
         r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
         r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
@@ -369,14 +404,37 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
         # piped instances from https://github.com/TeamPiped/Piped/wiki/Instances
         r'(?:www\.)?piped\.kavin\.rocks',
-        r'(?:www\.)?piped\.silkky\.cloud',
         r'(?:www\.)?piped\.tokhmi\.xyz',
-        r'(?:www\.)?piped\.moomoo\.me',
-        r'(?:www\.)?il\.ax',
-        r'(?:www\.)?piped\.syncpundit\.com',
+        r'(?:www\.)?piped\.syncpundit\.io',
         r'(?:www\.)?piped\.mha\.fi',
+        r'(?:www\.)?watch\.whatever\.social',
+        r'(?:www\.)?piped\.garudalinux\.org',
+        r'(?:www\.)?piped\.rivo\.lol',
+        r'(?:www\.)?piped-libre\.kavin\.rocks',
+        r'(?:www\.)?yt\.jae\.fi',
         r'(?:www\.)?piped\.mint\.lgbt',
-        r'(?:www\.)?piped\.privacy\.com\.de',
+        r'(?:www\.)?il\.ax',
+        r'(?:www\.)?piped\.esmailelbob\.xyz',
+        r'(?:www\.)?piped\.projectsegfau\.lt',
+        r'(?:www\.)?piped\.privacydev\.net',
+        r'(?:www\.)?piped\.palveluntarjoaja\.eu',
+        r'(?:www\.)?piped\.smnz\.de',
+        r'(?:www\.)?piped\.adminforge\.de',
+        r'(?:www\.)?watch\.whatevertinfoil\.de',
+        r'(?:www\.)?piped\.qdi\.fi',
+        r'(?:www\.)?piped\.video',
+        r'(?:www\.)?piped\.aeong\.one',
+        r'(?:www\.)?piped\.moomoo\.me',
+        r'(?:www\.)?piped\.chauvet\.pro',
+        r'(?:www\.)?watch\.leptons\.xyz',
+        r'(?:www\.)?pd\.vern\.cc',
+        r'(?:www\.)?piped\.hostux\.net',
+        r'(?:www\.)?piped\.lunar\.icu',
+        # Hyperpipe instances from https://hyperpipe.codeberg.page/
+        r'(?:www\.)?hyperpipe\.surge\.sh',
+        r'(?:www\.)?hyperpipe\.esmailelbob\.xyz',
+        r'(?:www\.)?listen\.whatever\.social',
+        r'(?:www\.)?music\.adminforge\.de',
     )
 
     # extracted from account/account_menu ep
@@ -732,7 +790,7 @@ def _extract_badges(self, renderer: dict):
         }
 
         badges = []
-        for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer'), default=[]):
+        for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer')):
             badge_type = (
                 privacy_icon_map.get(traverse_obj(badge, ('icon', 'iconType'), expected_type=str))
                 or badge_style_map.get(traverse_obj(badge, 'style'))
@@ -772,7 +830,7 @@ def _get_text(data, *path_list, max_runs=None):
                     runs = item
 
                 runs = runs[:min(len(runs), max_runs or len(runs))]
-                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
+                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str))
                 if text:
                     return text
 
@@ -792,7 +850,7 @@ def _extract_thumbnails(data, *path_list):
         """
         thumbnails = []
         for path in path_list or [()]:
-            for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
+            for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)):
                 thumbnail_url = url_or_none(thumbnail.get('url'))
                 if not thumbnail_url:
                     continue
@@ -898,32 +956,40 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers
 
     @staticmethod
     def is_music_url(url):
-        return re.match(r'https?://music\.youtube\.com/', url) is not None
+        return re.match(r'(https?://)?music\.youtube\.com/', url) is not None
 
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
-        title = self._get_text(renderer, 'title')
+
+        reel_header_renderer = traverse_obj(renderer, (
+            'navigationEndpoint', 'reelWatchEndpoint', 'overlay', 'reelPlayerOverlayRenderer',
+            'reelPlayerHeaderSupportedRenderers', 'reelPlayerHeaderRenderer'))
+
+        title = self._get_text(renderer, 'title', 'headline') or self._get_text(reel_header_renderer, 'reelTitleText')
         description = self._get_text(renderer, 'descriptionSnippet')
-        duration = parse_duration(self._get_text(
-            renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
+
+        duration = int_or_none(renderer.get('lengthSeconds'))
         if duration is None:
+            duration = parse_duration(self._get_text(
+                renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
+        if duration is None:
+            # XXX: should write a parser to be more general to support more cases (e.g. shorts in shorts tab)
             duration = parse_duration(self._search_regex(
                 r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
                 traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
                 video_id, default=None, group='duration'))
-        view_count = self._get_count(renderer, 'viewCountText', 'shortViewCountText')
-        uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
         channel_id = traverse_obj(
             renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
             expected_type=str, get_all=False)
-        time_text = self._get_text(renderer, 'publishedTimeText') or ''
-        scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
+        if not channel_id:
+            channel_id = traverse_obj(reel_header_renderer, ('channelNavigationEndpoint', 'browseEndpoint', 'browseId'))
+
         overlay_style = traverse_obj(
             renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
             get_all=False, expected_type=str)
         badges = self._extract_badges(renderer)
-        thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
+
         navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
             renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
             expected_type=str)) or ''
@@ -931,12 +997,22 @@ def _extract_video(self, renderer):
         if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
             url = f'https://www.youtube.com/shorts/{video_id}'
 
+        time_text = (self._get_text(renderer, 'publishedTimeText', 'videoInfo')
+                     or self._get_text(reel_header_renderer, 'timestampText') or '')
+        scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
+
         live_status = (
             'is_upcoming' if scheduled_timestamp is not None
             else 'was_live' if 'streamed' in time_text.lower()
             else 'is_live' if overlay_style == 'LIVE' or self._has_badge(badges, BadgeType.LIVE_NOW)
             else None)
 
+        # videoInfo is a string like '50K views • 10 years ago'.
+ view_count_text = self._get_text(renderer, 'viewCountText', 'shortViewCountText', 'videoInfo') or '' + view_count = (0 if 'no views' in view_count_text.lower() + else self._get_count({'simpleText': view_count_text})) + view_count_field = 'concurrent_view_count' if live_status in ('is_live', 'is_upcoming') else 'view_count' + return { '_type': 'url', 'ie_key': YoutubeIE.ie_key(), @@ -945,9 +1021,11 @@ def _extract_video(self, renderer): 'title': title, 'description': description, 'duration': duration, - 'uploader': uploader, 'channel_id': channel_id, - 'thumbnails': thumbnails, + 'channel': (self._get_text(renderer, 'ownerText', 'shortBylineText') + or self._get_text(reel_header_renderer, 'channelTitleText')), + 'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None, + 'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'), 'timestamp': (self._parse_time_text(time_text) if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE) else None), @@ -959,7 +1037,7 @@ def _extract_video(self, renderer): needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None, needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None, is_unlisted=self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) or None), - 'concurrent_view_count' if live_status in ('is_live', 'is_upcoming') else 'view_count': view_count, + view_count_field: view_count, 'live_status': live_status } @@ -979,7 +1057,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: - (?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/ + (?:(?:v|embed|e|shorts|live)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! @@ -1020,6 +1098,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): ]*\bhref="(?Phttps://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})" \s[^>]*\bclass="[^"]*\blazy-load-youtube''', ] + _RETURN_TYPE = 'video' # XXX: How to handle multifeed? 
_PLAYER_INFO_RE = ( r'/s/player/(?P[a-zA-Z0-9_-]{8,})/player', @@ -1550,66 +1629,99 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'skip': 'This live event has ended.', }, { - # Multifeed videos (multiple cameras), URL is for Main Camera - 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg', + # Multifeed videos (multiple cameras), URL can be of any Camera + 'url': 'https://www.youtube.com/watch?v=zaPI8MvL8pg', 'info_dict': { - 'id': 'jvGDaLqkpTg', - 'title': 'Tom Clancy Free Weekend Rainbow Whatever', - 'description': 'md5:e03b909557865076822aa169218d6a5d', + 'id': 'zaPI8MvL8pg', + 'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04', + 'description': 'md5:563ccbc698b39298481ca3c571169519', }, 'playlist': [{ 'info_dict': { - 'id': 'jvGDaLqkpTg', - 'ext': 'mp4', - 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)', - 'description': 'md5:e03b909557865076822aa169218d6a5d', - 'duration': 10643, - 'upload_date': '20161111', - 'uploader': 'Team PGP', - 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg', - }, - }, { - 'info_dict': { - 'id': '3AKt1R1aDnw', + 'id': 'j5yGuxZ8lLU', 'ext': 'mp4', - 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)', - 'description': 'md5:e03b909557865076822aa169218d6a5d', - 'duration': 10991, - 'upload_date': '20161111', - 'uploader': 'Team PGP', - 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg', + 'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Chris)', + 'uploader': 'WiiLikeToPlay', + 'description': 'md5:563ccbc698b39298481ca3c571169519', + 'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray', + 'duration': 10120, + 'channel_follower_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg', + 'availability': 'public', + 'playable_in_embed': True, + 'upload_date': '20131105', + 'uploader_id': 'WiiRikeToPray', + 'categories': ['Gaming'], + 'live_status': 'was_live', + 'tags': 'count:24', + 'release_timestamp': 1383701910, + 'thumbnail': 'https://i.ytimg.com/vi/j5yGuxZ8lLU/maxresdefault.jpg', + 'comment_count': int, + 'age_limit': 0, + 'like_count': int, + 'channel_id': 'UCN2XePorRokPB9TEgRZpddg', + 'channel': 'WiiLikeToPlay', + 'view_count': int, + 'release_date': '20131106', }, }, { 'info_dict': { - 'id': 'RtAMM00gpVc', + 'id': 'zaPI8MvL8pg', 'ext': 'mp4', - 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)', - 'description': 'md5:e03b909557865076822aa169218d6a5d', - 'duration': 10995, - 'upload_date': '20161111', - 'uploader': 'Team PGP', - 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg', + 'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Tyson)', + 'uploader_id': 'WiiRikeToPray', + 'availability': 'public', + 'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg', + 'channel': 'WiiLikeToPlay', + 'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray', + 'channel_follower_count': int, + 'description': 'md5:563ccbc698b39298481ca3c571169519', + 'duration': 10108, + 'age_limit': 0, + 'like_count': int, + 'tags': 'count:24', + 'channel_id': 'UCN2XePorRokPB9TEgRZpddg', + 'uploader': 'WiiLikeToPlay', + 'release_timestamp': 1383701915, + 'comment_count': int, + 'upload_date': '20131105', + 'thumbnail': 'https://i.ytimg.com/vi/zaPI8MvL8pg/maxresdefault.jpg', + 'release_date': '20131106', + 
'playable_in_embed': True, + 'live_status': 'was_live', + 'categories': ['Gaming'], + 'view_count': int, }, }, { 'info_dict': { - 'id': '6N2fdlP3C5U', + 'id': 'R7r3vfO7Hao', 'ext': 'mp4', - 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)', - 'description': 'md5:e03b909557865076822aa169218d6a5d', - 'duration': 10990, - 'upload_date': '20161111', - 'uploader': 'Team PGP', - 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg', + 'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Spencer)', + 'thumbnail': 'https://i.ytimg.com/vi/R7r3vfO7Hao/maxresdefault.jpg', + 'channel_id': 'UCN2XePorRokPB9TEgRZpddg', + 'like_count': int, + 'availability': 'public', + 'playable_in_embed': True, + 'upload_date': '20131105', + 'description': 'md5:563ccbc698b39298481ca3c571169519', + 'uploader_id': 'WiiRikeToPray', + 'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray', + 'channel_follower_count': int, + 'tags': 'count:24', + 'release_date': '20131106', + 'uploader': 'WiiLikeToPlay', + 'comment_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg', + 'channel': 'WiiLikeToPlay', + 'categories': ['Gaming'], + 'release_timestamp': 1383701914, + 'live_status': 'was_live', + 'age_limit': 0, + 'duration': 10128, + 'view_count': int, }, }], - 'params': { - 'skip_download': True, - }, - 'skip': 'Not multifeed anymore', + 'params': {'skip_download': True}, }, { # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536) @@ -2449,7 +2561,95 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'note': '6 channel audio', 'url': 'https://www.youtube.com/watch?v=zgdo7-RRjgo', 'only_matching': True, - } + }, { + 'note': 'Multiple HLS formats with same itag', + 'url': 'https://www.youtube.com/watch?v=kX3nB4PpJko', + 'info_dict': { + 'id': 'kX3nB4PpJko', + 'ext': 'mp4', + 'categories': ['Entertainment'], + 'description': 'md5:e8031ff6e426cdb6a77670c9b81f6fa6', + 'uploader_url': 'http://www.youtube.com/user/MrBeast6000', + 'live_status': 'not_live', + 'duration': 937, + 'channel_follower_count': int, + 'thumbnail': 'https://i.ytimg.com/vi_webp/kX3nB4PpJko/maxresdefault.webp', + 'title': 'Last To Take Hand Off Jet, Keeps It!', + 'channel': 'MrBeast', + 'playable_in_embed': True, + 'view_count': int, + 'upload_date': '20221112', + 'uploader': 'MrBeast', + 'uploader_id': 'MrBeast6000', + 'channel_url': 'https://www.youtube.com/channel/UCX6OQ3DkcsbYNE6H8uQQuVA', + 'age_limit': 0, + 'availability': 'public', + 'channel_id': 'UCX6OQ3DkcsbYNE6H8uQQuVA', + 'like_count': int, + 'tags': [], + }, + 'params': {'extractor_args': {'youtube': {'player_client': ['ios']}}, 'format': '233-1'}, + }, { + 'note': 'Audio formats with Dynamic Range Compression', + 'url': 'https://www.youtube.com/watch?v=Tq92D6wQ1mg', + 'info_dict': { + 'id': 'Tq92D6wQ1mg', + 'ext': 'weba', + 'title': '[MMD] Adios - EVERGLOW [+Motion DL]', + 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ', + 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', + 'channel_follower_count': int, + 'description': 'md5:17eccca93a786d51bc67646756894066', + 'upload_date': '20191228', + 'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ', + 'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'], + 'playable_in_embed': True, + 'like_count': int, + 'categories': ['Entertainment'], + 'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg', + 'age_limit': 18, + 'channel': 
'Projekt Melody', + 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', + 'view_count': int, + 'availability': 'needs_auth', + 'comment_count': int, + 'live_status': 'not_live', + 'uploader': 'Projekt Melody', + 'duration': 106, + }, + 'params': {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}, 'format': '251-drc'}, + }, + { + 'url': 'https://www.youtube.com/live/qVv6vCqciTM', + 'info_dict': { + 'id': 'qVv6vCqciTM', + 'ext': 'mp4', + 'age_limit': 0, + 'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA', + 'comment_count': int, + 'chapters': 'count:13', + 'upload_date': '20221223', + 'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg', + 'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA', + 'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA', + 'like_count': int, + 'release_date': '20221223', + 'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'], + 'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】', + 'view_count': int, + 'playable_in_embed': True, + 'duration': 4438, + 'availability': 'public', + 'channel_follower_count': int, + 'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA', + 'categories': ['Entertainment'], + 'live_status': 'was_live', + 'release_timestamp': 1671793345, + 'channel': 'さなちゃんねる', + 'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d', + 'uploader': 'さなちゃんねる', + }, + }, ] _WEBPAGE_TESTS = [ @@ -2513,11 +2713,10 @@ def refetch_manifest(format_id, delay): return _, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url) - video_details = traverse_obj( - prs, (..., 'videoDetails'), expected_type=dict, default=[]) + video_details = traverse_obj(prs, (..., 'videoDetails'), expected_type=dict) microformats = traverse_obj( prs, (..., 'microformat', 'playerMicroformatRenderer'), - expected_type=dict, default=[]) + expected_type=dict) _, live_status, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url) is_live = live_status == 'is_live' start_time = time.time() @@ -2526,18 +2725,19 @@ def mpd_feed(format_id, delay): """ @returns (manifest_url, manifest_stream_number, is_live) or None """ - with lock: - refetch_manifest(format_id, delay) - - f = next((f for f in formats if f['format_id'] == format_id), None) - if not f: - if not is_live: - self.to_screen(f'{video_id}: Video is no longer live') - else: - self.report_warning( - f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}') - return None - return f['manifest_url'], f['manifest_stream_number'], is_live + for retry in self.RetryManager(fatal=False): + with lock: + refetch_manifest(format_id, delay) + + f = next((f for f in formats if f['format_id'] == format_id), None) + if not f: + if not is_live: + retry.error = f'{video_id}: Video is no longer live' + else: + retry.error = f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}' + continue + return f['manifest_url'], f['manifest_stream_number'], is_live + return None for f in formats: f['is_live'] = is_live @@ -3005,11 +3205,11 @@ def _extract_chapters_from_json(self, data, duration): 'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters' ), expected_type=list) - return self._extract_chapters( + return self._extract_chapters_helper( chapter_list, - chapter_time=lambda chapter: float_or_none( + start_function=lambda chapter: float_or_none( traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000), - chapter_title=lambda chapter: 
traverse_obj( + title_function=lambda chapter: traverse_obj( chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str), duration=duration) @@ -3017,47 +3217,15 @@ def _extract_chapters_from_engagement_panel(self, data, duration): content_list = traverse_obj( data, ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'), - expected_type=list, default=[]) + expected_type=list) chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription')) chapter_title = lambda chapter: self._get_text(chapter, 'title') return next(filter(None, ( - self._extract_chapters(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')), - chapter_time, chapter_title, duration) + self._extract_chapters_helper(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')), + chapter_time, chapter_title, duration) for contents in content_list)), []) - def _extract_chapters_from_description(self, description, duration): - duration_re = r'(?:\d+:)?\d{1,2}:\d{2}' - sep_re = r'(?m)^\s*(%s)\b\W*\s(%s)\s*$' - return self._extract_chapters( - re.findall(sep_re % (duration_re, r'.+?'), description or ''), - chapter_time=lambda x: parse_duration(x[0]), chapter_title=lambda x: x[1], - duration=duration, strict=False) or self._extract_chapters( - re.findall(sep_re % (r'.+?', duration_re), description or ''), - chapter_time=lambda x: parse_duration(x[1]), chapter_title=lambda x: x[0], - duration=duration, strict=False) - - def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration, strict=True): - if not duration: - return - chapter_list = [{ - 'start_time': chapter_time(chapter), - 'title': chapter_title(chapter), - } for chapter in chapter_list or []] - if not strict: - chapter_list.sort(key=lambda c: c['start_time'] or 0) - - chapters = [{'start_time': 0}] - for idx, chapter in enumerate(chapter_list): - if chapter['start_time'] is None: - self.report_warning(f'Incomplete chapter {idx}') - elif chapters[-1]['start_time'] <= chapter['start_time'] <= duration: - chapters.append(chapter) - elif chapter not in chapters: - self.report_warning( - f'Invalid start time ({chapter["start_time"]} < {chapters[-1]["start_time"]}) for chapter "{chapter["title"]}"') - return chapters[1:] - def _extract_comment(self, comment_renderer, parent=None): comment_id = comment_renderer.get('commentId') if not comment_id: @@ -3141,6 +3309,13 @@ def extract_thread(contents): comment = self._extract_comment(comment_renderer, parent) if not comment: continue + # Sometimes YouTube may break and give us infinite looping comments. + # See: https://github.com/yt-dlp/yt-dlp/issues/6290 + if comment['id'] in tracker['seen_comment_ids']: + self.report_warning('Detected YouTube comments looping. 
Stopping comment extraction as we probably cannot get any more.') + yield + else: + tracker['seen_comment_ids'].add(comment['id']) tracker['running_total'] += 1 tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1 @@ -3165,7 +3340,8 @@ def extract_thread(contents): est_total=0, current_page_thread=0, total_parent_comments=0, - total_reply_comments=0) + total_reply_comments=0, + seen_comment_ids=set()) # TODO: Deprecated # YouTube comments have a max depth of 2 @@ -3207,11 +3383,21 @@ def extract_thread(contents): note_prefix = '%sDownloading comment%s API JSON page %d %s' % ( ' ' if parent else '', ' replies' if parent else '', page_num, comment_prog_str) - - response = self._extract_response( - item_id=None, query=continuation, - ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, - check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None) + try: + response = self._extract_response( + item_id=None, query=continuation, + ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, + check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None) + except ExtractorError as e: + # Ignore incomplete data error for replies if retries didn't work. + # This is to allow any other parent comments and comment threads to be downloaded. + # See: https://github.com/yt-dlp/yt-dlp/issues/4669 + if 'incomplete data' in str(e).lower() and parent and self.get_param('ignoreerrors') is True: + self.report_warning( + 'Received incomplete data for a comment reply thread and retrying did not help. ' + 'Ignoring to let other comments be downloaded.') + else: + raise is_forced_continuation = False continuation_contents = traverse_obj( response, 'onResponseReceivedEndpoints', expected_type=list, default=[]) @@ -3240,6 +3426,7 @@ def extract_thread(contents): message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1) if message and not parent and tracker['running_total'] == 0: self.report_warning(f'Youtube said: {message}', video_id=video_id, only_once=True) + raise self.CommentsDisabled @staticmethod def _generate_comment_continuation(video_id): @@ -3283,7 +3470,7 @@ def _is_agegated(player_response): if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')): return True - reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[]) + reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason'))) AGE_GATE_REASONS = ( 'confirm your age', 'age-restricted', 'inappropriate', # reason 'age_verification_required', 'age_check_required', # status @@ -3430,7 +3617,7 @@ def _needs_live_processing(self, live_status, duration): return live_status def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration): - itags, stream_ids = {}, [] + itags, stream_ids = collections.defaultdict(set), [] itag_qualities, res_qualities = {}, {0: None} q = qualities([ # Normally tiny is the smallest video-only formats. 
But @@ -3439,7 +3626,7 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l 'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres' ]) - streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[]) + streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...)) for fmt in streaming_formats: if fmt.get('targetDurationSec'): @@ -3447,7 +3634,7 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l itag = str_or_none(fmt.get('itag')) audio_track = fmt.get('audioTrack') or {} - stream_id = '%s.%s' % (itag or '', audio_track.get('id', '')) + stream_id = (itag, audio_track.get('id'), fmt.get('isDrc')) if stream_id in stream_ids: continue @@ -3512,10 +3699,6 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l video_id=video_id, only_once=True) throttled = True - if itag: - itags[itag] = 'https' - stream_ids.append(stream_id) - tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) language_preference = ( 10 if audio_track.get('audioIsDefault') and 10 @@ -3532,11 +3715,12 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l dct = { 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), - 'format_id': itag, + 'format_id': f'{itag}{"-drc" if fmt.get("isDrc") else ""}', 'format_note': join_nonempty( - '%s%s' % (audio_track.get('displayName') or '', - ' (default)' if language_preference > 0 else ''), + join_nonempty(audio_track.get('displayName'), + language_preference > 0 and ' (default)', delim=''), fmt.get('qualityLabel') or quality.replace('audio_quality_', ''), + fmt.get('isDrc') and 'DRC', try_get(fmt, lambda x: x['projectionType'].replace('RECTANGULAR', '').lower()), try_get(fmt, lambda x: x['spatialAudioType'].replace('SPATIAL_AUDIO_TYPE_', '').lower()), throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '), @@ -3545,13 +3729,13 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l 'fps': int_or_none(fmt.get('fps')) or None, 'audio_channels': fmt.get('audioChannels'), 'height': height, - 'quality': q(quality), + 'quality': q(quality) - bool(fmt.get('isDrc')) / 2, 'has_drm': bool(fmt.get('drmFamilies')), 'tbr': tbr, 'url': fmt_url, 'width': int_or_none(fmt.get('width')), 'language': join_nonempty(audio_track.get('id', '').split('.')[0], - 'desc' if language_preference < -1 else ''), + 'desc' if language_preference < -1 else '') or None, 'language_preference': language_preference, # Strictly de-prioritize damaged and 3gp formats 'preference': -10 if is_damaged else -2 if itag == '17' else None, @@ -3561,19 +3745,26 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l if mime_mobj: dct['ext'] = mimetype2ext(mime_mobj.group(1)) dct.update(parse_codecs(mime_mobj.group(2))) - no_audio = dct.get('acodec') == 'none' - no_video = dct.get('vcodec') == 'none' - if no_audio: - dct['vbr'] = tbr - if no_video: - dct['abr'] = tbr - if no_audio or no_video: - dct['downloader_options'] = { - # Youtube throttles chunks >~10M - 'http_chunk_size': 10485760, - } - if dct.get('ext'): - dct['container'] = dct['ext'] + '_dash' + + single_stream = 'none' in (dct.get('acodec'), dct.get('vcodec')) + if single_stream and dct.get('ext'): + 
dct['container'] = dct['ext'] + '_dash' + if single_stream or itag == '17': + CHUNK_SIZE = 10 << 20 + dct.update({ + 'protocol': 'http_dash_segments', + 'fragments': [{ + 'url': update_url_query(dct['url'], { + 'range': f'{range_start}-{min(range_start + CHUNK_SIZE - 1, dct["filesize"])}' + }) + } for range_start in range(0, dct['filesize'], CHUNK_SIZE)] + } if itag != '17' and dct['filesize'] else { + 'downloader_options': {'http_chunk_size': CHUNK_SIZE} + }) + + if itag: + itags[itag].add(('https', dct.get('language'))) + stream_ids.append(stream_id) yield dct needs_live_processing = self._needs_live_processing(live_status, duration) @@ -3594,13 +3785,15 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, l skip_manifests.add('dash') def process_manifest_format(f, proto, itag): - if itag in itags: - if itags[itag] == proto or f'{itag}-{proto}' in itags: - return False - itag = f'{itag}-{proto}' - if itag: + key = (proto, f.get('language')) + if key in itags[itag]: + return False + itags[itag].add(key) + + if any(p != proto for p, _ in itags[itag]): + f['format_id'] = f'{itag}-{proto}' + elif itag: f['format_id'] = itag - itags[itag] = proto f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1)) if f['quality'] == -1 and f.get('height'): @@ -3702,7 +3895,7 @@ def _list_formats(self, video_id, microformats, video_details, player_responses, else 'was_live' if live_content else 'not_live' if False in (is_live, live_content) else None) - streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[]) + streaming_data = traverse_obj(player_responses, (..., 'streamingData')) *formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, live_status, duration) return live_broadcast_details, live_status, streaming_data, formats, subtitles @@ -3717,7 +3910,7 @@ def _real_extract(self, url): webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url) playability_statuses = traverse_obj( - player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[]) + player_responses, (..., 'playabilityStatus'), expected_type=dict) trailer_video_id = get_first( playability_statuses, @@ -3730,11 +3923,10 @@ def _real_extract(self, url): search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None)) - video_details = traverse_obj( - player_responses, (..., 'videoDetails'), expected_type=dict, default=[]) + video_details = traverse_obj(player_responses, (..., 'videoDetails'), expected_type=dict) microformats = traverse_obj( player_responses, (..., 'microformat', 'playerMicroformatRenderer'), - expected_type=dict, default=[]) + expected_type=dict) translated_title = self._get_text(microformats, (..., 'title')) video_title = (self._preferred_lang and translated_title @@ -3897,10 +4089,6 @@ def is_bad_format(fmt): formats.extend(self._extract_storyboard(player_responses, duration)) - # source_preference is lower for throttled/potentially damaged formats - self._sort_formats(formats, ( - 'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec:vp9.2', 'channels', 'acodec', 'lang', 'proto')) - info = { 'id': video_id, 'title': video_title, @@ -3911,7 +4099,7 @@ def is_bad_format(fmt): 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')), 'description': video_description, 'uploader': get_first(video_details, 'author'), - 'uploader_id': 
self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None, + 'uploader_id': self._search_regex(r'/(?:channel/|user/|(?=@))([^/?&#]+)', owner_profile_url, 'uploader id', default=None), 'uploader_url': owner_profile_url, 'channel_id': channel_id, 'channel_url': format_field(channel_id, None, 'https://www.youtube.com/channel/%s'), @@ -3930,6 +4118,8 @@ def is_bad_format(fmt): 'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'), 'live_status': live_status, 'release_timestamp': live_start_time, + '_format_sort_fields': ( # source_preference is lower for throttled/potentially damaged formats + 'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec:vp9.2', 'channels', 'acodec', 'lang', 'proto') } subtitles = {} @@ -3942,10 +4132,10 @@ def get_lang_code(track): # Converted into dicts to remove duplicates captions = { get_lang_code(sub): sub - for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])} + for sub in traverse_obj(pctr, (..., 'captionTracks', ...))} translation_languages = { lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1) - for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])} + for lang in traverse_obj(pctr, (..., 'translationLanguages', ...))} def process_language(container, base_url, lang_code, sub_name, query): lang_subs = container.setdefault(lang_code, []) @@ -3979,7 +4169,7 @@ def process_language(container, base_url, lang_code, sub_name, query): if not trans_code: continue orig_trans_code = trans_code - if caption_track.get('kind') != 'asr': + if caption_track.get('kind') != 'asr' and trans_code != 'und': if not get_translated_subs: continue trans_code += f'-{lang_code}' @@ -4099,9 +4289,8 @@ def process_language(container, base_url, lang_code, sub_name, query): list) or []): tbrs = variadic( traverse_obj( - tlb, 'toggleButtonRenderer', - ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'), - default=[])) + tlb, ('toggleButtonRenderer', ...), + ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'))) for tbr in tbrs: for getter, regex in [( lambda x: x['defaultText']['accessibility']['accessibilityData'], @@ -4222,15 +4411,19 @@ def process_language(container, base_url, lang_code, sub_name, query): class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor): - @staticmethod def passthrough_smuggled_data(func): - def _smuggle(entries, smuggled_data): - for entry in entries: - # TODO: Convert URL to music.youtube instead. - # Do we need to passthrough any other smuggled_data? 
- entry['url'] = smuggle_url(entry['url'], smuggled_data) - yield entry + def _smuggle(info, smuggled_data): + if info.get('_type') not in ('url', 'url_transparent'): + return info + if smuggled_data.get('is_music_url'): + parsed_url = urllib.parse.urlparse(info['url']) + if parsed_url.netloc in ('www.youtube.com', 'music.youtube.com'): + smuggled_data.pop('is_music_url') + info['url'] = urllib.parse.urlunparse(parsed_url._replace(netloc='music.youtube.com')) + if smuggled_data: + info['url'] = smuggle_url(info['url'], smuggled_data) + return info @functools.wraps(func) def wrapper(self, url): @@ -4238,24 +4431,13 @@ def wrapper(self, url): if self.is_music_url(url): smuggled_data['is_music_url'] = True info_dict = func(self, url, smuggled_data) - if smuggled_data and info_dict.get('entries'): - info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data) + if smuggled_data: + _smuggle(info_dict, smuggled_data) + if info_dict.get('entries'): + info_dict['entries'] = (_smuggle(i, smuggled_data.copy()) for i in info_dict['entries']) return info_dict return wrapper - def _extract_channel_id(self, webpage): - channel_id = self._html_search_meta( - 'channelId', webpage, 'channel id', default=None) - if channel_id: - return channel_id - channel_url = self._html_search_meta( - ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url', - 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad', - 'twitter:app:url:googleplay'), webpage, 'channel url') - return self._search_regex( - r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+', - channel_url, 'channel id') - @staticmethod def _extract_basic_item_renderer(item): # Modified from _extract_grid_item_renderer @@ -4270,6 +4452,25 @@ def _extract_basic_item_renderer(item): elif key.startswith('grid') and key.endswith('Renderer'): return renderer + def _extract_channel_renderer(self, renderer): + channel_id = renderer['channelId'] + title = self._get_text(renderer, 'title') + channel_url = f'https://www.youtube.com/channel/{channel_id}' + return { + '_type': 'url', + 'url': channel_url, + 'id': channel_id, + 'ie_key': YoutubeTabIE.ie_key(), + 'channel': title, + 'channel_id': channel_id, + 'channel_url': channel_url, + 'title': title, + 'channel_follower_count': self._get_count(renderer, 'subscriberCountText'), + 'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'), + 'playlist_count': self._get_count(renderer, 'videoCountText'), + 'description': self._get_text(renderer, 'descriptionSnippet'), + } + def _grid_entries(self, grid_renderer): for item in grid_renderer['items']: if not isinstance(item, dict): @@ -4295,9 +4496,7 @@ def _grid_entries(self, grid_renderer): # channel channel_id = renderer.get('channelId') if channel_id: - yield self.url_result( - 'https://www.youtube.com/channel/%s' % channel_id, - ie=YoutubeTabIE.ie_key(), video_title=title) + yield self._extract_channel_renderer(renderer) continue # generic endpoint URL support ep_url = urljoin('https://www.youtube.com/', try_get( @@ -4575,58 +4774,52 @@ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data): @staticmethod def _extract_selected_tab(tabs, fatal=True): - for tab in tabs: - renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {} - if renderer.get('selected') is True: - return renderer - else: - if fatal: - raise ExtractorError('Unable to find selected tab') - - def _extract_uploader(self, data): - uploader = {} - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {} - owner = 
try_get( - renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict) - if owner: - owner_text = owner.get('text') - uploader['uploader'] = self._search_regex( - r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text) - uploader['uploader_id'] = try_get( - owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], str) - uploader['uploader_url'] = urljoin( - 'https://www.youtube.com/', - try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], str)) - return filter_dict(uploader) + for tab_renderer in tabs: + if tab_renderer.get('selected'): + return tab_renderer + if fatal: + raise ExtractorError('Unable to find selected tab') + + @staticmethod + def _extract_tab_renderers(response): + return traverse_obj( + response, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., ('tabRenderer', 'expandableTabRenderer')), expected_type=dict) def _extract_from_tabs(self, item_id, ytcfg, data, tabs): - playlist_id = title = description = channel_url = channel_name = channel_id = None - tags = [] + metadata = self._extract_metadata_from_tabs(item_id, data) selected_tab = self._extract_selected_tab(tabs) - primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') - renderer = try_get( - data, lambda x: x['metadata']['channelMetadataRenderer'], dict) - if renderer: - channel_name = renderer.get('title') - channel_url = renderer.get('channelUrl') - channel_id = renderer.get('externalId') - else: - renderer = try_get( - data, lambda x: x['metadata']['playlistMetadataRenderer'], dict) + metadata['title'] += format_field(selected_tab, 'title', ' - %s') + metadata['title'] += format_field(selected_tab, 'expandedText', ' - %s') - if renderer: - title = renderer.get('title') - description = renderer.get('description', '') - playlist_id = channel_id - tags = renderer.get('keywords', '').split() + return self.playlist_result( + self._entries( + selected_tab, metadata['id'], ytcfg, + self._extract_account_syncid(ytcfg, data), + self._extract_visitor_data(data, ytcfg)), + **metadata) + + def _extract_metadata_from_tabs(self, item_id, data): + info = {'id': item_id} + + metadata_renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'), expected_type=dict) + if metadata_renderer: + info.update({ + 'uploader': metadata_renderer.get('title'), + 'uploader_id': metadata_renderer.get('externalId'), + 'uploader_url': metadata_renderer.get('channelUrl'), + }) + if info['uploader_id']: + info['id'] = info['uploader_id'] + else: + metadata_renderer = traverse_obj(data, ('metadata', 'playlistMetadataRenderer'), expected_type=dict) # We can get the uncropped banner/avatar by replacing the crop params with '=s0' # See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714 def _get_uncropped(url): return url_or_none((url or '').split('=')[0] + '=s0') - avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar') + avatar_thumbnails = self._extract_thumbnails(metadata_renderer, 'avatar') if avatar_thumbnails: uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url']) if uncropped_avatar: @@ -4637,7 +4830,7 @@ def _get_uncropped(url): }) channel_banners = self._extract_thumbnails( - data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner'])) + data, ('header', ..., ('banner', 'mobileBanner', 'tvBanner'))) for banner in channel_banners: banner['preference'] = -10 @@ -4650,46 +4843,68 @@ def _get_uncropped(url): 'preference': -5 }) + # Deprecated - 
remove primary_sidebar_renderer when layout discontinued + primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') + playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer'), expected_type=dict) + primary_thumbnails = self._extract_thumbnails( primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail')) + playlist_thumbnails = self._extract_thumbnails( + playlist_header_renderer, ('playlistHeaderBanner', 'heroPlaylistThumbnailRenderer', 'thumbnail')) - if playlist_id is None: - playlist_id = item_id - - playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats') - last_updated_unix = self._parse_time_text(self._get_text(playlist_stats, 2)) - if title is None: - title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id - title += format_field(selected_tab, 'title', ' - %s') - title += format_field(selected_tab, 'expandedText', ' - %s') - - metadata = { - 'playlist_id': playlist_id, - 'playlist_title': title, - 'playlist_description': description, - 'uploader': channel_name, - 'uploader_id': channel_id, - 'uploader_url': channel_url, - 'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners, - 'tags': tags, - 'view_count': self._get_count(playlist_stats, 1), + info.update({ + 'title': (traverse_obj(metadata_renderer, 'title') + or self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) + or info['id']), 'availability': self._extract_availability(data), - 'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'), - 'playlist_count': self._get_count(playlist_stats, 0), 'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')), - } - if not channel_id: - metadata.update(self._extract_uploader(data)) - metadata.update({ - 'channel': metadata['uploader'], - 'channel_id': metadata['uploader_id'], - 'channel_url': metadata['uploader_url']}) - return self.playlist_result( - self._entries( - selected_tab, playlist_id, ytcfg, - self._extract_account_syncid(ytcfg, data), - self._extract_visitor_data(data, ytcfg)), - **metadata) + 'description': try_get(metadata_renderer, lambda x: x.get('description', '')), + 'tags': try_get(metadata_renderer or {}, lambda x: x.get('keywords', '').split()), + 'thumbnails': (primary_thumbnails or playlist_thumbnails) + avatar_thumbnails + channel_banners, + }) + + # Playlist stats is a text runs array containing [video count, view count, last updated]. + # last updated or (view count and last updated) may be missing. 
+ playlist_stats = get_first( + (primary_sidebar_renderer, playlist_header_renderer), (('stats', 'briefStats', 'numVideosText'), )) + + last_updated_unix = self._parse_time_text( + self._get_text(playlist_stats, 2) # deprecated, remove when old layout discontinued + or self._get_text(playlist_header_renderer, ('byline', 1, 'playlistBylineRenderer', 'text'))) + info['modified_date'] = strftime_or_none(last_updated_unix, '%Y%m%d') + + info['view_count'] = self._get_count(playlist_stats, 1) + if info['view_count'] is None: # 0 is allowed + info['view_count'] = self._get_count(playlist_header_renderer, 'viewCountText') + if info['view_count'] is None: + info['view_count'] = self._get_count(data, ( + 'contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., 'tabRenderer', 'content', 'sectionListRenderer', + 'contents', ..., 'itemSectionRenderer', 'contents', ..., 'channelAboutFullMetadataRenderer', 'viewCountText')) + + info['playlist_count'] = self._get_count(playlist_stats, 0) + if info['playlist_count'] is None: # 0 is allowed + info['playlist_count'] = self._get_count(playlist_header_renderer, ('byline', 0, 'playlistBylineRenderer', 'text')) + + if not info.get('uploader_id'): + owner = traverse_obj(playlist_header_renderer, 'ownerText') + if not owner: # Deprecated + owner = traverse_obj( + self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer'), + ('videoOwner', 'videoOwnerRenderer', 'title')) + owner_text = self._get_text(owner) + browse_ep = traverse_obj(owner, ('runs', 0, 'navigationEndpoint', 'browseEndpoint')) or {} + info.update({ + 'uploader': self._search_regex(r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text), + 'uploader_id': browse_ep.get('browseId'), + 'uploader_url': urljoin('https://www.youtube.com', browse_ep.get('canonicalBaseUrl')) + }) + + info.update({ + 'channel': info['uploader'], + 'channel_id': info['uploader_id'], + 'channel_url': info['uploader_url'] + }) + return info def _extract_inline_playlist(self, playlist, playlist_id, data, ytcfg): first_id = last_id = response = None @@ -4751,19 +4966,21 @@ def _extract_availability(self, data): Note: Unless YouTube tells us explicitly, we do not assume it is public @param data: response """ - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {} + sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {} + playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer')) or {} + player_header_privacy = playlist_header_renderer.get('privacy') - player_header_privacy = traverse_obj( - data, ('header', 'playlistHeaderRenderer', 'privacy'), expected_type=str) - - badges = self._extract_badges(renderer) + badges = self._extract_badges(sidebar_renderer) # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge - privacy_setting_icon = traverse_obj( - renderer, ( - 'privacyForm', 'dropdownFormFieldRenderer', 'dropdown', 'dropdownRenderer', 'entries', - lambda _, v: v['privacyDropdownItemRenderer']['isSelected'], 'privacyDropdownItemRenderer', 'icon', 'iconType'), - get_all=False, expected_type=str) + privacy_setting_icon = get_first( + (playlist_header_renderer, sidebar_renderer), + ('privacyForm', 'dropdownFormFieldRenderer', 'dropdown', 'dropdownRenderer', 'entries', + lambda _, v: v['privacyDropdownItemRenderer']['isSelected'], 'privacyDropdownItemRenderer', 'icon', 'iconType'), + expected_type=str) + + microformats_is_unlisted = 
traverse_obj( + data, ('microformat', 'microformatDataRenderer', 'unlisted'), expected_type=bool) return ( 'public' if ( @@ -4778,7 +4995,8 @@ def _extract_availability(self, data): is_unlisted=( self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) or player_header_privacy == 'UNLISTED' if player_header_privacy is not None - else privacy_setting_icon == 'PRIVACY_UNLISTED' if privacy_setting_icon is not None else None), + else privacy_setting_icon == 'PRIVACY_UNLISTED' if privacy_setting_icon is not None + else microformats_is_unlisted if microformats_is_unlisted is not None else None), needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None, needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None, needs_auth=False)) @@ -4794,39 +5012,23 @@ def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict): def _reload_with_unavailable_videos(self, item_id, data, ytcfg): """ - Get playlist with unavailable videos if the 'show unavailable videos' button exists. + Reload playlists with unavailable videos (e.g. private videos, region blocked, etc.) """ - browse_id = params = None - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') - if not renderer: + is_playlist = bool(traverse_obj( + data, ('metadata', 'playlistMetadataRenderer'), ('header', 'playlistHeaderRenderer'))) + if not is_playlist: return - menu_renderer = try_get( - renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] - for menu_item in menu_renderer: - if not isinstance(menu_item, dict): - continue - nav_item_renderer = menu_item.get('menuNavigationItemRenderer') - text = try_get( - nav_item_renderer, lambda x: x['text']['simpleText'], str) - if not text or text.lower() != 'show unavailable videos': - continue - browse_endpoint = try_get( - nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} - browse_id = browse_endpoint.get('browseId') - params = browse_endpoint.get('params') - break - headers = self.generate_api_headers( ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data), visitor_data=self._extract_visitor_data(data, ytcfg)) query = { - 'params': params or 'wgYCCAA=', - 'browseId': browse_id or 'VL%s' % item_id + 'params': 'wgYCCAA=', + 'browseId': f'VL{item_id}' } return self._extract_response( item_id=item_id, headers=headers, query=query, check_get_keys='contents', fatal=False, ytcfg=ytcfg, - note='Downloading API JSON with unavailable videos') + note='Redownloading playlist API JSON with unavailable videos') @functools.cached_property def skip_webpage(self): @@ -4878,8 +5080,7 @@ def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=Fals webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal) ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage) # Reject webpage data if redirected to home page without explicitly requesting - selected_tab = self._extract_selected_tab(traverse_obj( - data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {} + selected_tab = self._extract_selected_tab(self._extract_tab_renderers(data), fatal=False) or {} if (url != 'https://www.youtube.com/feed/recommended' and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])): @@ -4950,7 +5151,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): IE_DESC = 'YouTube Tabs' _VALID_URL = r'''(?x: https?:// - 
(?:\w+\.)? + (?!consent\.)(?:\w+\.)? (?: youtube(?:kids)?\.com| %(invidious)s @@ -5324,6 +5525,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'channel_url': 'https://www.youtube.com/user/Computerphile', 'channel': 'Computerphile', 'availability': 'public', + 'modified_date': '20190712', }, 'playlist_mincount': 11, }, { @@ -5372,18 +5574,19 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'categories': ['News & Politics'], 'tags': list, 'like_count': int, - 'release_timestamp': 1642502819, + 'release_timestamp': int, 'channel': 'Sky News', 'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ', 'age_limit': 0, 'view_count': int, - 'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg', + 'thumbnail': r're:https?://i\.ytimg\.com/vi/[^/]+/maxresdefault(?:_live)?\.jpg', 'playable_in_embed': True, - 'release_date': '20220118', + 'release_date': r're:\d+', 'availability': 'public', 'live_status': 'is_live', 'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ', - 'channel_follower_count': int + 'channel_follower_count': int, + 'concurrent_view_count': int, }, 'params': { 'skip_download': True, @@ -5461,7 +5664,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': '#cctv9', 'tags': [], }, - 'playlist_mincount': 350, + 'playlist_mincount': 300, # not consistent but should be over 300 }, { 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU', 'only_matching': True, @@ -5512,22 +5715,16 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'description': '', 'availability': 'public', }, - 'expected_warnings': [ - 'The URL does not have a videos tab', - r'[Uu]navailable videos (are|will be) hidden', - ], 'playlist_mincount': 101, }, { - 'note': 'Topic without a UU playlist', + # Destination channel with only a hidden self tab (tab id is UCtFRv9O2AHqOZjjynzrv-xg) + # Treat as a general feed 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg', 'info_dict': { 'id': 'UCtFRv9O2AHqOZjjynzrv-xg', 'title': 'UCtFRv9O2AHqOZjjynzrv-xg', 'tags': [], }, - 'expected_warnings': [ - 'the playlist redirect gave error', - ], 'playlist_mincount': 9, }, { 'note': 'Youtube music Album', @@ -5595,6 +5792,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'skip_download': True, 'extractor_args': {'youtubetab': {'skip': ['webpage']}} }, + 'skip': 'Query for sorting no longer works', }, { 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...', 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw', @@ -5613,10 +5811,6 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw', 'availability': 'public', }, - 'expected_warnings': [ - 'does not have a videos tab', - r'[Uu]navailable videos (are|will be) hidden', - ], 'playlist_mincount': 101, 'params': { 'skip_download': True, @@ -5653,7 +5847,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'tags': [], 'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA', 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', - 'description': '', + 'description': 'test description', 'title': 'cole-dlp-test-acc - 再生リスト', 'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', 'uploader': 'cole-dlp-test-acc', @@ -5694,13 +5888,271 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): }, 'playlist_mincount': 50, + }, { + # Channel with a real live tab (not to be mistaken with streams tab) + # Do not treat like it should redirect to live stream + 'url': 
'https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live', + 'info_dict': { + 'id': 'UCEH7P7kyJIkS_gJf93VYbmg', + 'title': 'UCEH7P7kyJIkS_gJf93VYbmg - Live', + 'tags': [], + }, + 'playlist_mincount': 20, + }, { + # Tab name is not the same as tab id + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/letsplay', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Let\'s play', + 'tags': [], + }, + 'playlist_mincount': 8, + }, { + # Home tab id is literally home. Not to get mistaken with featured + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/home', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Home', + 'tags': [], + }, + 'playlist_mincount': 8, + }, { + # Should get three playlists for videos, shorts and streams tabs + 'url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA', + 'info_dict': { + 'id': 'UCK9V2B22uJYu3N7eR_BT9QA', + 'title': 'Polka Ch. 尾丸ポルカ', + 'channel_follower_count': int, + 'channel_id': 'UCK9V2B22uJYu3N7eR_BT9QA', + 'channel_url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA', + 'uploader': 'Polka Ch. 尾丸ポルカ', + 'description': 'md5:3b8df1ac5af337aa206e37ee3d181ec9', + 'channel': 'Polka Ch. 尾丸ポルカ', + 'tags': 'count:35', + 'uploader_url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA', + 'uploader_id': 'UCK9V2B22uJYu3N7eR_BT9QA', + }, + 'playlist_count': 3, + }, { + # Shorts tab with channel with handle + 'url': 'https://www.youtube.com/@NotJustBikes/shorts', + 'info_dict': { + 'id': 'UC0intLFzLaudFG-xAvUEO-A', + 'title': 'Not Just Bikes - Shorts', + 'tags': 'count:12', + 'uploader': 'Not Just Bikes', + 'channel_url': 'https://www.youtube.com/channel/UC0intLFzLaudFG-xAvUEO-A', + 'description': 'md5:7513148b1f02b924783157d84c4ea555', + 'channel_follower_count': int, + 'uploader_id': 'UC0intLFzLaudFG-xAvUEO-A', + 'channel_id': 'UC0intLFzLaudFG-xAvUEO-A', + 'uploader_url': 'https://www.youtube.com/channel/UC0intLFzLaudFG-xAvUEO-A', + 'channel': 'Not Just Bikes', + }, + 'playlist_mincount': 10, + }, { + # Streams tab + 'url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig/streams', + 'info_dict': { + 'id': 'UC3eYAvjCVwNHgkaGbXX3sig', + 'title': '中村悠一 - Live', + 'tags': 'count:7', + 'channel_id': 'UC3eYAvjCVwNHgkaGbXX3sig', + 'channel_url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig', + 'uploader_id': 'UC3eYAvjCVwNHgkaGbXX3sig', + 'channel': '中村悠一', + 'uploader_url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig', + 'channel_follower_count': int, + 'uploader': '中村悠一', + 'description': 'md5:e744f6c93dafa7a03c0c6deecb157300', + }, + 'playlist_mincount': 60, + }, { + # Channel with no uploads and hence no videos, streams, shorts tabs or uploads playlist. This should fail. + # See test_youtube_lists + 'url': 'https://www.youtube.com/channel/UC2yXPzFejc422buOIzn_0CA', + 'only_matching': True, + }, { + # No uploads and no UCID given. 
Should fail with no uploads error + # See test_youtube_lists + 'url': 'https://www.youtube.com/news', + 'only_matching': True + }, { + # No videos tab but has a shorts tab + 'url': 'https://www.youtube.com/c/TKFShorts', + 'info_dict': { + 'id': 'UCgJ5_1F6yJhYLnyMszUdmUg', + 'title': 'Shorts Break - Shorts', + 'tags': 'count:32', + 'channel_id': 'UCgJ5_1F6yJhYLnyMszUdmUg', + 'channel': 'Shorts Break', + 'description': 'md5:a6c234cf3d50d878ef8721e34457cd11', + 'uploader': 'Shorts Break', + 'channel_follower_count': int, + 'uploader_id': 'UCgJ5_1F6yJhYLnyMszUdmUg', + 'uploader_url': 'https://www.youtube.com/channel/UCgJ5_1F6yJhYLnyMszUdmUg', + 'channel_url': 'https://www.youtube.com/channel/UCgJ5_1F6yJhYLnyMszUdmUg', + }, + 'playlist_mincount': 30, + }, { + # Trending Now Tab. tab id is empty + 'url': 'https://www.youtube.com/feed/trending', + 'info_dict': { + 'id': 'trending', + 'title': 'trending - Now', + 'tags': [], + }, + 'playlist_mincount': 30, + }, { + # Trending Gaming Tab. tab id is empty + 'url': 'https://www.youtube.com/feed/trending?bp=4gIcGhpnYW1pbmdfY29ycHVzX21vc3RfcG9wdWxhcg%3D%3D', + 'info_dict': { + 'id': 'trending', + 'title': 'trending - Gaming', + 'tags': [], + }, + 'playlist_mincount': 30, + }, { + # Shorts url result in shorts tab + 'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/shorts', + 'info_dict': { + 'id': 'UCiu-3thuViMebBjw_5nWYrA', + 'title': 'cole-dlp-test-acc - Shorts', + 'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel': 'cole-dlp-test-acc', + 'description': 'test description', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'tags': [], + 'uploader': 'cole-dlp-test-acc', + 'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + + }, + 'playlist': [{ + 'info_dict': { + '_type': 'url', + 'ie_key': 'Youtube', + 'url': 'https://www.youtube.com/shorts/sSM9J5YH_60', + 'id': 'sSM9J5YH_60', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'title': 'SHORT short', + 'channel': 'cole-dlp-test-acc', + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'view_count': int, + 'thumbnails': list, + } + }], + 'params': {'extract_flat': True}, + }, { + # Live video status should be extracted + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/live', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Live', # TODO, should be Minecraft - Live or Minecraft - Topic - Live + 'tags': [] + }, + 'playlist': [{ + 'info_dict': { + '_type': 'url', + 'ie_key': 'Youtube', + 'url': 'startswith:https://www.youtube.com/watch?v=', + 'id': str, + 'title': str, + 'live_status': 'is_live', + 'channel_id': str, + 'channel_url': str, + 'concurrent_view_count': int, + 'channel': str, + } + }], + 'params': {'extract_flat': True, 'playlist_items': '1'}, + 'playlist_mincount': 1 + }, { + # Channel renderer metadata. 
Contains number of videos on the channel + 'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/channels', + 'info_dict': { + 'id': 'UCiu-3thuViMebBjw_5nWYrA', + 'title': 'cole-dlp-test-acc - Channels', + 'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel': 'cole-dlp-test-acc', + 'description': 'test description', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'tags': [], + 'uploader': 'cole-dlp-test-acc', + 'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + + }, + 'playlist': [{ + 'info_dict': { + '_type': 'url', + 'ie_key': 'YoutubeTab', + 'url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'title': 'PewDiePie', + 'channel': 'PewDiePie', + 'channel_url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'thumbnails': list, + 'channel_follower_count': int, + 'playlist_count': int + } + }], + 'params': {'extract_flat': True}, + }, { + 'url': 'https://www.youtube.com/@3blue1brown/about', + 'info_dict': { + 'id': 'UCYO_jab_esuFRV4b17AJtAw', + 'tags': ['Mathematics'], + 'title': '3Blue1Brown - About', + 'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', + 'channel_follower_count': int, + 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw', + 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw', + 'channel': '3Blue1Brown', + 'uploader': '3Blue1Brown', + 'view_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', + 'description': 'md5:e1384e8a133307dd10edee76e875d62f', + }, + 'playlist_count': 0, }] @classmethod def suitable(cls, url): return False if YoutubeIE.suitable(url) else super().suitable(url) - _URL_RE = re.compile(rf'(?P
<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
+    _URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/[^?#/]+))?(?P<post>.*)$')
+
+    def _get_url_mobj(self, url):
+        mobj = self._URL_RE.match(url).groupdict()
+        mobj.update((k, '') for k, v in mobj.items() if v is None)
+        return mobj
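# A rough, self-contained sketch of the normalisation done by _get_url_mobj above:
# match the URL, then coerce unmatched (None) groups to '' so callers can build
# f'{mobj["pre"]}{tab}{mobj["post"]}' without None checks. The pattern below is a
# simplified stand-in for the real _URL_RE / _VALID_URL, not the actual yt-dlp regex.
import re

_DEMO_URL_RE = re.compile(
    r'(?P<pre>https?://www\.youtube\.com/channel/(?P<id>[^/?#]+))'
    r'(?P<tab>/[^?#/]+)?(?P<post>.*)$')

def _demo_get_url_mobj(url):
    mobj = _DEMO_URL_RE.match(url).groupdict()
    mobj.update((k, '') for k, v in mobj.items() if v is None)
    return mobj

# _demo_get_url_mobj('https://www.youtube.com/channel/UCxxxx')
# -> {'pre': 'https://www.youtube.com/channel/UCxxxx', 'id': 'UCxxxx', 'tab': '', 'post': ''}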
+
+    def _extract_tab_id_and_name(self, tab, base_url='https://www.youtube.com'):
+        tab_name = (tab.get('title') or '').lower()
+        tab_url = urljoin(base_url, traverse_obj(
+            tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
+
+        tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
+                  or traverse_obj(tab, 'tabIdentifier', expected_type=str))
+        if tab_id:
+            return {
+                'TAB_ID_SPONSORSHIPS': 'membership',
+            }.get(tab_id, tab_id), tab_name
+
+        # Fallback to tab name if we cannot get the tab id.
+        # XXX: should we strip non-ascii letters? e.g. in case of 'let's play' tab example on special gaming channel
+        # Note that in the case of translated tab name this may result in an empty string, which we don't want.
+        if tab_name:
+            self.write_debug(f'Falling back to selected tab name: {tab_name}')
+        return {
+            'home': 'featured',
+            'live': 'streams',
+        }.get(tab_name, tab_name), tab_name
+
+    def _has_tab(self, tabs, tab_id):
+        return any(self._extract_tab_id_and_name(tab)[0] == tab_id for tab in tabs)
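# Simplified sketch of the tab-id resolution order implemented above (not the real
# method, which also returns the tab name): URL path -> tabIdentifier -> lower-cased
# tab title, with the 'home'/'live' aliases applied only in the title fallback.
def _demo_tab_id(tab_url_path, tab_identifier, tab_title):
    tab_id = (tab_url_path or '').lstrip('/') or tab_identifier
    if tab_id:
        return {'TAB_ID_SPONSORSHIPS': 'membership'}.get(tab_id, tab_id)
    return {'home': 'featured', 'live': 'streams'}.get(tab_title, tab_title)

# _demo_tab_id('/streams', None, 'live')  -> 'streams'   (URL path wins)
# _demo_tab_id(None, None, 'home')        -> 'featured'  (title fallback, aliased)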
 
     @YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
     def _real_extract(self, url, smuggled_data):
@@ -5709,123 +6161,137 @@ def _real_extract(self, url, smuggled_data):
             urllib.parse.urlparse(url)._replace(netloc='www.youtube.com'))
         compat_opts = self.get_param('compat_opts', [])
 
-        def get_mobj(url):
-            mobj = self._URL_RE.match(url).groupdict()
-            mobj.update((k, '') for k, v in mobj.items() if v is None)
-            return mobj
-
-        mobj, redirect_warning = get_mobj(url), None
-        # Youtube returns incomplete data if tabname is not lower case
-        pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
-        if is_channel:
-            if smuggled_data.get('is_music_url'):
-                if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
-                    item_id = item_id[2:]
-                    pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
-                elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
-                    mdata = self._extract_tab_endpoint(
-                        f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
-                    murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
-                                        get_all=False, expected_type=str)
-                    if not murl:
-                        raise ExtractorError('Failed to resolve album to playlist')
-                    return self.url_result(murl, ie=YoutubeTabIE.ie_key())
-                elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
-                    pre = f'https://www.youtube.com/channel/{item_id}'
-
-        original_tab_name = tab
+        mobj = self._get_url_mobj(url)
+        pre, tab, post, is_channel = mobj['pre'], mobj['tab'], mobj['post'], not mobj['not_channel']
+        if is_channel and smuggled_data.get('is_music_url'):
+            if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
+                return self.url_result(
+                    f'https://music.youtube.com/playlist?list={item_id[2:]}', YoutubeTabIE, item_id[2:])
+            elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
+                mdata = self._extract_tab_endpoint(
+                    f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
+                murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
+                                    get_all=False, expected_type=str)
+                if not murl:
+                    raise ExtractorError('Failed to resolve album to playlist')
+                return self.url_result(murl, YoutubeTabIE)
+            elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
+                return self.url_result(
+                    f'https://music.youtube.com/channel/{item_id}{tab}{post}', YoutubeTabIE, item_id)
+
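# Hedged sketch of the YouTube Music ID rewrites handled above: VL... channels map
# to their underlying playlist, /browse/... IDs map back to /channel/ URLs, and
# MP... albums need an extra API call to resolve the canonical playlist URL.
def _demo_music_rewrite(item_id, channel_type='channel'):
    if item_id.startswith('VL'):
        return f'https://music.youtube.com/playlist?list={item_id[2:]}'
    if channel_type == 'browse':
        return f'https://music.youtube.com/channel/{item_id}'
    return None  # MP... albums are resolved via microformat urlCanonical instead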
+        original_tab_id, display_id = tab[1:], f'{item_id}{tab}'
         if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
-            # Home URLs should redirect to /videos/
-            redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
-                                'To download only the videos in the home page, add a "/featured" to the URL')
-            tab = '/videos'
-
-        url = ''.join((pre, tab, post))
-        mobj = get_mobj(url)
+            url = f'{pre}/videos{post}'
+        if smuggled_data.get('is_music_url'):
+            self.report_warning(f'YouTube Music is not directly supported. Redirecting to {url}')
 
         # Handle both video/playlist URLs
         qs = parse_qs(url)
-        video_id, playlist_id = (qs.get(key, [None])[0] for key in ('v', 'list'))
-
+        video_id, playlist_id = [traverse_obj(qs, (key, 0)) for key in ('v', 'list')]
         if not video_id and mobj['not_channel'].startswith('watch'):
             if not playlist_id:
                 # If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
-                raise ExtractorError('Unable to recognize tab page')
+                raise ExtractorError('A video URL was given without video ID', expected=True)
             # Common mistake: https://www.youtube.com/watch?list=playlist_id
             self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
-            url = f'https://www.youtube.com/playlist?list={playlist_id}'
-            mobj = get_mobj(url)
+            return self.url_result(
+                f'https://www.youtube.com/playlist?list={playlist_id}', YoutubeTabIE, playlist_id)
 
-        if video_id and playlist_id:
-            if self.get_param('noplaylist'):
-                self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
-                return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                       ie=YoutubeIE.ie_key(), video_id=video_id)
-            self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
+        if not self._yes_playlist(playlist_id, video_id):
+            return self.url_result(
+                f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
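# Simplified, standalone sketch of how a watch URL is routed above: a bare ?list=
# URL becomes a playlist, and when both ?v= and &list= are present the --no-playlist
# decision (self._yes_playlist in the real code) picks the video or the playlist.
from urllib.parse import parse_qs, urlparse

def _demo_route_watch_url(url, no_playlist=False):
    qs = parse_qs(urlparse(url).query)
    video_id, playlist_id = (qs.get(k, [None])[0] for k in ('v', 'list'))
    if not video_id and not playlist_id:
        raise ValueError('A video URL was given without video ID')
    if playlist_id and not (no_playlist and video_id):
        return f'https://www.youtube.com/playlist?list={playlist_id}'
    return f'https://www.youtube.com/watch?v={video_id}'

# _demo_route_watch_url('https://www.youtube.com/watch?v=abc&list=PL123', no_playlist=True)
# -> 'https://www.youtube.com/watch?v=abc'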
 
-        data, ytcfg = self._extract_data(url, item_id)
+        data, ytcfg = self._extract_data(url, display_id)
 
         # YouTube may provide a non-standard redirect to the regional channel
         # See: https://github.com/yt-dlp/yt-dlp/issues/2694
+        # https://support.google.com/youtube/answer/2976814#zippy=,conditional-redirects
         redirect_url = traverse_obj(
             data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
         if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
-            redirect_url = ''.join((
-                urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
-            self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
-            return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
+            redirect_url = ''.join((urljoin('https://www.youtube.com', redirect_url), tab, post))
+            self.to_screen(f'This playlist is likely not available in your region. Following conditional redirect to {redirect_url}')
+            return self.url_result(redirect_url, YoutubeTabIE)
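# Minimal sketch of following the conditional (regional) redirect above: YouTube
# returns a relative channel path, and the originally requested tab and query
# string are re-applied before re-running the extractor on the new URL.
from urllib.parse import urljoin

def _demo_follow_regional_redirect(redirect_path, tab='', post=''):
    return urljoin('https://www.youtube.com', redirect_path) + tab + post

# _demo_follow_regional_redirect('/channel/UCxxxx', '/videos')
# -> 'https://www.youtube.com/channel/UCxxxx/videos'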
 
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
+        tabs, extra_tabs = self._extract_tab_renderers(data), []
+        if is_channel and tabs and 'no-youtube-channel-redirect' not in compat_opts:
             selected_tab = self._extract_selected_tab(tabs)
-            selected_tab_url = urljoin(
-                url, traverse_obj(selected_tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
-            translated_tab_name = selected_tab.get('title', '').lower()
-
-            # Prefer tab name from tab url as it is always in en,
-            # but only when preferred lang is set as it may not extract reliably in all cases.
-            selected_tab_name = (self._preferred_lang in (None, 'en') and translated_tab_name
-                                 or selected_tab_url and get_mobj(selected_tab_url)['tab'][1:]  # primary
-                                 or translated_tab_name)
-
-            if selected_tab_name == 'home':
-                selected_tab_name = 'featured'
-            requested_tab_name = mobj['tab'][1:]
-
-            if 'no-youtube-channel-redirect' not in compat_opts:
-                if requested_tab_name == 'live':  # Live tab should have redirected to the video
-                    raise UserNotLive(video_id=mobj['id'])
-                if requested_tab_name not in ('', selected_tab_name):
-                    redirect_warning = f'The channel does not have a {requested_tab_name} tab'
-                    if not original_tab_name:
-                        if item_id[:2] == 'UC':
-                            # Topic channels don't have /videos. Use the equivalent playlist instead
-                            pl_id = f'UU{item_id[2:]}'
-                            pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
-                            try:
-                                data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
-                            except ExtractorError:
-                                redirect_warning += ' and the playlist redirect gave error'
-                            else:
-                                item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
-                                redirect_warning += f'. Redirecting to playlist {pl_id} instead'
-                        if selected_tab_name and selected_tab_name != requested_tab_name:
-                            redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
+            selected_tab_id, selected_tab_name = self._extract_tab_id_and_name(selected_tab, url)  # NB: Name may be translated
+            self.write_debug(f'Selected tab: {selected_tab_id!r} ({selected_tab_name}), Requested tab: {original_tab_id!r}')
+
+            if not original_tab_id and selected_tab_name:
+                self.to_screen('Downloading all uploads of the channel. '
+                               'To download only the videos in a specific tab, pass the tab\'s URL')
+                if self._has_tab(tabs, 'streams'):
+                    extra_tabs.append(''.join((pre, '/streams', post)))
+                if self._has_tab(tabs, 'shorts'):
+                    extra_tabs.append(''.join((pre, '/shorts', post)))
+                # XXX: Members-only tab should also be extracted
+
+                if not extra_tabs and selected_tab_id != 'videos':
+                    # Channel does not have streams, shorts or videos tabs
+                    if item_id[:2] != 'UC':
+                        raise ExtractorError('This channel has no uploads', expected=True)
+
+                    # Topic channels don't have /videos. Use the equivalent playlist instead
+                    pl_id = f'UU{item_id[2:]}'
+                    pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
+                    try:
+                        data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
+                    except ExtractorError:
+                        raise ExtractorError('This channel has no uploads', expected=True)
                     else:
-                        raise ExtractorError(redirect_warning, expected=True)
-
-        if redirect_warning:
-            self.to_screen(redirect_warning)
-        self.write_debug(f'Final URL: {url}')
+                        item_id, url = pl_id, pl_url
+                        self.to_screen(
+                            f'The channel does not have a videos, shorts, or live tab. Redirecting to playlist {pl_id} instead')
+
+                elif extra_tabs and selected_tab_id != 'videos':
+                    # When there are shorts/live tabs but not videos tab
+                    url, data = f'{pre}{post}', None
+
+            elif (original_tab_id or 'videos') != selected_tab_id:
+                if original_tab_id == 'live':
+                    # Live tab should have redirected to the video
+                    # Except in the case the channel has an actual live tab
+                    # Example: https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live
+                    raise UserNotLive(video_id=item_id)
+                elif selected_tab_name:
+                    raise ExtractorError(f'This channel does not have a {original_tab_id} tab', expected=True)
+
+                # For channels such as https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg
+                url = f'{pre}{post}'
 
         # YouTube sometimes provides a button to reload playlist with unavailable videos.
         if 'no-youtube-unavailable-videos' not in compat_opts:
-            data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
+            data = self._reload_with_unavailable_videos(display_id, data, ytcfg) or data
         self._extract_and_report_alerts(data, only_once=True)
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
-            return self._extract_from_tabs(item_id, ytcfg, data, tabs)
 
+        tabs, entries = self._extract_tab_renderers(data), []
+        if tabs:
+            entries = [self._extract_from_tabs(item_id, ytcfg, data, tabs)]
+            entries[0].update({
+                'extractor_key': YoutubeTabIE.ie_key(),
+                'extractor': YoutubeTabIE.IE_NAME,
+                'webpage_url': url,
+            })
+        if self.get_param('playlist_items') == '0':
+            entries.extend(self.url_result(u, YoutubeTabIE) for u in extra_tabs)
+        else:  # Users expect to get all `video_id`s even with `--flat-playlist`. So don't return `url_result`
+            entries.extend(map(self._real_extract, extra_tabs))
+
+        if len(entries) == 1:
+            return entries[0]
+        elif entries:
+            metadata = self._extract_metadata_from_tabs(item_id, data)
+            uploads_url = 'the Uploads (UU) playlist URL'
+            if try_get(metadata, lambda x: x['channel_id'].startswith('UC')):
+                uploads_url = f'https://www.youtube.com/playlist?list=UU{metadata["channel_id"][2:]}'
+            self.to_screen(
+                'Downloading as multiple playlists, separated by tabs. '
+                f'To download as a single playlist instead, pass {uploads_url}')
+            return self.playlist_result(entries, item_id, **metadata)
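# Small sketch of the hint printed above: for a UC... channel, the single-playlist
# alternative is the Uploads playlist, obtained by swapping the 'UC' prefix for 'UU'.
def _demo_uploads_playlist_url(channel_id):
    if not channel_id.startswith('UC'):
        return None
    return f'https://www.youtube.com/playlist?list=UU{channel_id[2:]}'

# _demo_uploads_playlist_url('UCK9V2B22uJYu3N7eR_BT9QA')
# -> 'https://www.youtube.com/playlist?list=UUK9V2B22uJYu3N7eR_BT9QA'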
+
+        # Inline playlist
         playlist = traverse_obj(
             data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
         if playlist:
@@ -5834,10 +6300,9 @@ def get_mobj(url):
         video_id = traverse_obj(
             data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
         if video_id:
-            if mobj['tab'] != '/live':  # live tab is expected to redirect to video
+            if tab != '/live':  # live tab is expected to redirect to video
                 self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
-            return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                   ie=YoutubeIE.ie_key(), video_id=video_id)
+            return self.url_result(f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
 
         raise ExtractorError('Unable to recognize tab page')
 
@@ -5870,12 +6335,13 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
             'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
             'view_count': int,
-            'uploader_url': 'https://www.youtube.com/user/Wickydoo',
+            'uploader_url': 'https://www.youtube.com/c/WickmanVT',
             'modified_date': r're:\d{8}',
             'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
             'channel': 'Wickman',
             'tags': [],
-            'channel_url': 'https://www.youtube.com/user/Wickydoo',
+            'channel_url': 'https://www.youtube.com/c/WickmanVT',
+            'availability': 'public',
         },
         'playlist_mincount': 29,
     }, {
@@ -5905,7 +6371,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
             'availability': 'public',
         },
-        'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
+        'expected_warnings': [r'[Uu]navailable videos? (is|are|will be) hidden'],
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
         'playlist_mincount': 455,
@@ -6032,9 +6498,7 @@ class YoutubeYtUserIE(InfoExtractor):
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
-        return self.url_result(
-            'https://www.youtube.com/user/%s/videos' % user_id,
-            ie=YoutubeTabIE.ie_key(), video_id=user_id)
+        return self.url_result(f'https://www.youtube.com/user/{user_id}', YoutubeTabIE, user_id)
 
 
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
@@ -6207,6 +6671,30 @@ class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
             #     'title': '#cats',
             # }],
         },
+    }, {
+        # Channel results
+        'url': 'https://www.youtube.com/results?search_query=kurzgesagt&sp=EgIQAg%253D%253D',
+        'info_dict': {
+            'id': 'kurzgesagt',
+            'title': 'kurzgesagt',
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'ie_key': 'YoutubeTab',
+                'channel': 'Kurzgesagt – In a Nutshell',
+                'description': 'md5:4ae48dfa9505ffc307dad26342d06bfc',
+                'title': 'Kurzgesagt – In a Nutshell',
+                'channel_id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'playlist_count': int,  # XXX: should have a way of saying > 1
+                'channel_url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'thumbnails': list
+            }
+        }],
+        'params': {'extract_flat': True, 'playlist_items': '1'},
+        'playlist_mincount': 1,
     }, {
         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
         'only_matching': True,
@@ -6492,6 +6980,51 @@ def _real_extract(self, url):
         }
 
 
+class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
+    IE_NAME = 'youtube:consent'
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://consent\.youtube\.com/m\?'
+    _TESTS = [{
+        'url': 'https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1',
+        'info_dict': {
+            'id': 'qVv6vCqciTM',
+            'ext': 'mp4',
+            'age_limit': 0,
+            'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'comment_count': int,
+            'chapters': 'count:13',
+            'upload_date': '20221223',
+            'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+            'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'like_count': int,
+            'release_date': '20221223',
+            'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+            'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+            'view_count': int,
+            'playable_in_embed': True,
+            'duration': 4438,
+            'availability': 'public',
+            'channel_follower_count': int,
+            'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'categories': ['Entertainment'],
+            'live_status': 'was_live',
+            'release_timestamp': 1671793345,
+            'channel': 'さなちゃんねる',
+            'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+            'uploader': 'さなちゃんねる',
+        },
+        'add_ie': ['Youtube'],
+        'params': {'skip_download': 'Youtube'},
+    }]
+
+    def _real_extract(self, url):
+        redirect_url = url_or_none(parse_qs(url).get('continue', [None])[-1])
+        if not redirect_url:
+            raise ExtractorError('Invalid cookie consent redirect URL', expected=True)
+        return self.url_result(redirect_url)
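# Standalone sketch of the consent redirect handled above: the real target URL is
# carried (percent-encoded) in the 'continue' query parameter of consent.youtube.com/m?...
from urllib.parse import parse_qs, urlparse

def _demo_consent_target(url):
    return parse_qs(urlparse(url).query).get('continue', [None])[-1]

# _demo_consent_target('https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3Dabc&gl=NL')
# -> 'https://www.youtube.com/watch?v=abc'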
+
+
 class YoutubeTruncatedIDIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_id'
     IE_DESC = False  # Do not list