[extractor/youtube] Add hyperpipe instances (#6020)
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 2afb993d01b09b7b9e67a8b15b3890e3fb7322b5..95ca52b3ae1c3eb7b1cb4d25428963b779a97f8e 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -1,5 +1,6 @@
 import base64
 import calendar
+import collections
 import copy
 import datetime
 import enum
 from ..utils import (
     NO_DEFAULT,
     ExtractorError,
+    LazyList,
     UserNotLive,
     bug_reports_message,
     classproperty,
     clean_html,
     datetime_from_str,
     dict_get,
+    filter_dict,
     float_or_none,
     format_field,
     get_first,
@@ -289,10 +292,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
 
     _RESERVED_NAMES = (
-        r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
+        r'channel|c|user|playlist|watch|w|v|embed|e|live|watch_popup|clip|'
         r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|'
-        r'browse|oembed|get_video_info|iframe_api|s/player|'
-        r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
+        r'browse|oembed|get_video_info|iframe_api|s/player|source|'
+        r'storefront|oops|index|account|t/terms|about|upload|signin|logout')
 
     _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
 
@@ -309,6 +312,40 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         r'(?:www\.)?invidious\.pussthecat\.org',
         r'(?:www\.)?invidious\.zee\.li',
         r'(?:www\.)?invidious\.ethibox\.fr',
+        r'(?:www\.)?iv\.ggtyler\.dev',
+        r'(?:www\.)?inv\.vern\.i2p',
+        r'(?:www\.)?am74vkcrjp2d5v36lcdqgsj2m6x36tbrkhsruoegwfcizzabnfgf5zyd\.onion',
+        r'(?:www\.)?inv\.riverside\.rocks',
+        r'(?:www\.)?invidious\.silur\.me',
+        r'(?:www\.)?inv\.bp\.projectsegfau\.lt',
+        r'(?:www\.)?invidious\.g4c3eya4clenolymqbpgwz3q3tawoxw56yhzk4vugqrl6dtu3ejvhjid\.onion',
+        r'(?:www\.)?invidious\.slipfox\.xyz',
+        r'(?:www\.)?invidious\.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd\.onion',
+        r'(?:www\.)?inv\.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad\.onion',
+        r'(?:www\.)?invidious\.tiekoetter\.com',
+        r'(?:www\.)?iv\.odysfvr23q5wgt7i456o5t3trw2cw5dgn56vbjfbq2m7xsc5vqbqpcyd\.onion',
+        r'(?:www\.)?invidious\.nerdvpn\.de',
+        r'(?:www\.)?invidious\.weblibre\.org',
+        r'(?:www\.)?inv\.odyssey346\.dev',
+        r'(?:www\.)?invidious\.dhusch\.de',
+        r'(?:www\.)?iv\.melmac\.space',
+        r'(?:www\.)?watch\.thekitty\.zone',
+        r'(?:www\.)?invidious\.privacydev\.net',
+        r'(?:www\.)?ng27owmagn5amdm7l5s3rsqxwscl5ynppnis5dqcasogkyxcfqn7psid\.onion',
+        r'(?:www\.)?invidious\.drivet\.xyz',
+        r'(?:www\.)?vid\.priv\.au',
+        r'(?:www\.)?euxxcnhsynwmfidvhjf6uzptsmh4dipkmgdmcmxxuo7tunp3ad2jrwyd\.onion',
+        r'(?:www\.)?inv\.vern\.cc',
+        r'(?:www\.)?invidious\.esmailelbob\.xyz',
+        r'(?:www\.)?invidious\.sethforprivacy\.com',
+        r'(?:www\.)?yt\.oelrichsgarcia\.de',
+        r'(?:www\.)?yt\.artemislena\.eu',
+        r'(?:www\.)?invidious\.flokinet\.to',
+        r'(?:www\.)?invidious\.baczek\.me',
+        r'(?:www\.)?y\.com\.sb',
+        r'(?:www\.)?invidious\.epicsite\.xyz',
+        r'(?:www\.)?invidious\.lidarshield\.cloud',
+        r'(?:www\.)?yt\.funami\.tech',
         r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
         r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion',
         r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion',
@@ -367,14 +404,37 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
         # piped instances from https://github.com/TeamPiped/Piped/wiki/Instances
         r'(?:www\.)?piped\.kavin\.rocks',
-        r'(?:www\.)?piped\.silkky\.cloud',
         r'(?:www\.)?piped\.tokhmi\.xyz',
-        r'(?:www\.)?piped\.moomoo\.me',
-        r'(?:www\.)?il\.ax',
-        r'(?:www\.)?piped\.syncpundit\.com',
+        r'(?:www\.)?piped\.syncpundit\.io',
         r'(?:www\.)?piped\.mha\.fi',
+        r'(?:www\.)?watch\.whatever\.social',
+        r'(?:www\.)?piped\.garudalinux\.org',
+        r'(?:www\.)?piped\.rivo\.lol',
+        r'(?:www\.)?piped-libre\.kavin\.rocks',
+        r'(?:www\.)?yt\.jae\.fi',
         r'(?:www\.)?piped\.mint\.lgbt',
-        r'(?:www\.)?piped\.privacy\.com\.de',
+        r'(?:www\.)?il\.ax',
+        r'(?:www\.)?piped\.esmailelbob\.xyz',
+        r'(?:www\.)?piped\.projectsegfau\.lt',
+        r'(?:www\.)?piped\.privacydev\.net',
+        r'(?:www\.)?piped\.palveluntarjoaja\.eu',
+        r'(?:www\.)?piped\.smnz\.de',
+        r'(?:www\.)?piped\.adminforge\.de',
+        r'(?:www\.)?watch\.whatevertinfoil\.de',
+        r'(?:www\.)?piped\.qdi\.fi',
+        r'(?:www\.)?piped\.video',
+        r'(?:www\.)?piped\.aeong\.one',
+        r'(?:www\.)?piped\.moomoo\.me',
+        r'(?:www\.)?piped\.chauvet\.pro',
+        r'(?:www\.)?watch\.leptons\.xyz',
+        r'(?:www\.)?pd\.vern\.cc',
+        r'(?:www\.)?piped\.hostux\.net',
+        r'(?:www\.)?piped\.lunar\.icu',
+        # Hyperpipe instances from https://hyperpipe.codeberg.page/
+        r'(?:www\.)?hyperpipe\.surge\.sh',
+        r'(?:www\.)?hyperpipe\.esmailelbob\.xyz',
+        r'(?:www\.)?listen\.whatever\.social',
+        r'(?:www\.)?music\.adminforge\.de',
     )
 
     # extracted from account/account_menu ep
@@ -389,6 +449,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         'si', 'th', 'lo', 'my', 'ka', 'am', 'km', 'zh-CN', 'zh-TW', 'zh-HK', 'ja', 'ko'
     ]
 
+    _IGNORED_WARNINGS = {'Unavailable videos will be hidden during playback'}
+
     @functools.cached_property
     def _preferred_lang(self):
         """
@@ -617,7 +679,7 @@ def generate_api_headers(
         if auth is not None:
             headers['Authorization'] = auth
             headers['X-Origin'] = origin
-        return {h: v for h, v in headers.items() if v is not None}
+        return filter_dict(headers)
 
     def _download_ytcfg(self, client, video_id):
         url = {
@@ -672,20 +734,10 @@ def _extract_continuation(cls, renderer):
         if next_continuation:
             return next_continuation
 
-        contents = []
-        for key in ('contents', 'items'):
-            contents.extend(try_get(renderer, lambda x: x[key], list) or [])
-
-        for content in contents:
-            if not isinstance(content, dict):
-                continue
-            continuation_ep = try_get(
-                content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
-                          lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
-                dict)
-            continuation = cls._extract_continuation_ep_data(continuation_ep)
-            if continuation:
-                return continuation
+        return traverse_obj(renderer, (
+            ('contents', 'items', 'rows'), ..., 'continuationItemRenderer',
+            ('continuationEndpoint', ('button', 'buttonRenderer', 'command'))
+        ), get_all=False, expected_type=cls._extract_continuation_ep_data)
 
     @classmethod
     def _extract_alerts(cls, data):
@@ -701,12 +753,11 @@ def _extract_alerts(cls, data):
                     yield alert_type, message
 
     def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
-        errors = []
-        warnings = []
+        errors, warnings = [], []
         for alert_type, alert_message in alerts:
             if alert_type.lower() == 'error' and fatal:
                 errors.append([alert_type, alert_message])
-            else:
+            elif alert_message not in self._IGNORED_WARNINGS:
                 warnings.append([alert_type, alert_message])
 
         for alert_type, alert_message in (warnings + errors[:-1]):
@@ -739,7 +790,7 @@ def _extract_badges(self, renderer: dict):
         }
 
         badges = []
-        for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer'), default=[]):
+        for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer')):
             badge_type = (
                 privacy_icon_map.get(traverse_obj(badge, ('icon', 'iconType'), expected_type=str))
                 or badge_style_map.get(traverse_obj(badge, 'style'))
@@ -779,7 +830,7 @@ def _get_text(data, *path_list, max_runs=None):
                     runs = item
 
                 runs = runs[:min(len(runs), max_runs or len(runs))]
-                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
+                text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str))
                 if text:
                     return text
 
@@ -799,7 +850,7 @@ def _extract_thumbnails(data, *path_list):
         """
         thumbnails = []
         for path in path_list or [()]:
-            for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]):
+            for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)):
                 thumbnail_url = url_or_none(thumbnail.get('url'))
                 if not thumbnail_url:
                     continue
@@ -909,29 +960,36 @@ def is_music_url(url):
 
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
-        title = self._get_text(renderer, 'title')
+
+        reel_header_renderer = traverse_obj(renderer, (
+            'navigationEndpoint', 'reelWatchEndpoint', 'overlay', 'reelPlayerOverlayRenderer',
+            'reelPlayerHeaderSupportedRenderers', 'reelPlayerHeaderRenderer'))
+
+        title = self._get_text(renderer, 'title', 'headline') or self._get_text(reel_header_renderer, 'reelTitleText')
         description = self._get_text(renderer, 'descriptionSnippet')
-        duration = parse_duration(self._get_text(
-            renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
+
+        duration = int_or_none(renderer.get('lengthSeconds'))
+        if duration is None:
+            duration = parse_duration(self._get_text(
+                renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
         if duration is None:
+            # XXX: should write a more general parser to support more cases (e.g. shorts in the shorts tab)
             duration = parse_duration(self._search_regex(
                 r'(?i)(ago)(?!.*\1)\s+(?P<duration>[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$',
                 traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str),
                 video_id, default=None, group='duration'))
 
-        view_count = self._get_count(renderer, 'viewCountText')
-
-        uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
         channel_id = traverse_obj(
             renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
             expected_type=str, get_all=False)
-        time_text = self._get_text(renderer, 'publishedTimeText') or ''
-        scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
+        if not channel_id:
+            channel_id = traverse_obj(reel_header_renderer, ('channelNavigationEndpoint', 'browseEndpoint', 'browseId'))
+
         overlay_style = traverse_obj(
             renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
             get_all=False, expected_type=str)
         badges = self._extract_badges(renderer)
-        thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
+
         navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
             renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
             expected_type=str)) or ''
@@ -939,6 +997,22 @@ def _extract_video(self, renderer):
         if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
             url = f'https://www.youtube.com/shorts/{video_id}'
 
+        time_text = (self._get_text(renderer, 'publishedTimeText', 'videoInfo')
+                     or self._get_text(reel_header_renderer, 'timestampText') or '')
+        scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
+
+        live_status = (
+            'is_upcoming' if scheduled_timestamp is not None
+            else 'was_live' if 'streamed' in time_text.lower()
+            else 'is_live' if overlay_style == 'LIVE' or self._has_badge(badges, BadgeType.LIVE_NOW)
+            else None)
+
+        # videoInfo is a string like '50K views • 10 years ago'.
+        view_count_text = self._get_text(renderer, 'viewCountText', 'shortViewCountText', 'videoInfo') or ''
+        view_count = (0 if 'no views' in view_count_text.lower()
+                      else self._get_count({'simpleText': view_count_text}))
+        view_count_field = 'concurrent_view_count' if live_status in ('is_live', 'is_upcoming') else 'view_count'
+
         return {
             '_type': 'url',
             'ie_key': YoutubeIE.ie_key(),
@@ -947,17 +1021,14 @@ def _extract_video(self, renderer):
             'title': title,
             'description': description,
             'duration': duration,
-            'view_count': view_count,
-            'uploader': uploader,
             'channel_id': channel_id,
-            'thumbnails': thumbnails,
-            'upload_date': (strftime_or_none(self._parse_time_text(time_text), '%Y%m%d')
-                            if self._configuration_arg('approximate_date', ie_key='youtubetab')
-                            else None),
-            'live_status': ('is_upcoming' if scheduled_timestamp is not None
-                            else 'was_live' if 'streamed' in time_text.lower()
-                            else 'is_live' if overlay_style == 'LIVE' or self._has_badge(badges, BadgeType.LIVE_NOW)
-                            else None),
+            'channel': (self._get_text(renderer, 'ownerText', 'shortBylineText')
+                        or self._get_text(reel_header_renderer, 'channelTitleText')),
+            'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None,
+            'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'),
+            'timestamp': (self._parse_time_text(time_text)
+                          if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE)
+                          else None),
             'release_timestamp': scheduled_timestamp,
             'availability':
                 'public' if self._has_badge(badges, BadgeType.AVAILABILITY_PUBLIC)
@@ -965,7 +1036,9 @@ def _extract_video(self, renderer):
                     is_private=self._has_badge(badges, BadgeType.AVAILABILITY_PRIVATE) or None,
                     needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None,
                     needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None,
-                    is_unlisted=self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) or None)
+                    is_unlisted=self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) or None),
+            view_count_field: view_count,
+            'live_status': live_status
         }
 
 
@@ -984,7 +1057,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             youtube\.googleapis\.com)/                        # the various hostnames, with wildcard subdomains
                          (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                          (?:                                                  # the various things that can precede the ID:
-                             (?:(?:v|embed|e|shorts)/(?!videoseries|live_stream))  # v/ or embed/ or e/ or shorts/
+                             (?:(?:v|embed|e|shorts|live)/(?!videoseries|live_stream))  # v/ or embed/ or e/ or shorts/
                              |(?:                                             # or the v= param in all its forms
                                  (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                  (?:\?|\#!?)                                  # the params delimiter ? or # or #!
@@ -1009,7 +1082,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     _EMBED_REGEX = [
         r'''(?x)
             (?:
-                <iframe[^>]+?src=|
+                <(?:[0-9A-Za-z-]+?)?iframe[^>]+?src=|
                 data-video-url=|
                 <embed[^>]+?src=|
                 embedSWF\(?:\s*|
@@ -1025,6 +1098,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             <a\s[^>]*\bhref="(?P<url>https://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})"
             \s[^>]*\bclass="[^"]*\blazy-load-youtube''',
     ]
+    _RETURN_TYPE = 'video'  # XXX: How to handle multifeed?
 
     _PLAYER_INFO_RE = (
         r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
@@ -1555,66 +1629,99 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'skip': 'This live event has ended.',
         },
         {
-            # Multifeed videos (multiple cameras), URL is for Main Camera
-            'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
+            # Multifeed videos (multiple cameras), URL can be of any Camera
+            'url': 'https://www.youtube.com/watch?v=zaPI8MvL8pg',
             'info_dict': {
-                'id': 'jvGDaLqkpTg',
-                'title': 'Tom Clancy Free Weekend Rainbow Whatever',
-                'description': 'md5:e03b909557865076822aa169218d6a5d',
+                'id': 'zaPI8MvL8pg',
+                'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04',
+                'description': 'md5:563ccbc698b39298481ca3c571169519',
             },
             'playlist': [{
                 'info_dict': {
-                    'id': 'jvGDaLqkpTg',
+                    'id': 'j5yGuxZ8lLU',
                     'ext': 'mp4',
-                    'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
-                    'description': 'md5:e03b909557865076822aa169218d6a5d',
-                    'duration': 10643,
-                    'upload_date': '20161111',
-                    'uploader': 'Team PGP',
-                    'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
-                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
+                    'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Chris)',
+                    'uploader': 'WiiLikeToPlay',
+                    'description': 'md5:563ccbc698b39298481ca3c571169519',
+                    'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray',
+                    'duration': 10120,
+                    'channel_follower_count': int,
+                    'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg',
+                    'availability': 'public',
+                    'playable_in_embed': True,
+                    'upload_date': '20131105',
+                    'uploader_id': 'WiiRikeToPray',
+                    'categories': ['Gaming'],
+                    'live_status': 'was_live',
+                    'tags': 'count:24',
+                    'release_timestamp': 1383701910,
+                    'thumbnail': 'https://i.ytimg.com/vi/j5yGuxZ8lLU/maxresdefault.jpg',
+                    'comment_count': int,
+                    'age_limit': 0,
+                    'like_count': int,
+                    'channel_id': 'UCN2XePorRokPB9TEgRZpddg',
+                    'channel': 'WiiLikeToPlay',
+                    'view_count': int,
+                    'release_date': '20131106',
                 },
             }, {
                 'info_dict': {
-                    'id': '3AKt1R1aDnw',
+                    'id': 'zaPI8MvL8pg',
                     'ext': 'mp4',
-                    'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
-                    'description': 'md5:e03b909557865076822aa169218d6a5d',
-                    'duration': 10991,
-                    'upload_date': '20161111',
-                    'uploader': 'Team PGP',
-                    'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
-                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
+                    'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Tyson)',
+                    'uploader_id': 'WiiRikeToPray',
+                    'availability': 'public',
+                    'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg',
+                    'channel': 'WiiLikeToPlay',
+                    'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray',
+                    'channel_follower_count': int,
+                    'description': 'md5:563ccbc698b39298481ca3c571169519',
+                    'duration': 10108,
+                    'age_limit': 0,
+                    'like_count': int,
+                    'tags': 'count:24',
+                    'channel_id': 'UCN2XePorRokPB9TEgRZpddg',
+                    'uploader': 'WiiLikeToPlay',
+                    'release_timestamp': 1383701915,
+                    'comment_count': int,
+                    'upload_date': '20131105',
+                    'thumbnail': 'https://i.ytimg.com/vi/zaPI8MvL8pg/maxresdefault.jpg',
+                    'release_date': '20131106',
+                    'playable_in_embed': True,
+                    'live_status': 'was_live',
+                    'categories': ['Gaming'],
+                    'view_count': int,
                 },
             }, {
                 'info_dict': {
-                    'id': 'RtAMM00gpVc',
+                    'id': 'R7r3vfO7Hao',
                     'ext': 'mp4',
-                    'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
-                    'description': 'md5:e03b909557865076822aa169218d6a5d',
-                    'duration': 10995,
-                    'upload_date': '20161111',
-                    'uploader': 'Team PGP',
-                    'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
-                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
-                },
-            }, {
-                'info_dict': {
-                    'id': '6N2fdlP3C5U',
-                    'ext': 'mp4',
-                    'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
-                    'description': 'md5:e03b909557865076822aa169218d6a5d',
-                    'duration': 10990,
-                    'upload_date': '20161111',
-                    'uploader': 'Team PGP',
-                    'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
-                    'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
+                    'title': 'Terraria 1.2 Live Stream | Let\'s Play - Part 04 (Spencer)',
+                    'thumbnail': 'https://i.ytimg.com/vi/R7r3vfO7Hao/maxresdefault.jpg',
+                    'channel_id': 'UCN2XePorRokPB9TEgRZpddg',
+                    'like_count': int,
+                    'availability': 'public',
+                    'playable_in_embed': True,
+                    'upload_date': '20131105',
+                    'description': 'md5:563ccbc698b39298481ca3c571169519',
+                    'uploader_id': 'WiiRikeToPray',
+                    'uploader_url': 'http://www.youtube.com/user/WiiRikeToPray',
+                    'channel_follower_count': int,
+                    'tags': 'count:24',
+                    'release_date': '20131106',
+                    'uploader': 'WiiLikeToPlay',
+                    'comment_count': int,
+                    'channel_url': 'https://www.youtube.com/channel/UCN2XePorRokPB9TEgRZpddg',
+                    'channel': 'WiiLikeToPlay',
+                    'categories': ['Gaming'],
+                    'release_timestamp': 1383701914,
+                    'live_status': 'was_live',
+                    'age_limit': 0,
+                    'duration': 10128,
+                    'view_count': int,
                 },
             }],
-            'params': {
-                'skip_download': True,
-            },
-            'skip': 'Not multifeed anymore',
+            'params': {'skip_download': True},
         },
         {
             # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
@@ -1727,7 +1834,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'live_status': 'not_live',
                 'playable_in_embed': True,
                 'comment_count': int,
-                'channel_follower_count': int
+                'channel_follower_count': int,
+                'chapters': list,
             },
             'params': {
                 'skip_download': True,
@@ -1760,7 +1868,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'live_status': 'not_live',
                 'channel_url': 'https://www.youtube.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
                 'comment_count': int,
-                'channel_follower_count': int
+                'channel_follower_count': int,
+                'chapters': list,
             },
             'params': {
                 'skip_download': True,
@@ -2025,7 +2134,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'duration': 522,
                 'channel': 'kudvenkat',
                 'comment_count': int,
-                'channel_follower_count': int
+                'channel_follower_count': int,
+                'chapters': list,
             },
             'params': {
                 'skip_download': True,
@@ -2175,7 +2285,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'like_count': int,
                 'live_status': 'not_live',
                 'playable_in_embed': True,
-                'channel_follower_count': int
+                'channel_follower_count': int,
+                'chapters': list,
             },
             'params': {
                 'format': '17',  # 3gp format available on android
@@ -2219,7 +2330,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'duration': 248,
                 'categories': ['Education'],
                 'age_limit': 0,
-                'channel_follower_count': int
+                'channel_follower_count': int,
+                'chapters': list,
             }, 'params': {'format': 'mhtml', 'skip_download': True}
         }, {
             # Ensure video upload_date is in UTC timezone (video was uploaded 1641170939)
@@ -2335,6 +2447,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'view_count': int,
                 'playable_in_embed': True,
                 'description': 'md5:2ef1d002cad520f65825346e2084e49d',
+                'concurrent_view_count': int,
             },
             'params': {'skip_download': True}
         }, {
@@ -2448,7 +2561,95 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'note': '6 channel audio',
             'url': 'https://www.youtube.com/watch?v=zgdo7-RRjgo',
             'only_matching': True,
-        }
+        }, {
+            'note': 'Multiple HLS formats with same itag',
+            'url': 'https://www.youtube.com/watch?v=kX3nB4PpJko',
+            'info_dict': {
+                'id': 'kX3nB4PpJko',
+                'ext': 'mp4',
+                'categories': ['Entertainment'],
+                'description': 'md5:e8031ff6e426cdb6a77670c9b81f6fa6',
+                'uploader_url': 'http://www.youtube.com/user/MrBeast6000',
+                'live_status': 'not_live',
+                'duration': 937,
+                'channel_follower_count': int,
+                'thumbnail': 'https://i.ytimg.com/vi_webp/kX3nB4PpJko/maxresdefault.webp',
+                'title': 'Last To Take Hand Off Jet, Keeps It!',
+                'channel': 'MrBeast',
+                'playable_in_embed': True,
+                'view_count': int,
+                'upload_date': '20221112',
+                'uploader': 'MrBeast',
+                'uploader_id': 'MrBeast6000',
+                'channel_url': 'https://www.youtube.com/channel/UCX6OQ3DkcsbYNE6H8uQQuVA',
+                'age_limit': 0,
+                'availability': 'public',
+                'channel_id': 'UCX6OQ3DkcsbYNE6H8uQQuVA',
+                'like_count': int,
+                'tags': [],
+            },
+            'params': {'extractor_args': {'youtube': {'player_client': ['ios']}}, 'format': '233-1'},
+        }, {
+            'note': 'Audio formats with Dynamic Range Compression',
+            'url': 'https://www.youtube.com/watch?v=Tq92D6wQ1mg',
+            'info_dict': {
+                'id': 'Tq92D6wQ1mg',
+                'ext': 'weba',
+                'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
+                'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
+                'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
+                'channel_follower_count': int,
+                'description': 'md5:17eccca93a786d51bc67646756894066',
+                'upload_date': '20191228',
+                'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ',
+                'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'],
+                'playable_in_embed': True,
+                'like_count': int,
+                'categories': ['Entertainment'],
+                'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg',
+                'age_limit': 18,
+                'channel': 'Projekt Melody',
+                'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
+                'view_count': int,
+                'availability': 'needs_auth',
+                'comment_count': int,
+                'live_status': 'not_live',
+                'uploader': 'Projekt Melody',
+                'duration': 106,
+            },
+            'params': {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}, 'format': '251-drc'},
+        },
+        {
+            'url': 'https://www.youtube.com/live/qVv6vCqciTM',
+            'info_dict': {
+                'id': 'qVv6vCqciTM',
+                'ext': 'mp4',
+                'age_limit': 0,
+                'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+                'comment_count': int,
+                'chapters': 'count:13',
+                'upload_date': '20221223',
+                'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+                'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+                'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+                'like_count': int,
+                'release_date': '20221223',
+                'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+                'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+                'view_count': int,
+                'playable_in_embed': True,
+                'duration': 4438,
+                'availability': 'public',
+                'channel_follower_count': int,
+                'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+                'categories': ['Entertainment'],
+                'live_status': 'was_live',
+                'release_timestamp': 1671793345,
+                'channel': 'さなちゃんねる',
+                'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+                'uploader': 'さなちゃんねる',
+            },
+        },
     ]
 
     _WEBPAGE_TESTS = [
@@ -2501,10 +2702,8 @@ def __init__(self, *args, **kwargs):
         self._code_cache = {}
         self._player_cache = {}
 
-    def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data):
+    def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data, is_live):
         lock = threading.Lock()
-
-        is_live = True
         start_time = time.time()
         formats = [f for f in formats if f.get('is_from_start')]
 
@@ -2514,38 +2713,44 @@ def refetch_manifest(format_id, delay):
                 return
 
             _, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
-            video_details = traverse_obj(
-                prs, (..., 'videoDetails'), expected_type=dict, default=[])
+            video_details = traverse_obj(prs, (..., 'videoDetails'), expected_type=dict)
             microformats = traverse_obj(
                 prs, (..., 'microformat', 'playerMicroformatRenderer'),
-                expected_type=dict, default=[])
-            _, is_live, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url)
+                expected_type=dict)
+            _, live_status, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url)
+            is_live = live_status == 'is_live'
             start_time = time.time()
 
         def mpd_feed(format_id, delay):
             """
             @returns (manifest_url, manifest_stream_number, is_live) or None
             """
-            with lock:
-                refetch_manifest(format_id, delay)
-
-            f = next((f for f in formats if f['format_id'] == format_id), None)
-            if not f:
-                if not is_live:
-                    self.to_screen(f'{video_id}: Video is no longer live')
-                else:
-                    self.report_warning(
-                        f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}')
-                return None
-            return f['manifest_url'], f['manifest_stream_number'], is_live
+            for retry in self.RetryManager(fatal=False):
+                with lock:
+                    refetch_manifest(format_id, delay)
+
+                f = next((f for f in formats if f['format_id'] == format_id), None)
+                if not f:
+                    if not is_live:
+                        retry.error = f'{video_id}: Video is no longer live'
+                    else:
+                        retry.error = f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}'
+                    continue
+                return f['manifest_url'], f['manifest_stream_number'], is_live
+            return None
 
         for f in formats:
-            f['is_live'] = True
-            f['protocol'] = 'http_dash_segments_generator'
-            f['fragments'] = functools.partial(
-                self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed)
+            f['is_live'] = is_live
+            gen = functools.partial(self._live_dash_fragments, video_id, f['format_id'],
+                                    live_start_time, mpd_feed, not is_live and f.copy())
+            if is_live:
+                f['fragments'] = gen
+                f['protocol'] = 'http_dash_segments_generator'
+            else:
+                f['fragments'] = LazyList(gen({}))
+                del f['is_from_start']
 
-    def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx):
+    def _live_dash_fragments(self, video_id, format_id, live_start_time, mpd_feed, manifestless_orig_fmt, ctx):
         FETCH_SPAN, MAX_DURATION = 5, 432000
 
         mpd_url, stream_number, is_live = None, None, True
@@ -2576,15 +2781,18 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate):
                     return False, last_seq
                 elif old_mpd_url == mpd_url:
                     return True, last_seq
-            try:
-                fmts, _ = self._extract_mpd_formats_and_subtitles(
-                    mpd_url, None, note=False, errnote=False, fatal=False)
-            except ExtractorError:
-                fmts = None
-            if not fmts:
-                no_fragment_score += 2
-                return False, last_seq
-            fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
+            if manifestless_orig_fmt:
+                fmt_info = manifestless_orig_fmt
+            else:
+                try:
+                    fmts, _ = self._extract_mpd_formats_and_subtitles(
+                        mpd_url, None, note=False, errnote=False, fatal=False)
+                except ExtractorError:
+                    fmts = None
+                if not fmts:
+                    no_fragment_score += 2
+                    return False, last_seq
+                fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number)
             fragments = fmt_info['fragments']
             fragment_base_url = fmt_info['fragment_base_url']
             assert fragment_base_url
@@ -2592,6 +2800,7 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate):
             _last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1))
             return True, _last_seq
 
+        self.write_debug(f'[{video_id}] Generating fragments for format {format_id}')
         while is_live:
             fetch_time = time.time()
             if no_fragment_score > 30:
@@ -2645,6 +2854,11 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate):
             except ExtractorError:
                 continue
 
+            if manifestless_orig_fmt:
+                # Stop at the first iteration if running for post-live manifestless;
+                # the fragment count no longer increases once fragment generation starts
+                break
+
             time.sleep(max(0, FETCH_SPAN + fetch_time - time.time()))
 
     def _extract_player_url(self, *ytcfgs, webpage=None):
@@ -2824,7 +3038,7 @@ def _decrypt_nsig(self, s, video_id, player_url):
             self.report_warning(
                 f'Native nsig extraction failed: Trying with PhantomJS\n'
                 f'         n = {s} ; player = {player_url}', video_id)
-            self.write_debug(e)
+            self.write_debug(e, only_once=True)
 
             args, func_body = func_code
             ret = jsi.execute(
@@ -2942,7 +3156,7 @@ def _mark_watched(self, video_id, player_responses):
                 # these seem to mark watchtime "history" in the real world
                 # they're required, so send in a single value
                 qs.update({
-                    'st': video_length,
+                    'st': 0,
                     'et': video_length,
                 })
 
@@ -3003,7 +3217,7 @@ def _extract_chapters_from_engagement_panel(self, data, duration):
         content_list = traverse_obj(
             data,
             ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
-            expected_type=list, default=[])
+            expected_type=list)
         chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
         chapter_title = lambda chapter: self._get_text(chapter, 'title')
 
@@ -3013,9 +3227,14 @@ def _extract_chapters_from_engagement_panel(self, data, duration):
             for contents in content_list)), [])
 
     def _extract_chapters_from_description(self, description, duration):
+        duration_re = r'(?:\d+:)?\d{1,2}:\d{2}'
+        sep_re = r'(?m)^\s*(%s)\b\W*\s(%s)\s*$'
         return self._extract_chapters(
-            re.findall(r'(?m)^((?:\d+:)?\d{1,2}:\d{2})\b\W*\s(.+?)\s*$', description or ''),
+            re.findall(sep_re % (duration_re, r'.+?'), description or ''),
             chapter_time=lambda x: parse_duration(x[0]), chapter_title=lambda x: x[1],
+            duration=duration, strict=False) or self._extract_chapters(
+            re.findall(sep_re % (r'.+?', duration_re), description or ''),
+            chapter_time=lambda x: parse_duration(x[1]), chapter_title=lambda x: x[0],
             duration=duration, strict=False)
 
     def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration, strict=True):
@@ -3034,8 +3253,9 @@ def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration,
                 self.report_warning(f'Incomplete chapter {idx}')
             elif chapters[-1]['start_time'] <= chapter['start_time'] <= duration:
                 chapters.append(chapter)
-            else:
-                self.report_warning(f'Invalid start time for chapter "{chapter["title"]}"')
+            elif chapter not in chapters:
+                self.report_warning(
+                    f'Invalid start time ({chapter["start_time"]} < {chapters[-1]["start_time"]}) for chapter "{chapter["title"]}"')
         return chapters[1:]
 
     def _extract_comment(self, comment_renderer, parent=None):
@@ -3187,11 +3407,21 @@ def extract_thread(contents):
                 note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
                     '       ' if parent else '', ' replies' if parent else '',
                     page_num, comment_prog_str)
-
-            response = self._extract_response(
-                item_id=None, query=continuation,
-                ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
-                check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None)
+            try:
+                response = self._extract_response(
+                    item_id=None, query=continuation,
+                    ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
+                    check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None)
+            except ExtractorError as e:
+                # Ignore incomplete data error for replies if retries didn't work.
+                # This is to allow any other parent comments and comment threads to be downloaded.
+                # See: https://github.com/yt-dlp/yt-dlp/issues/4669
+                if 'incomplete data' in str(e).lower() and parent and self.get_param('ignoreerrors') is True:
+                    self.report_warning(
+                        'Received incomplete data for a comment reply thread and retrying did not help. '
+                        'Ignoring to let other comments be downloaded.')
+                else:
+                    raise
             is_forced_continuation = False
             continuation_contents = traverse_obj(
                 response, 'onResponseReceivedEndpoints', expected_type=list, default=[])
@@ -3220,6 +3450,7 @@ def extract_thread(contents):
         message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
         if message and not parent and tracker['running_total'] == 0:
             self.report_warning(f'Youtube said: {message}', video_id=video_id, only_once=True)
+            raise self.CommentsDisabled
 
     @staticmethod
     def _generate_comment_continuation(video_id):
@@ -3263,7 +3494,7 @@ def _is_agegated(player_response):
         if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
             return True
 
-        reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
+        reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')))
         AGE_GATE_REASONS = (
             'confirm your age', 'age-restricted', 'inappropriate',  # reason
             'age_verification_required', 'age_check_required',  # status
@@ -3404,8 +3635,13 @@ def append_client(*client_names):
             self.report_warning(last_error)
         return prs, player_url
 
-    def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, is_live, duration):
-        itags, stream_ids = {}, []
+    def _needs_live_processing(self, live_status, duration):
+        if (live_status == 'is_live' and self.get_param('live_from_start')
+                or live_status == 'post_live' and (duration or 0) > 4 * 3600):
+            return live_status
+
+    def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration):
+        itags, stream_ids = collections.defaultdict(set), []
         itag_qualities, res_qualities = {}, {0: None}
         q = qualities([
             # Normally tiny is the smallest video-only formats. But
@@ -3414,7 +3650,7 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
             'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high',  # Audio only formats
             'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
         ])
-        streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
+        streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...))
 
         for fmt in streaming_formats:
             if fmt.get('targetDurationSec'):
@@ -3422,7 +3658,7 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
 
             itag = str_or_none(fmt.get('itag'))
             audio_track = fmt.get('audioTrack') or {}
-            stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
+            stream_id = (itag, audio_track.get('id'), fmt.get('isDrc'))
             if stream_id in stream_ids:
                 continue
 
@@ -3487,10 +3723,6 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
                             video_id=video_id, only_once=True)
                     throttled = True
 
-            if itag:
-                itags[itag] = 'https'
-                stream_ids.append(stream_id)
-
             tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
             language_preference = (
                 10 if audio_track.get('audioIsDefault') and 10
@@ -3507,11 +3739,12 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
             dct = {
                 'asr': int_or_none(fmt.get('audioSampleRate')),
                 'filesize': int_or_none(fmt.get('contentLength')),
-                'format_id': itag,
+                'format_id': f'{itag}{"-drc" if fmt.get("isDrc") else ""}',
                 'format_note': join_nonempty(
                     '%s%s' % (audio_track.get('displayName') or '',
                               ' (default)' if language_preference > 0 else ''),
                     fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
+                    'DRC' if fmt.get('isDrc') else None,
                     try_get(fmt, lambda x: x['projectionType'].replace('RECTANGULAR', '').lower()),
                     try_get(fmt, lambda x: x['spatialAudioType'].replace('SPATIAL_AUDIO_TYPE_', '').lower()),
                     throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '),
@@ -3520,13 +3753,13 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
                 'fps': int_or_none(fmt.get('fps')) or None,
                 'audio_channels': fmt.get('audioChannels'),
                 'height': height,
-                'quality': q(quality),
+                'quality': q(quality) - bool(fmt.get('isDrc')) / 2,
                 'has_drm': bool(fmt.get('drmFamilies')),
                 'tbr': tbr,
                 'url': fmt_url,
                 'width': int_or_none(fmt.get('width')),
                 'language': join_nonempty(audio_track.get('id', '').split('.')[0],
-                                          'desc' if language_preference < -1 else ''),
+                                          'desc' if language_preference < -1 else '') or None,
                 'language_preference': language_preference,
                 # Strictly de-prioritize damaged and 3gp formats
                 'preference': -10 if is_damaged else -2 if itag == '17' else None,
@@ -3549,26 +3782,39 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i
                 }
                 if dct.get('ext'):
                     dct['container'] = dct['ext'] + '_dash'
+
+            if itag:
+                itags[itag].add(('https', dct.get('language')))
+                stream_ids.append(stream_id)
             yield dct
 
-        live_from_start = is_live and self.get_param('live_from_start')
-        skip_manifests = self._configuration_arg('skip')
-        if not self.get_param('youtube_include_hls_manifest', True):
-            skip_manifests.append('hls')
+        needs_live_processing = self._needs_live_processing(live_status, duration)
+        skip_bad_formats = not self._configuration_arg('include_incomplete_formats')
+
+        skip_manifests = set(self._configuration_arg('skip'))
+        if (not self.get_param('youtube_include_hls_manifest', True)
+                or needs_live_processing == 'is_live'  # These will be filtered out by YoutubeDL anyway
+                or needs_live_processing and skip_bad_formats):
+            skip_manifests.add('hls')
+
         if not self.get_param('youtube_include_dash_manifest', True):
-            skip_manifests.append('dash')
-        get_dash = 'dash' not in skip_manifests and (
-            not is_live or live_from_start or self._configuration_arg('include_live_dash'))
-        get_hls = not live_from_start and 'hls' not in skip_manifests
+            skip_manifests.add('dash')
+        if self._configuration_arg('include_live_dash'):
+            self._downloader.deprecated_feature('[youtube] include_live_dash extractor argument is deprecated. '
+                                                'Use include_incomplete_formats extractor argument instead')
+        elif skip_bad_formats and live_status == 'is_live' and needs_live_processing != 'is_live':
+            skip_manifests.add('dash')
 
         def process_manifest_format(f, proto, itag):
-            if itag in itags:
-                if itags[itag] == proto or f'{itag}-{proto}' in itags:
-                    return False
-                itag = f'{itag}-{proto}'
-            if itag:
+            key = (proto, f.get('language'))
+            if key in itags[itag]:
+                return False
+            itags[itag].add(key)
+
+            if any(p != proto for p, _ in itags[itag]):
+                f['format_id'] = f'{itag}-{proto}'
+            elif itag:
                 f['format_id'] = itag
-                itags[itag] = proto
 
             f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1))
             if f['quality'] == -1 and f.get('height'):
@@ -3577,16 +3823,17 @@ def process_manifest_format(f, proto, itag):
 
         subtitles = {}
         for sd in streaming_data:
-            hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
+            hls_manifest_url = 'hls' not in skip_manifests and sd.get('hlsManifestUrl')
             if hls_manifest_url:
-                fmts, subs = self._extract_m3u8_formats_and_subtitles(hls_manifest_url, video_id, 'mp4', fatal=False, live=is_live)
+                fmts, subs = self._extract_m3u8_formats_and_subtitles(
+                    hls_manifest_url, video_id, 'mp4', fatal=False, live=live_status == 'is_live')
                 subtitles = self._merge_subtitles(subs, subtitles)
                 for f in fmts:
                     if process_manifest_format(f, 'hls', self._search_regex(
                             r'/itag/(\d+)', f['url'], 'itag', default=None)):
                         yield f
 
-            dash_manifest_url = get_dash and sd.get('dashManifestUrl')
+            dash_manifest_url = 'dash' not in skip_manifests and sd.get('dashManifestUrl')
             if dash_manifest_url:
                 formats, subs = self._extract_mpd_formats_and_subtitles(dash_manifest_url, video_id, fatal=False)
                 subtitles = self._merge_subtitles(subs, subtitles)  # Prioritize HLS subs over DASH
@@ -3594,7 +3841,7 @@ def process_manifest_format(f, proto, itag):
                     if process_manifest_format(f, 'dash', f['format_id']):
                         f['filesize'] = int_or_none(self._search_regex(
                             r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
-                        if live_from_start:
+                        if needs_live_processing:
                             f['is_from_start'] = True
 
                         yield f
@@ -3660,11 +3907,19 @@ def _list_formats(self, video_id, microformats, video_details, player_responses,
         is_live = get_first(video_details, 'isLive')
         if is_live is None:
             is_live = get_first(live_broadcast_details, 'isLiveNow')
+        live_content = get_first(video_details, 'isLiveContent')
+        is_upcoming = get_first(video_details, 'isUpcoming')
+        post_live = get_first(video_details, 'isPostLiveDvr')
+        live_status = ('post_live' if post_live
+                       else 'is_live' if is_live
+                       else 'is_upcoming' if is_upcoming
+                       else 'was_live' if live_content
+                       else 'not_live' if False in (is_live, live_content)
+                       else None)
+        streaming_data = traverse_obj(player_responses, (..., 'streamingData'))
+        *formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, live_status, duration)
 
-        streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
-        *formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, is_live, duration)
-
-        return live_broadcast_details, is_live, streaming_data, formats, subtitles
+        return live_broadcast_details, live_status, streaming_data, formats, subtitles
 
     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url, {})
@@ -3676,7 +3931,7 @@ def _real_extract(self, url):
         webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url)
 
         playability_statuses = traverse_obj(
-            player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
+            player_responses, (..., 'playabilityStatus'), expected_type=dict)
 
         trailer_video_id = get_first(
             playability_statuses,
@@ -3689,11 +3944,10 @@ def _real_extract(self, url):
         search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
                        if webpage else (lambda x: None))
 
-        video_details = traverse_obj(
-            player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
+        video_details = traverse_obj(player_responses, (..., 'videoDetails'), expected_type=dict)
         microformats = traverse_obj(
             player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
-            expected_type=dict, default=[])
+            expected_type=dict)
 
         translated_title = self._get_text(microformats, (..., 'title'))
         video_title = (self._preferred_lang and translated_title
@@ -3751,13 +4005,14 @@ def feed_entry(name):
                 return self.playlist_result(
                     entries, video_id, video_title, video_description)
 
-        duration = int_or_none(
-            get_first(video_details, 'lengthSeconds')
-            or get_first(microformats, 'lengthSeconds')
-            or parse_duration(search_meta('duration'))) or None
+        duration = (int_or_none(get_first(video_details, 'lengthSeconds'))
+                    or int_or_none(get_first(microformats, 'lengthSeconds'))
+                    or parse_duration(search_meta('duration')) or None)
 
-        live_broadcast_details, is_live, streaming_data, formats, automatic_captions = \
-            self._list_formats(video_id, microformats, video_details, player_responses, player_url)
+        live_broadcast_details, live_status, streaming_data, formats, automatic_captions = \
+            self._list_formats(video_id, microformats, video_details, player_responses, player_url, duration)
+        if live_status == 'post_live':
+            self.write_debug(f'{video_id}: Video is in Post-Live Manifestless mode')
 
         if not formats:
             if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
@@ -3816,7 +4071,7 @@ def feed_entry(name):
         thumbnails.extend({
             'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
                 video_id=video_id, name=name, ext=ext,
-                webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
+                webp='_webp' if ext == 'webp' else '', live='_live' if live_status == 'is_live' else ''),
         } for name in thumbnail_names for ext in ('webp', 'jpg'))
         for thumb in thumbnails:
             i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
@@ -3831,26 +4086,29 @@ def feed_entry(name):
             or search_meta('channelId'))
         owner_profile_url = get_first(microformats, 'ownerProfileUrl')
 
-        live_content = get_first(video_details, 'isLiveContent')
-        is_upcoming = get_first(video_details, 'isUpcoming')
-        if is_live is None:
-            if is_upcoming or live_content is False:
-                is_live = False
-        if is_upcoming is None and (live_content or is_live):
-            is_upcoming = False
         live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
         live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
         if not duration and live_end_time and live_start_time:
             duration = live_end_time - live_start_time
 
-        if is_live and self.get_param('live_from_start'):
-            self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data)
+        needs_live_processing = self._needs_live_processing(live_status, duration)
 
-        formats.extend(self._extract_storyboard(player_responses, duration))
+        def is_bad_format(fmt):
+            if needs_live_processing and not fmt.get('is_from_start'):
+                return True
+            elif (live_status == 'is_live' and needs_live_processing != 'is_live'
+                    and fmt.get('protocol') == 'http_dash_segments'):
+                return True
+
+        for fmt in filter(is_bad_format, formats):
+            fmt['preference'] = (fmt.get('preference') or -1) - 10
+            fmt['format_note'] = join_nonempty(fmt.get('format_note'), '(Last 4 hours)', delim=' ')
 
-        # source_preference is lower for throttled/potentially damaged formats
-        self._sort_formats(formats, (
-            'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec:vp9.2', 'channels', 'acodec', 'lang', 'proto'))
+        if needs_live_processing:
+            self._prepare_live_from_start_formats(
+                formats, video_id, live_start_time, url, webpage_url, smuggled_data, live_status == 'is_live')
+
+        formats.extend(self._extract_storyboard(player_responses, duration))
 
         info = {
             'id': video_id,
@@ -3879,22 +4137,12 @@ def feed_entry(name):
             'categories': [category] if category else None,
             'tags': keywords,
             'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
-            'is_live': is_live,
-            'was_live': (False if is_live or is_upcoming or live_content is False
-                         else None if is_live is None or is_upcoming is None
-                         else live_content),
-            'live_status': 'is_upcoming' if is_upcoming else None,  # rest will be set by YoutubeDL
+            'live_status': live_status,
             'release_timestamp': live_start_time,
+            '_format_sort_fields': (  # source_preference is lower for throttled/potentially damaged formats
+                'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec:vp9.2', 'channels', 'acodec', 'lang', 'proto')
         }
 
-        if get_first(video_details, 'isPostLiveDvr'):
-            self.write_debug('Video is in Post-Live Manifestless mode')
-            info['live_status'] = 'post_live'
-            if (duration or 0) > 4 * 3600:
-                self.report_warning(
-                    'The livestream has not finished processing. Only 4 hours of the video can be currently downloaded. '
-                    'This is a known issue and patches are welcome')
-
         subtitles = {}
         pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
         if pctr:
@@ -3905,10 +4153,10 @@ def get_lang_code(track):
             # Converted into dicts to remove duplicates
             captions = {
                 get_lang_code(sub): sub
-                for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
+                for sub in traverse_obj(pctr, (..., 'captionTracks', ...))}
             translation_languages = {
                 lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
-                for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
+                for lang in traverse_obj(pctr, (..., 'translationLanguages', ...))}
 
             def process_language(container, base_url, lang_code, sub_name, query):
                 lang_subs = container.setdefault(lang_code, [])
@@ -3942,7 +4190,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
                     if not trans_code:
                         continue
                     orig_trans_code = trans_code
-                    if caption_track.get('kind') != 'asr':
+                    if caption_track.get('kind') != 'asr' and trans_code != 'und':
                         if not get_translated_subs:
                             continue
                         trans_code += f'-{lang_code}'
@@ -4024,7 +4272,8 @@ def process_language(container, base_url, lang_code, sub_name, query):
                 'url': f'https://www.youtube.com/watch?v={video_id}&bpctr=9999999999&has_verified=1',
                 'video_id': video_id,
                 'ext': 'json',
-                'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
+                'protocol': ('youtube_live_chat' if live_status in ('is_live', 'is_upcoming')
+                             else 'youtube_live_chat_replay'),
             }]
 
         if initial_data:
@@ -4061,9 +4310,8 @@ def process_language(container, base_url, lang_code, sub_name, query):
                     list) or []):
                 tbrs = variadic(
                     traverse_obj(
-                        tlb, 'toggleButtonRenderer',
-                        ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'),
-                        default=[]))
+                        tlb, ('toggleButtonRenderer', ...),
+                        ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer')))
                 for tbr in tbrs:
                     for getter, regex in [(
                             lambda x: x['defaultText']['accessibility']['accessibilityData'],
@@ -4085,6 +4333,15 @@ def process_language(container, base_url, lang_code, sub_name, query):
                     'like_count': str_to_int(like_count),
                     'dislike_count': str_to_int(dislike_count),
                 })
+            vcr = traverse_obj(vpir, ('viewCount', 'videoViewCountRenderer'))
+            if vcr:
+                vc = self._get_count(vcr, 'viewCount')
+                # Upcoming premieres with waiting count are treated as live here
+                if vcr.get('isLive'):
+                    info['concurrent_view_count'] = vc
+                elif info.get('view_count') is None:
+                    info['view_count'] = vc
+
         vsir = get_first(contents, 'videoSecondaryInfoRenderer')
         if vsir:
             vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer'))
@@ -4131,9 +4388,7 @@ def process_language(container, base_url, lang_code, sub_name, query):
             unified_strdate(get_first(microformats, 'uploadDate'))
             or unified_strdate(search_meta('uploadDate')))
         if not upload_date or (
-            not info.get('is_live')
-            and not info.get('was_live')
-            and info.get('live_status') != 'is_upcoming'
+            live_status in ('not_live', None)
             and 'no-youtube-prefer-utc-upload-date' not in self.get_param('compat_opts', [])
         ):
             upload_date = strftime_or_none(
@@ -4177,15 +4432,19 @@ def process_language(container, base_url, lang_code, sub_name, query):
 
 
 class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
-
     @staticmethod
     def passthrough_smuggled_data(func):
-        def _smuggle(entries, smuggled_data):
-            for entry in entries:
-                # TODO: Convert URL to music.youtube instead.
-                # Do we need to passthrough any other smuggled_data?
-                entry['url'] = smuggle_url(entry['url'], smuggled_data)
-                yield entry
+        def _smuggle(info, smuggled_data):
+            if info.get('_type') not in ('url', 'url_transparent'):
+                return info
+            if smuggled_data.get('is_music_url'):
+                parsed_url = urllib.parse.urlparse(info['url'])
+                if parsed_url.netloc in ('www.youtube.com', 'music.youtube.com'):
+                    smuggled_data.pop('is_music_url')
+                    info['url'] = urllib.parse.urlunparse(parsed_url._replace(netloc='music.youtube.com'))
+            if smuggled_data:
+                info['url'] = smuggle_url(info['url'], smuggled_data)
+            return info
 
         @functools.wraps(func)
         def wrapper(self, url):
@@ -4193,8 +4452,10 @@ def wrapper(self, url):
             if self.is_music_url(url):
                 smuggled_data['is_music_url'] = True
             info_dict = func(self, url, smuggled_data)
-            if smuggled_data and info_dict.get('entries'):
-                info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data)
+            if smuggled_data:
+                _smuggle(info_dict, smuggled_data)
+                if info_dict.get('entries'):
+                    info_dict['entries'] = (_smuggle(i, smuggled_data.copy()) for i in info_dict['entries'])
             return info_dict
         return wrapper
 
@@ -4225,6 +4486,25 @@ def _extract_basic_item_renderer(item):
             elif key.startswith('grid') and key.endswith('Renderer'):
                 return renderer
 
+    def _extract_channel_renderer(self, renderer):
+        channel_id = renderer['channelId']
+        title = self._get_text(renderer, 'title')
+        channel_url = f'https://www.youtube.com/channel/{channel_id}'
+        return {
+            '_type': 'url',
+            'url': channel_url,
+            'id': channel_id,
+            'ie_key': YoutubeTabIE.ie_key(),
+            'channel': title,
+            'channel_id': channel_id,
+            'channel_url': channel_url,
+            'title': title,
+            'channel_follower_count': self._get_count(renderer, 'subscriberCountText'),
+            'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'),
+            'playlist_count': self._get_count(renderer, 'videoCountText'),
+            'description': self._get_text(renderer, 'descriptionSnippet'),
+        }
+
     def _grid_entries(self, grid_renderer):
         for item in grid_renderer['items']:
             if not isinstance(item, dict):
@@ -4250,9 +4530,7 @@ def _grid_entries(self, grid_renderer):
             # channel
             channel_id = renderer.get('channelId')
             if channel_id:
-                yield self.url_result(
-                    'https://www.youtube.com/channel/%s' % channel_id,
-                    ie=YoutubeTabIE.ie_key(), video_title=title)
+                yield self._extract_channel_renderer(renderer)
                 continue
             # generic endpoint URL support
             ep_url = urljoin('https://www.youtube.com/', try_get(
@@ -4405,6 +4683,13 @@ def _rich_grid_entries(self, contents):
                     yield entry
     '''
 
+    def _report_history_entries(self, renderer):
+        for url in traverse_obj(renderer, (
+                'rows', ..., 'reportHistoryTableRowRenderer', 'cells', ...,
+                'reportHistoryTableCellRenderer', 'cell', 'reportHistoryTableTextCellRenderer', 'text', 'runs', ...,
+                'navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url')):
+            yield self.url_result(urljoin('https://www.youtube.com', url), YoutubeIE)
+
     def _extract_entries(self, parent_renderer, continuation_list):
         # continuation_list is modified in-place with continuation_list = [continuation_token]
         continuation_list[:] = [None]
@@ -4416,12 +4701,16 @@ def _extract_entries(self, parent_renderer, continuation_list):
                 content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation',
                 expected_type=dict)
             if not is_renderer:
-                renderer = content.get('richItemRenderer')
-                if renderer:
-                    for entry in self._rich_entries(renderer):
+                if content.get('richItemRenderer'):
+                    for entry in self._rich_entries(content['richItemRenderer']):
                         yield entry
                     continuation_list[0] = self._extract_continuation(parent_renderer)
+                elif content.get('reportHistorySectionRenderer'):  # https://www.youtube.com/reporthistory
+                    table = traverse_obj(content, ('reportHistorySectionRenderer', 'table', 'tableRenderer'))
+                    yield from self._report_history_entries(table)
+                    continuation_list[0] = self._extract_continuation(table)
                 continue
+
             isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
             for isr_content in isr_contents:
                 if not isinstance(isr_content, dict):
@@ -4482,26 +4771,6 @@ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
             # See: https://github.com/ytdl-org/youtube-dl/issues/28702
             visitor_data = self._extract_visitor_data(response) or visitor_data
 
-            known_continuation_renderers = {
-                'playlistVideoListContinuation': self._playlist_entries,
-                'gridContinuation': self._grid_entries,
-                'itemSectionContinuation': self._post_thread_continuation_entries,
-                'sectionListContinuation': extract_entries,  # for feeds
-            }
-            continuation_contents = try_get(
-                response, lambda x: x['continuationContents'], dict) or {}
-            continuation_renderer = None
-            for key, value in continuation_contents.items():
-                if key not in known_continuation_renderers:
-                    continue
-                continuation_renderer = value
-                continuation_list = [None]
-                yield from known_continuation_renderers[key](continuation_renderer)
-                continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
-                break
-            if continuation_renderer:
-                continue
-
             known_renderers = {
                 'videoRenderer': (self._grid_entries, 'items'),  # for membership tab
                 'gridPlaylistRenderer': (self._grid_entries, 'items'),
@@ -4510,79 +4779,81 @@ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
                 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
                 'itemSectionRenderer': (extract_entries, 'contents'),  # for feeds
                 'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
-                'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
+                'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents'),
+                'reportHistoryTableRowRenderer': (self._report_history_entries, 'rows'),
+                'playlistVideoListContinuation': (self._playlist_entries, None),
+                'gridContinuation': (self._grid_entries, None),
+                'itemSectionContinuation': (self._post_thread_continuation_entries, None),
+                'sectionListContinuation': (extract_entries, None),  # for feeds
             }
-            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
-            continuation_items = try_get(
-                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
-            continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
+
+            continuation_items = traverse_obj(response, (
+                ('onResponseReceivedActions', 'onResponseReceivedEndpoints'), ...,
+                'appendContinuationItemsAction', 'continuationItems'
+            ), 'continuationContents', get_all=False)
+            continuation_item = traverse_obj(continuation_items, 0, None, expected_type=dict, default={})
+
             video_items_renderer = None
-            for key, value in continuation_item.items():
+            for key in continuation_item.keys():
                 if key not in known_renderers:
                     continue
-                video_items_renderer = {known_renderers[key][1]: continuation_items}
+                func, parent_key = known_renderers[key]
+                video_items_renderer = {parent_key: continuation_items} if parent_key else continuation_items
                 continuation_list = [None]
-                yield from known_renderers[key][0](video_items_renderer)
+                yield from func(video_items_renderer)
                 continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
+
+            if not video_items_renderer:
                 break
-            if video_items_renderer:
-                continue
-            break
 
     @staticmethod
     def _extract_selected_tab(tabs, fatal=True):
-        for tab in tabs:
-            renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
-            if renderer.get('selected') is True:
-                return renderer
-        else:
-            if fatal:
-                raise ExtractorError('Unable to find selected tab')
-
-    def _extract_uploader(self, data):
-        uploader = {}
-        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
-        owner = try_get(
-            renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
-        if owner:
-            owner_text = owner.get('text')
-            uploader['uploader'] = self._search_regex(
-                r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text)
-            uploader['uploader_id'] = try_get(
-                owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], str)
-            uploader['uploader_url'] = urljoin(
-                'https://www.youtube.com/',
-                try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], str))
-        return {k: v for k, v in uploader.items() if v is not None}
+        for tab_renderer in tabs:
+            if tab_renderer.get('selected'):
+                return tab_renderer
+        if fatal:
+            raise ExtractorError('Unable to find selected tab')
+
+    @staticmethod
+    def _extract_tab_renderers(response):
+        return traverse_obj(
+            response, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., ('tabRenderer', 'expandableTabRenderer')), expected_type=dict)
 
     def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
-        playlist_id = title = description = channel_url = channel_name = channel_id = None
-        tags = []
+        metadata = self._extract_metadata_from_tabs(item_id, data)
 
         selected_tab = self._extract_selected_tab(tabs)
-        primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
-        renderer = try_get(
-            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
-        if renderer:
-            channel_name = renderer.get('title')
-            channel_url = renderer.get('channelUrl')
-            channel_id = renderer.get('externalId')
-        else:
-            renderer = try_get(
-                data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+        metadata['title'] += format_field(selected_tab, 'title', ' - %s')
+        metadata['title'] += format_field(selected_tab, 'expandedText', ' - %s')
 
-        if renderer:
-            title = renderer.get('title')
-            description = renderer.get('description', '')
-            playlist_id = channel_id
-            tags = renderer.get('keywords', '').split()
+        return self.playlist_result(
+            self._entries(
+                selected_tab, metadata['id'], ytcfg,
+                self._extract_account_syncid(ytcfg, data),
+                self._extract_visitor_data(data, ytcfg)),
+            **metadata)
+
+    def _extract_metadata_from_tabs(self, item_id, data):
+        info = {'id': item_id}
+
+        metadata_renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'), expected_type=dict)
+        if metadata_renderer:
+            info.update({
+                'uploader': metadata_renderer.get('title'),
+                'uploader_id': metadata_renderer.get('externalId'),
+                'uploader_url': metadata_renderer.get('channelUrl'),
+            })
+            if info['uploader_id']:
+                info['id'] = info['uploader_id']
+        else:
+            metadata_renderer = traverse_obj(data, ('metadata', 'playlistMetadataRenderer'), expected_type=dict)
 
         # We can get the uncropped banner/avatar by replacing the crop params with '=s0'
         # See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714
         def _get_uncropped(url):
             return url_or_none((url or '').split('=')[0] + '=s0')
 
-        avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar')
+        avatar_thumbnails = self._extract_thumbnails(metadata_renderer, 'avatar')
         if avatar_thumbnails:
             uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url'])
             if uncropped_avatar:
@@ -4593,7 +4864,7 @@ def _get_uncropped(url):
                 })
 
         channel_banners = self._extract_thumbnails(
-            data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner']))
+            data, ('header', ..., ('banner', 'mobileBanner', 'tvBanner')))
         for banner in channel_banners:
             banner['preference'] = -10
 
@@ -4606,46 +4877,64 @@ def _get_uncropped(url):
                     'preference': -5
                 })
 
+        # Deprecated - remove primary_sidebar_renderer when layout discontinued
+        primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
+        playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer'), expected_type=dict)
+
         primary_thumbnails = self._extract_thumbnails(
             primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail'))
+        playlist_thumbnails = self._extract_thumbnails(
+            playlist_header_renderer, ('playlistHeaderBanner', 'heroPlaylistThumbnailRenderer', 'thumbnail'))
 
-        if playlist_id is None:
-            playlist_id = item_id
-
-        playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats')
-        last_updated_unix = self._parse_time_text(self._get_text(playlist_stats, 2))
-        if title is None:
-            title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id
-        title += format_field(selected_tab, 'title', ' - %s')
-        title += format_field(selected_tab, 'expandedText', ' - %s')
-
-        metadata = {
-            'playlist_id': playlist_id,
-            'playlist_title': title,
-            'playlist_description': description,
-            'uploader': channel_name,
-            'uploader_id': channel_id,
-            'uploader_url': channel_url,
-            'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners,
-            'tags': tags,
-            'view_count': self._get_count(playlist_stats, 1),
+        info.update({
+            'title': (traverse_obj(metadata_renderer, 'title')
+                      or self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag'))
+                      or info['id']),
             'availability': self._extract_availability(data),
-            'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'),
-            'playlist_count': self._get_count(playlist_stats, 0),
             'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')),
-        }
-        if not channel_id:
-            metadata.update(self._extract_uploader(data))
-        metadata.update({
-            'channel': metadata['uploader'],
-            'channel_id': metadata['uploader_id'],
-            'channel_url': metadata['uploader_url']})
-        return self.playlist_result(
-            self._entries(
-                selected_tab, playlist_id, ytcfg,
-                self._extract_account_syncid(ytcfg, data),
-                self._extract_visitor_data(data, ytcfg)),
-            **metadata)
+            'description': try_get(metadata_renderer, lambda x: x.get('description', '')),
+            'tags': try_get(metadata_renderer or {}, lambda x: x.get('keywords', '').split()),
+            'thumbnails': (primary_thumbnails or playlist_thumbnails) + avatar_thumbnails + channel_banners,
+        })
+
+        # Playlist stats is a text runs array containing [video count, view count, last updated].
+        # Either last updated alone, or both view count and last updated, may be missing.
+        playlist_stats = get_first(
+            (primary_sidebar_renderer, playlist_header_renderer), (('stats', 'briefStats', 'numVideosText'), ))
+
+        last_updated_unix = self._parse_time_text(
+            self._get_text(playlist_stats, 2)  # deprecated, remove when old layout discontinued
+            or self._get_text(playlist_header_renderer, ('byline', 1, 'playlistBylineRenderer', 'text')))
+        info['modified_date'] = strftime_or_none(last_updated_unix, '%Y%m%d')
+
+        info['view_count'] = self._get_count(playlist_stats, 1)
+        if info['view_count'] is None:  # 0 is allowed
+            info['view_count'] = self._get_count(playlist_header_renderer, 'viewCountText')
+
+        info['playlist_count'] = self._get_count(playlist_stats, 0)
+        if info['playlist_count'] is None:  # 0 is allowed
+            info['playlist_count'] = self._get_count(playlist_header_renderer, ('byline', 0, 'playlistBylineRenderer', 'text'))
+
+        if not info.get('uploader_id'):
+            owner = traverse_obj(playlist_header_renderer, 'ownerText')
+            if not owner:  # Deprecated
+                owner = traverse_obj(
+                    self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer'),
+                    ('videoOwner', 'videoOwnerRenderer', 'title'))
+            owner_text = self._get_text(owner)
+            browse_ep = traverse_obj(owner, ('runs', 0, 'navigationEndpoint', 'browseEndpoint')) or {}
+            info.update({
+                'uploader': self._search_regex(r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text),
+                'uploader_id': browse_ep.get('browseId'),
+                'uploader_url': urljoin('https://www.youtube.com', browse_ep.get('canonicalBaseUrl'))
+            })
+
+        info.update({
+            'channel': info['uploader'],
+            'channel_id': info['uploader_id'],
+            'channel_url': info['uploader_url']
+        })
+        return info
 
     def _extract_inline_playlist(self, playlist, playlist_id, data, ytcfg):
         first_id = last_id = response = None
@@ -4707,19 +4996,21 @@ def _extract_availability(self, data):
         Note: Unless YouTube tells us explicitly, we do not assume it is public
         @param data: response
         """
-        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
+        sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
+        playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer')) or {}
+        player_header_privacy = playlist_header_renderer.get('privacy')
 
-        player_header_privacy = traverse_obj(
-            data, ('header', 'playlistHeaderRenderer', 'privacy'), expected_type=str)
-
-        badges = self._extract_badges(renderer)
+        badges = self._extract_badges(sidebar_renderer)
 
         # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
-        privacy_setting_icon = traverse_obj(
-            renderer, (
-                'privacyForm', 'dropdownFormFieldRenderer', 'dropdown', 'dropdownRenderer', 'entries',
-                lambda _, v: v['privacyDropdownItemRenderer']['isSelected'], 'privacyDropdownItemRenderer', 'icon', 'iconType'),
-            get_all=False, expected_type=str)
+        privacy_setting_icon = get_first(
+            (playlist_header_renderer, sidebar_renderer),
+            ('privacyForm', 'dropdownFormFieldRenderer', 'dropdown', 'dropdownRenderer', 'entries',
+             lambda _, v: v['privacyDropdownItemRenderer']['isSelected'], 'privacyDropdownItemRenderer', 'icon', 'iconType'),
+            expected_type=str)
+
+        microformats_is_unlisted = traverse_obj(
+            data, ('microformat', 'microformatDataRenderer', 'unlisted'), expected_type=bool)
 
         return (
             'public' if (
@@ -4734,7 +5025,8 @@ def _extract_availability(self, data):
                 is_unlisted=(
                     self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED)
                     or player_header_privacy == 'UNLISTED' if player_header_privacy is not None
-                    else privacy_setting_icon == 'PRIVACY_UNLISTED' if privacy_setting_icon is not None else None),
+                    else privacy_setting_icon == 'PRIVACY_UNLISTED' if privacy_setting_icon is not None
+                    else microformats_is_unlisted if microformats_is_unlisted is not None else None),
                 needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None,
                 needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None,
                 needs_auth=False))
@@ -4750,39 +5042,23 @@ def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
 
     def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
         """
-        Get playlist with unavailable videos if the 'show unavailable videos' button exists.
+        Reload playlists with unavailable videos (e.g. private or region-blocked videos)
         """
-        browse_id = params = None
-        renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
-        if not renderer:
+        is_playlist = bool(traverse_obj(
+            data, ('metadata', 'playlistMetadataRenderer'), ('header', 'playlistHeaderRenderer')))
+        if not is_playlist:
             return
-        menu_renderer = try_get(
-            renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
-        for menu_item in menu_renderer:
-            if not isinstance(menu_item, dict):
-                continue
-            nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
-            text = try_get(
-                nav_item_renderer, lambda x: x['text']['simpleText'], str)
-            if not text or text.lower() != 'show unavailable videos':
-                continue
-            browse_endpoint = try_get(
-                nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
-            browse_id = browse_endpoint.get('browseId')
-            params = browse_endpoint.get('params')
-            break
-
         headers = self.generate_api_headers(
             ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
             visitor_data=self._extract_visitor_data(data, ytcfg))
         query = {
-            'params': params or 'wgYCCAA=',
-            'browseId': browse_id or 'VL%s' % item_id
+            'params': 'wgYCCAA=',
+            'browseId': f'VL{item_id}'
         }
         return self._extract_response(
             item_id=item_id, headers=headers, query=query,
             check_get_keys='contents', fatal=False, ytcfg=ytcfg,
-            note='Downloading API JSON with unavailable videos')
+            note='Redownloading playlist API JSON with unavailable videos')
 
     @functools.cached_property
     def skip_webpage(self):
@@ -4834,8 +5110,7 @@ def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=Fals
             webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
             ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
             # Reject webpage data if redirected to home page without explicitly requesting
-            selected_tab = self._extract_selected_tab(traverse_obj(
-                data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
+            selected_tab = self._extract_selected_tab(self._extract_tab_renderers(data), fatal=False) or {}
             if (url != 'https://www.youtube.com/feed/recommended'
                     and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch'  # Home page
                     and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
@@ -4906,7 +5181,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
     IE_DESC = 'YouTube Tabs'
     _VALID_URL = r'''(?x:
         https?://
-            (?:\w+\.)?
+            (?!consent\.)(?:\w+\.)?
             (?:
                 youtube(?:kids)?\.com|
                 %(invidious)s
@@ -5280,6 +5555,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'channel_url': 'https://www.youtube.com/user/Computerphile',
             'channel': 'Computerphile',
             'availability': 'public',
+            'modified_date': '20190712',
         },
         'playlist_mincount': 11,
     }, {
@@ -5328,18 +5604,19 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'categories': ['News & Politics'],
             'tags': list,
             'like_count': int,
-            'release_timestamp': 1642502819,
+            'release_timestamp': int,
             'channel': 'Sky News',
             'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ',
             'age_limit': 0,
             'view_count': int,
-            'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg',
+            'thumbnail': r're:https?://i\.ytimg\.com/vi/[^/]+/maxresdefault(?:_live)?\.jpg',
             'playable_in_embed': True,
-            'release_date': '20220118',
+            'release_date': r're:\d+',
             'availability': 'public',
             'live_status': 'is_live',
             'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ',
-            'channel_follower_count': int
+            'channel_follower_count': int,
+            'concurrent_view_count': int,
         },
         'params': {
             'skip_download': True,
@@ -5417,7 +5694,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'title': '#cctv9',
             'tags': [],
         },
-        'playlist_mincount': 350,
+        'playlist_mincount': 300,  # not consistent but should be over 300
     }, {
         'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
         'only_matching': True,
@@ -5468,22 +5745,16 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'description': '',
             'availability': 'public',
         },
-        'expected_warnings': [
-            'The URL does not have a videos tab',
-            r'[Uu]navailable videos (are|will be) hidden',
-        ],
         'playlist_mincount': 101,
     }, {
-        'note': 'Topic without a UU playlist',
+        # Destination channel with only a hidden self tab (tab id is UCtFRv9O2AHqOZjjynzrv-xg)
+        # Treat as a general feed
         'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
         'info_dict': {
             'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
             'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
             'tags': [],
         },
-        'expected_warnings': [
-            'the playlist redirect gave error',
-        ],
         'playlist_mincount': 9,
     }, {
         'note': 'Youtube music Album',
@@ -5551,6 +5822,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'skip_download': True,
             'extractor_args': {'youtubetab': {'skip': ['webpage']}}
         },
+        'skip': 'Query for sorting no longer works',
     }, {
         'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
         'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
@@ -5569,10 +5841,6 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw',
             'availability': 'public',
         },
-        'expected_warnings': [
-            'does not have a videos tab',
-            r'[Uu]navailable videos (are|will be) hidden',
-        ],
         'playlist_mincount': 101,
         'params': {
             'skip_download': True,
@@ -5609,7 +5877,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'tags': [],
             'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA',
             'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
-            'description': '',
+            'description': 'test description',
             'title': 'cole-dlp-test-acc - 再生リスト',
             'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
             'uploader': 'cole-dlp-test-acc',
@@ -5650,13 +5918,254 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
         },
         'playlist_mincount': 50,
 
+    }, {
+        # Channel with a real live tab (not to be mistaken for the streams tab)
+        # Do not treat it as if it should redirect to a live stream
+        'url': 'https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live',
+        'info_dict': {
+            'id': 'UCEH7P7kyJIkS_gJf93VYbmg',
+            'title': 'UCEH7P7kyJIkS_gJf93VYbmg - Live',
+            'tags': [],
+        },
+        'playlist_mincount': 20,
+    }, {
+        # Tab name is not the same as tab id
+        'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/letsplay',
+        'info_dict': {
+            'id': 'UCQvWX73GQygcwXOTSf_VDVg',
+            'title': 'UCQvWX73GQygcwXOTSf_VDVg - Let\'s play',
+            'tags': [],
+        },
+        'playlist_mincount': 8,
+    }, {
+        # Home tab id is literally 'home'. Not to be mistaken for 'featured'
+        'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/home',
+        'info_dict': {
+            'id': 'UCQvWX73GQygcwXOTSf_VDVg',
+            'title': 'UCQvWX73GQygcwXOTSf_VDVg - Home',
+            'tags': [],
+        },
+        'playlist_mincount': 8,
+    }, {
+        # Should get three playlists for videos, shorts and streams tabs
+        'url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA',
+        'info_dict': {
+            'id': 'UCK9V2B22uJYu3N7eR_BT9QA',
+            'title': 'Polka Ch. 尾丸ポルカ',
+            'channel_follower_count': int,
+            'channel_id': 'UCK9V2B22uJYu3N7eR_BT9QA',
+            'channel_url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA',
+            'uploader': 'Polka Ch. 尾丸ポルカ',
+            'description': 'md5:3b8df1ac5af337aa206e37ee3d181ec9',
+            'channel': 'Polka Ch. 尾丸ポルカ',
+            'tags': 'count:35',
+            'uploader_url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA',
+            'uploader_id': 'UCK9V2B22uJYu3N7eR_BT9QA',
+        },
+        'playlist_count': 3,
+    }, {
+        # Shorts tab of a channel with a handle
+        'url': 'https://www.youtube.com/@NotJustBikes/shorts',
+        'info_dict': {
+            'id': 'UC0intLFzLaudFG-xAvUEO-A',
+            'title': 'Not Just Bikes - Shorts',
+            'tags': 'count:12',
+            'uploader': 'Not Just Bikes',
+            'channel_url': 'https://www.youtube.com/channel/UC0intLFzLaudFG-xAvUEO-A',
+            'description': 'md5:7513148b1f02b924783157d84c4ea555',
+            'channel_follower_count': int,
+            'uploader_id': 'UC0intLFzLaudFG-xAvUEO-A',
+            'channel_id': 'UC0intLFzLaudFG-xAvUEO-A',
+            'uploader_url': 'https://www.youtube.com/channel/UC0intLFzLaudFG-xAvUEO-A',
+            'channel': 'Not Just Bikes',
+        },
+        'playlist_mincount': 10,
+    }, {
+        # Streams tab
+        'url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig/streams',
+        'info_dict': {
+            'id': 'UC3eYAvjCVwNHgkaGbXX3sig',
+            'title': '中村悠一 - Live',
+            'tags': 'count:7',
+            'channel_id': 'UC3eYAvjCVwNHgkaGbXX3sig',
+            'channel_url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig',
+            'uploader_id': 'UC3eYAvjCVwNHgkaGbXX3sig',
+            'channel': '中村悠一',
+            'uploader_url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig',
+            'channel_follower_count': int,
+            'uploader': '中村悠一',
+            'description': 'md5:e744f6c93dafa7a03c0c6deecb157300',
+        },
+        'playlist_mincount': 60,
+    }, {
+        # Channel with no uploads and hence no videos, streams, shorts tabs or uploads playlist. This should fail.
+        # See test_youtube_lists
+        'url': 'https://www.youtube.com/channel/UC2yXPzFejc422buOIzn_0CA',
+        'only_matching': True,
+    }, {
+        # No uploads and no UCID given. Should fail with no uploads error
+        # See test_youtube_lists
+        'url': 'https://www.youtube.com/news',
+        'only_matching': True
+    }, {
+        # No videos tab but has a shorts tab
+        'url': 'https://www.youtube.com/c/TKFShorts',
+        'info_dict': {
+            'id': 'UCgJ5_1F6yJhYLnyMszUdmUg',
+            'title': 'Shorts Break - Shorts',
+            'tags': 'count:32',
+            'channel_id': 'UCgJ5_1F6yJhYLnyMszUdmUg',
+            'channel': 'Shorts Break',
+            'description': 'md5:a6c234cf3d50d878ef8721e34457cd11',
+            'uploader': 'Shorts Break',
+            'channel_follower_count': int,
+            'uploader_id': 'UCgJ5_1F6yJhYLnyMszUdmUg',
+            'uploader_url': 'https://www.youtube.com/channel/UCgJ5_1F6yJhYLnyMszUdmUg',
+            'channel_url': 'https://www.youtube.com/channel/UCgJ5_1F6yJhYLnyMszUdmUg',
+        },
+        'playlist_mincount': 30,
+    }, {
+        # Trending Now tab. The tab id is empty
+        'url': 'https://www.youtube.com/feed/trending',
+        'info_dict': {
+            'id': 'trending',
+            'title': 'trending - Now',
+            'tags': [],
+        },
+        'playlist_mincount': 30,
+    }, {
+        # Trending Gaming tab. The tab id is empty
+        'url': 'https://www.youtube.com/feed/trending?bp=4gIcGhpnYW1pbmdfY29ycHVzX21vc3RfcG9wdWxhcg%3D%3D',
+        'info_dict': {
+            'id': 'trending',
+            'title': 'trending - Gaming',
+            'tags': [],
+        },
+        'playlist_mincount': 30,
+    }, {
+        # Shorts URL result in the shorts tab
+        'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/shorts',
+        'info_dict': {
+            'id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'title': 'cole-dlp-test-acc - Shorts',
+            'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'channel': 'cole-dlp-test-acc',
+            'description': 'test description',
+            'channel_id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
+            'tags': [],
+            'uploader': 'cole-dlp-test-acc',
+            'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
+
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'ie_key': 'Youtube',
+                'url': 'https://www.youtube.com/shorts/sSM9J5YH_60',
+                'id': 'sSM9J5YH_60',
+                'channel_id': 'UCiu-3thuViMebBjw_5nWYrA',
+                'title': 'SHORT short',
+                'channel': 'cole-dlp-test-acc',
+                'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
+                'view_count': int,
+                'thumbnails': list,
+            }
+        }],
+        'params': {'extract_flat': True},
+    }, {
+        # Live video status should be extracted
+        'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/live',
+        'info_dict': {
+            'id': 'UCQvWX73GQygcwXOTSf_VDVg',
+            'title': 'UCQvWX73GQygcwXOTSf_VDVg - Live',  # TODO, should be Minecraft - Live or Minecraft - Topic - Live
+            'tags': []
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'ie_key': 'Youtube',
+                'url': 'startswith:https://www.youtube.com/watch?v=',
+                'id': str,
+                'title': str,
+                'live_status': 'is_live',
+                'channel_id': str,
+                'channel_url': str,
+                'concurrent_view_count': int,
+                'channel': str,
+            }
+        }],
+        'params': {'extract_flat': True, 'playlist_items': '1'},
+        'playlist_mincount': 1
+    }, {
+        # Channel renderer metadata. Contains number of videos on the channel
+        'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/channels',
+        'info_dict': {
+            'id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'title': 'cole-dlp-test-acc - Channels',
+            'uploader_id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'channel': 'cole-dlp-test-acc',
+            'description': 'test description',
+            'channel_id': 'UCiu-3thuViMebBjw_5nWYrA',
+            'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
+            'tags': [],
+            'uploader': 'cole-dlp-test-acc',
+            'uploader_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA',
+
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'ie_key': 'YoutubeTab',
+                'url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+                'id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+                'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+                'title': 'PewDiePie',
+                'channel': 'PewDiePie',
+                'channel_url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+                'thumbnails': list,
+                'channel_follower_count': int,
+                'playlist_count': int
+            }
+        }],
+        'params': {'extract_flat': True},
     }]
 
     @classmethod
     def suitable(cls, url):
         return False if YoutubeIE.suitable(url) else super().suitable(url)
 
-    _URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
+    _URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/[^?#/]+))?(?P<post>.*)$')
+
+    def _get_url_mobj(self, url):
+        mobj = self._URL_RE.match(url).groupdict()
+        mobj.update((k, '') for k, v in mobj.items() if v is None)
+        return mobj
+
+    def _extract_tab_id_and_name(self, tab, base_url='https://www.youtube.com'):
+        tab_name = (tab.get('title') or '').lower()
+        tab_url = urljoin(base_url, traverse_obj(
+            tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
+
+        tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
+                  or traverse_obj(tab, 'tabIdentifier', expected_type=str))
+        if tab_id:
+            return {
+                'TAB_ID_SPONSORSHIPS': 'membership',
+            }.get(tab_id, tab_id), tab_name
+
+        # Fall back to the tab name if we cannot get the tab id.
+        # XXX: should we strip non-ascii letters? e.g. the 'let's play' tab on the special gaming channel
+        # Note that with a translated tab name this may result in an empty string, which we don't want.
+        if tab_name:
+            self.write_debug(f'Falling back to selected tab name: {tab_name}')
+        return {
+            'home': 'featured',
+            'live': 'streams',
+        }.get(tab_name, tab_name), tab_name
+
+    def _has_tab(self, tabs, tab_id):
+        return any(self._extract_tab_id_and_name(tab)[0] == tab_id for tab in tabs)
 
     @YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
     def _real_extract(self, url, smuggled_data):
@@ -5665,123 +6174,135 @@ def _real_extract(self, url, smuggled_data):
             urllib.parse.urlparse(url)._replace(netloc='www.youtube.com'))
         compat_opts = self.get_param('compat_opts', [])
 
-        def get_mobj(url):
-            mobj = self._URL_RE.match(url).groupdict()
-            mobj.update((k, '') for k, v in mobj.items() if v is None)
-            return mobj
-
-        mobj, redirect_warning = get_mobj(url), None
-        # Youtube returns incomplete data if tabname is not lower case
-        pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
-        if is_channel:
-            if smuggled_data.get('is_music_url'):
-                if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
-                    item_id = item_id[2:]
-                    pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
-                elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
-                    mdata = self._extract_tab_endpoint(
-                        f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
-                    murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
-                                        get_all=False, expected_type=str)
-                    if not murl:
-                        raise ExtractorError('Failed to resolve album to playlist')
-                    return self.url_result(murl, ie=YoutubeTabIE.ie_key())
-                elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
-                    pre = f'https://www.youtube.com/channel/{item_id}'
-
-        original_tab_name = tab
+        mobj = self._get_url_mobj(url)
+        pre, tab, post, is_channel = mobj['pre'], mobj['tab'], mobj['post'], not mobj['not_channel']
+        if is_channel and smuggled_data.get('is_music_url'):
+            if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
+                return self.url_result(
+                    f'https://music.youtube.com/playlist?list={item_id[2:]}', YoutubeTabIE, item_id[2:])
+            elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
+                mdata = self._extract_tab_endpoint(
+                    f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
+                murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
+                                    get_all=False, expected_type=str)
+                if not murl:
+                    raise ExtractorError('Failed to resolve album to playlist')
+                return self.url_result(murl, YoutubeTabIE)
+            elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
+                return self.url_result(
+                    f'https://music.youtube.com/channel/{item_id}{tab}{post}', YoutubeTabIE, item_id)
+
+        original_tab_id, display_id = tab[1:], f'{item_id}{tab}'
         if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
-            # Home URLs should redirect to /videos/
-            redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
-                                'To download only the videos in the home page, add a "/featured" to the URL')
-            tab = '/videos'
-
-        url = ''.join((pre, tab, post))
-        mobj = get_mobj(url)
+            url = f'{pre}/videos{post}'
 
         # Handle both video/playlist URLs
         qs = parse_qs(url)
-        video_id, playlist_id = (qs.get(key, [None])[0] for key in ('v', 'list'))
-
+        video_id, playlist_id = [traverse_obj(qs, (key, 0)) for key in ('v', 'list')]
         if not video_id and mobj['not_channel'].startswith('watch'):
             if not playlist_id:
                 # If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
-                raise ExtractorError('Unable to recognize tab page')
+                raise ExtractorError('A video URL was given without video ID', expected=True)
             # Common mistake: https://www.youtube.com/watch?list=playlist_id
             self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
-            url = f'https://www.youtube.com/playlist?list={playlist_id}'
-            mobj = get_mobj(url)
+            return self.url_result(
+                f'https://www.youtube.com/playlist?list={playlist_id}', YoutubeTabIE, playlist_id)
 
-        if video_id and playlist_id:
-            if self.get_param('noplaylist'):
-                self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
-                return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                       ie=YoutubeIE.ie_key(), video_id=video_id)
-            self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
+        if not self._yes_playlist(playlist_id, video_id):
+            return self.url_result(
+                f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
 
-        data, ytcfg = self._extract_data(url, item_id)
+        data, ytcfg = self._extract_data(url, display_id)
 
         # YouTube may provide a non-standard redirect to the regional channel
         # See: https://github.com/yt-dlp/yt-dlp/issues/2694
+        # https://support.google.com/youtube/answer/2976814#zippy=,conditional-redirects
         redirect_url = traverse_obj(
             data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
         if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
-            redirect_url = ''.join((
-                urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
-            self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
-            return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
+            redirect_url = ''.join((urljoin('https://www.youtube.com', redirect_url), tab, post))
+            self.to_screen(f'This playlist is likely not available in your region. Following conditional redirect to {redirect_url}')
+            return self.url_result(redirect_url, YoutubeTabIE)
 
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
+        tabs, extra_tabs = self._extract_tab_renderers(data), []
+        if is_channel and tabs and 'no-youtube-channel-redirect' not in compat_opts:
             selected_tab = self._extract_selected_tab(tabs)
-            selected_tab_url = urljoin(
-                url, traverse_obj(selected_tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
-            translated_tab_name = selected_tab.get('title', '').lower()
-
-            # Prefer tab name from tab url as it is always in en,
-            # but only when preferred lang is set as it may not extract reliably in all cases.
-            selected_tab_name = (self._preferred_lang in (None, 'en') and translated_tab_name
-                                 or selected_tab_url and get_mobj(selected_tab_url)['tab'][1:]  # primary
-                                 or translated_tab_name)
-
-            if selected_tab_name == 'home':
-                selected_tab_name = 'featured'
-            requested_tab_name = mobj['tab'][1:]
-
-            if 'no-youtube-channel-redirect' not in compat_opts:
-                if requested_tab_name == 'live':  # Live tab should have redirected to the video
-                    raise UserNotLive(video_id=mobj['id'])
-                if requested_tab_name not in ('', selected_tab_name):
-                    redirect_warning = f'The channel does not have a {requested_tab_name} tab'
-                    if not original_tab_name:
-                        if item_id[:2] == 'UC':
-                            # Topic channels don't have /videos. Use the equivalent playlist instead
-                            pl_id = f'UU{item_id[2:]}'
-                            pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
-                            try:
-                                data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
-                            except ExtractorError:
-                                redirect_warning += ' and the playlist redirect gave error'
-                            else:
-                                item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
-                                redirect_warning += f'. Redirecting to playlist {pl_id} instead'
-                        if selected_tab_name and selected_tab_name != requested_tab_name:
-                            redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
+            selected_tab_id, selected_tab_name = self._extract_tab_id_and_name(selected_tab, url)  # NB: Name may be translated
+            self.write_debug(f'Selected tab: {selected_tab_id!r} ({selected_tab_name}), Requested tab: {original_tab_id!r}')
+
+            if not original_tab_id and selected_tab_name:
+                self.to_screen('Downloading all uploads of the channel. '
+                               'To download only the videos in a specific tab, pass the tab\'s URL')
+                if self._has_tab(tabs, 'streams'):
+                    extra_tabs.append(''.join((pre, '/streams', post)))
+                if self._has_tab(tabs, 'shorts'):
+                    extra_tabs.append(''.join((pre, '/shorts', post)))
+                # XXX: Members-only tab should also be extracted
+
+                if not extra_tabs and selected_tab_id != 'videos':
+                    # Channel does not have streams, shorts or videos tabs
+                    if item_id[:2] != 'UC':
+                        raise ExtractorError('This channel has no uploads', expected=True)
+
+                    # Topic channels don't have /videos. Use the equivalent playlist instead
+                    pl_id = f'UU{item_id[2:]}'
+                    pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
+                    try:
+                        data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
+                    except ExtractorError:
+                        raise ExtractorError('This channel has no uploads', expected=True)
                     else:
-                        raise ExtractorError(redirect_warning, expected=True)
-
-        if redirect_warning:
-            self.to_screen(redirect_warning)
-        self.write_debug(f'Final URL: {url}')
+                        item_id, url = pl_id, pl_url
+                        self.to_screen(
+                            f'The channel does not have a videos, shorts, or live tab. Redirecting to playlist {pl_id} instead')
+
+                elif extra_tabs and selected_tab_id != 'videos':
+                    # When there are shorts/live tabs but not videos tab
+                    url, data = f'{pre}{post}', None
+
+            elif (original_tab_id or 'videos') != selected_tab_id:
+                if original_tab_id == 'live':
+                    # Live tab should have redirected to the video
+                    # Except in the case the channel has an actual live tab
+                    # Example: https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live
+                    raise UserNotLive(video_id=item_id)
+                elif selected_tab_name:
+                    raise ExtractorError(f'This channel does not have a {original_tab_id} tab', expected=True)
+
+                # For channels such as https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg
+                url = f'{pre}{post}'
 
         # YouTube sometimes provides a button to reload playlist with unavailable videos.
         if 'no-youtube-unavailable-videos' not in compat_opts:
-            data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
+            data = self._reload_with_unavailable_videos(display_id, data, ytcfg) or data
         self._extract_and_report_alerts(data, only_once=True)
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
-            return self._extract_from_tabs(item_id, ytcfg, data, tabs)
 
+        tabs, entries = self._extract_tab_renderers(data), []
+        if tabs:
+            entries = [self._extract_from_tabs(item_id, ytcfg, data, tabs)]
+            entries[0].update({
+                'extractor_key': YoutubeTabIE.ie_key(),
+                'extractor': YoutubeTabIE.IE_NAME,
+                'webpage_url': url,
+            })
+        if self.get_param('playlist_items') == '0':
+            entries.extend(self.url_result(u, YoutubeTabIE) for u in extra_tabs)
+        else:  # Users expect to get all `video_id`s even with `--flat-playlist`. So don't return `url_result`
+            entries.extend(map(self._real_extract, extra_tabs))
+
+        if len(entries) == 1:
+            return entries[0]
+        elif entries:
+            metadata = self._extract_metadata_from_tabs(item_id, data)
+            uploads_url = 'the Uploads (UU) playlist URL'
+            if try_get(metadata, lambda x: x['channel_id'].startswith('UC')):
+                uploads_url = f'https://www.youtube.com/playlist?list=UU{metadata["channel_id"][2:]}'
+            self.to_screen(
+                'Downloading as multiple playlists, separated by tabs. '
+                f'To download as a single playlist instead, pass {uploads_url}')
+            return self.playlist_result(entries, item_id, **metadata)
+
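As an aside, both the topic-channel fallback and the "single playlist" hint above rely on the convention that a channel's Uploads playlist ID is the channel ID with its "UC" prefix swapped for "UU" (the `f'UU{item_id[2:]}'` expression). A minimal standalone sketch of that derivation; the helper name is made up and is not part of the extractor:

    def uploads_playlist_url(channel_id):
        # Channel IDs look like 'UCxxxxxxxxxxxxxxxxxxxxxx'; swapping the 'UC'
        # prefix for 'UU' gives the channel's "Uploads" playlist ID -- the same
        # transformation the code above applies via f'UU{item_id[2:]}'.
        if not channel_id.startswith('UC'):
            raise ValueError('expected a UC... channel ID')
        return f'https://www.youtube.com/playlist?list=UU{channel_id[2:]}'

    # uploads_playlist_url('UCsXVk37bltHxD1rDPwtNM8Q')
    # -> 'https://www.youtube.com/playlist?list=UUsXVk37bltHxD1rDPwtNM8Q'
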
+        # Inline playlist
         playlist = traverse_obj(
             data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
         if playlist:
@@ -5790,10 +6311,9 @@ def get_mobj(url):
         video_id = traverse_obj(
             data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
         if video_id:
-            if mobj['tab'] != '/live':  # live tab is expected to redirect to video
+            if tab != '/live':  # live tab is expected to redirect to video
                 self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
-            return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                   ie=YoutubeIE.ie_key(), video_id=video_id)
+            return self.url_result(f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
 
         raise ExtractorError('Unable to recognize tab page')
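The video/playlist disambiguation near the top of this method (`parse_qs` plus `traverse_obj(qs, (key, 0))`) and the watch-URL fallback just above come down to ordinary query-string handling. A rough standard-library equivalent, as a sketch only (the helper name is made up; yt-dlp's own utilities behave the same for this case):

    from urllib.parse import parse_qs, urlparse

    def video_and_playlist_ids(url):
        # First 'v' and 'list' query values, or None when absent -- the same
        # result as traverse_obj(qs, (key, 0)) over yt-dlp's parse_qs(url).
        qs = parse_qs(urlparse(url).query)
        return qs.get('v', [None])[0], qs.get('list', [None])[0]

    # video_and_playlist_ids('https://www.youtube.com/watch?v=Lyj-MZSAA9o&list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl')
    # -> ('Lyj-MZSAA9o', 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl')
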
 
@@ -5826,12 +6346,13 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
             'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
             'view_count': int,
-            'uploader_url': 'https://www.youtube.com/user/Wickydoo',
+            'uploader_url': 'https://www.youtube.com/c/WickmanVT',
             'modified_date': r're:\d{8}',
             'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
             'channel': 'Wickman',
             'tags': [],
-            'channel_url': 'https://www.youtube.com/user/Wickydoo',
+            'channel_url': 'https://www.youtube.com/c/WickmanVT',
+            'availability': 'public',
         },
         'playlist_mincount': 29,
     }, {
@@ -5861,7 +6382,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
             'availability': 'public',
         },
-        'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
+        'expected_warnings': [r'[Uu]navailable videos? (is|are|will be) hidden'],
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
         'playlist_mincount': 455,
@@ -5988,9 +6509,7 @@ class YoutubeYtUserIE(InfoExtractor):
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
-        return self.url_result(
-            'https://www.youtube.com/user/%s/videos' % user_id,
-            ie=YoutubeTabIE.ie_key(), video_id=user_id)
+        return self.url_result(f'https://www.youtube.com/user/{user_id}', YoutubeTabIE, user_id)
 
 
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
@@ -6067,9 +6586,9 @@ def _extract_notification_renderer(self, notification):
         title = self._search_regex(
             rf'{re.escape(channel or "")}[^:]+: (.+)', notification_title,
             'video title', default=None)
-        upload_date = (strftime_or_none(self._parse_time_text(self._get_text(notification, 'sentTimeText')), '%Y%m%d')
-                       if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE.ie_key())
-                       else None)
+        timestamp = (self._parse_time_text(self._get_text(notification, 'sentTimeText'))
+                     if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE)
+                     else None)
         return {
             '_type': 'url',
             'url': url,
@@ -6079,7 +6598,7 @@ def _extract_notification_renderer(self, notification):
             'channel_id': channel_id,
             'channel': channel,
             'thumbnails': self._extract_thumbnails(notification, 'videoThumbnail'),
-            'upload_date': upload_date,
+            'timestamp': timestamp,
         }
 
     def _notification_menu_entries(self, ytcfg):
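Notification entries now carry a raw 'timestamp' instead of a pre-formatted 'upload_date'. Roughly what the removed strftime_or_none(..., '%Y%m%d') call produced, and what can now be derived downstream from the timestamp, is sketched below (illustrative only; the function name is made up):

    import datetime

    def upload_date_from_timestamp(timestamp):
        # Roughly what the removed strftime_or_none(..., '%Y%m%d') call returned
        # for a UNIX timestamp; None is passed through unchanged.
        if timestamp is None:
            return None
        return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc).strftime('%Y%m%d')

    # upload_date_from_timestamp(1671793345) -> '20221223'
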
@@ -6163,6 +6682,30 @@ class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
             #     'title': '#cats',
             # }],
         },
+    }, {
+        # Channel results
+        'url': 'https://www.youtube.com/results?search_query=kurzgesagt&sp=EgIQAg%253D%253D',
+        'info_dict': {
+            'id': 'kurzgesagt',
+            'title': 'kurzgesagt',
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'ie_key': 'YoutubeTab',
+                'channel': 'Kurzgesagt – In a Nutshell',
+                'description': 'md5:4ae48dfa9505ffc307dad26342d06bfc',
+                'title': 'Kurzgesagt – In a Nutshell',
+                'channel_id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'playlist_count': int,  # XXX: should have a way of saying > 1
+                'channel_url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'thumbnails': list
+            }
+        }],
+        'params': {'extract_flat': True, 'playlist_items': '1'},
+        'playlist_mincount': 1,
     }, {
         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
         'only_matching': True,
@@ -6318,14 +6861,11 @@ def _real_extract(self, url):
 
 
 class YoutubeShortsAudioPivotIE(InfoExtractor):
-    IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video); "ytshortsap:" prefix'
+    IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video)'
     IE_NAME = 'youtube:shorts:pivot:audio'
-    _VALID_URL = f'(?x)^ytshortsap:{YoutubeIE._VALID_URL[5:]}'
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/source/(?P<id>[\w-]{11})/shorts'
     _TESTS = [{
-        'url': 'ytshortsap:https://www.youtube.com/shorts/Lyj-MZSAA9o?feature=share',
-        'only_matching': True,
-    }, {
-        'url': 'ytshortsap:Lyj-MZSAA9o',
+        'url': 'https://www.youtube.com/source/Lyj-MZSAA9o/shorts',
         'only_matching': True,
     }]
 
@@ -6451,6 +6991,51 @@ def _real_extract(self, url):
         }
 
 
+class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
+    IE_NAME = 'youtube:consent'
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://consent\.youtube\.com/m\?'
+    _TESTS = [{
+        'url': 'https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1',
+        'info_dict': {
+            'id': 'qVv6vCqciTM',
+            'ext': 'mp4',
+            'age_limit': 0,
+            'uploader_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'comment_count': int,
+            'chapters': 'count:13',
+            'upload_date': '20221223',
+            'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+            'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'uploader_url': 'http://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'like_count': int,
+            'release_date': '20221223',
+            'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+            'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+            'view_count': int,
+            'playable_in_embed': True,
+            'duration': 4438,
+            'availability': 'public',
+            'channel_follower_count': int,
+            'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'categories': ['Entertainment'],
+            'live_status': 'was_live',
+            'release_timestamp': 1671793345,
+            'channel': 'さなちゃんねる',
+            'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+            'uploader': 'さなちゃんねる',
+        },
+        'add_ie': ['Youtube'],
+        'params': {'skip_download': 'Youtube'},
+    }]
+
+    def _real_extract(self, url):
+        redirect_url = url_or_none(parse_qs(url).get('continue', [None])[-1])
+        if not redirect_url:
+            raise ExtractorError('Invalid cookie consent redirect URL', expected=True)
+        return self.url_result(redirect_url)
+
+
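The new consent extractor's query handling amounts to pulling the percent-encoded original URL out of the 'continue' parameter. A rough standard-library sketch under that assumption (the helper name is made up; yt-dlp's parse_qs/url_or_none do the equivalent with extra URL validation):

    from urllib.parse import parse_qs, urlparse

    def consent_continue_url(consent_url):
        # consent.youtube.com carries the original (percent-encoded) URL in the
        # 'continue' query parameter; take the last value, as the extractor does.
        values = parse_qs(urlparse(consent_url).query).get('continue')
        return values[-1] if values else None

    # consent_continue_url('https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1')
    # -> 'https://www.youtube.com/live/qVv6vCqciTM?cbrd=1'
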
 class YoutubeTruncatedIDIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_id'
     IE_DESC = False  # Do not list