X-Git-Url: https://jfr.im/git/yt-dlp.git/blobdiff_plain/a0c830f488170db9007979da0ba13ebf9ebad5b1..6dc00acf0f1f1107a626c21befd1691403e6aeeb:/yt_dlp/extractor/youtube.py

diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 4ad8cf900..80edcd77d 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -1,7 +1,9 @@
 import base64
 import calendar
+import collections
 import copy
 import datetime
+import enum
 import hashlib
 import itertools
 import json
@@ -17,17 +19,20 @@
 import urllib.parse
 
 from .common import InfoExtractor, SearchInfoExtractor
+from .openload import PhantomJSwrapper
 from ..compat import functools
 from ..jsinterp import JSInterpreter
 from ..utils import (
     NO_DEFAULT,
     ExtractorError,
+    LazyList,
     UserNotLive,
     bug_reports_message,
     classproperty,
     clean_html,
     datetime_from_str,
     dict_get,
+    filter_dict,
     float_or_none,
     format_field,
     get_first,
@@ -61,6 +66,7 @@
     variadic,
 )
 
+STREAMING_DATA_CLIENT_NAME = '__yt_dlp_client'
 # any clients starting with _ cannot be explicitly requested by the user
 INNERTUBE_CLIENTS = {
     'web': {
@@ -109,7 +115,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'ANDROID',
-                'clientVersion': '17.28.34',
+                'clientVersion': '17.31.35',
+                'androidSdkVersion': 30,
+                'userAgent': 'com.google.android.youtube/17.31.35 (Linux; U; Android 11) gzip'
             }
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
@@ -120,7 +128,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'ANDROID_EMBEDDED_PLAYER',
-                'clientVersion': '17.28.34',
+                'clientVersion': '17.31.35',
+                'androidSdkVersion': 30,
+                'userAgent': 'com.google.android.youtube/17.31.35 (Linux; U; Android 11) gzip'
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
@@ -132,6 +142,8 @@
             'client': {
                 'clientName': 'ANDROID_MUSIC',
                 'clientVersion': '5.16.51',
+                'androidSdkVersion': 30,
+                'userAgent': 'com.google.android.apps.youtube.music/5.16.51 (Linux; U; Android 11) gzip'
             }
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
@@ -142,7 +154,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'ANDROID_CREATOR',
-                'clientVersion': '22.28.100',
+                'clientVersion': '22.30.100',
+                'androidSdkVersion': 30,
+                'userAgent': 'com.google.android.apps.youtube.creator/22.30.100 (Linux; U; Android 11) gzip'
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
@@ -155,8 +169,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'IOS',
-                'clientVersion': '17.30.1',
+                'clientVersion': '17.33.2',
                 'deviceModel': 'iPhone14,3',
+                'userAgent': 'com.google.ios.youtube/17.33.2 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
             }
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
@@ -166,8 +181,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'IOS_MESSAGES_EXTENSION',
-                'clientVersion': '17.30.1',
+                'clientVersion': '17.33.2',
                 'deviceModel': 'iPhone14,3',
+                'userAgent': 'com.google.ios.youtube/17.33.2 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
@@ -178,7 +194,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'IOS_MUSIC',
-                'clientVersion': '5.18',
+                'clientVersion': '5.21',
+                'deviceModel': 'iPhone14,3',
+                'userAgent': 'com.google.ios.youtubemusic/5.21 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
             },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
@@ -188,7 +206,9 @@
         'INNERTUBE_CONTEXT': {
             'client': {
                 'clientName': 'IOS_CREATOR',
-                'clientVersion': '22.29.101',
+                'clientVersion': '22.33.101',
+                'deviceModel': 'iPhone14,3',
+                'userAgent': 'com.google.ios.ytcreator/22.33.101 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
            },
         },
         'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
@@ -229,6 +249,11 @@ def _split_innertube_client(client_name):
return client_name, base, variant[0] if variant else None +def short_client_name(client_name): + main, *parts = _split_innertube_client(client_name)[0].replace('embedscreen', 'e_s').split('_') + return join_nonempty(main[:4], ''.join(x[0] for x in parts)).upper() + + def build_innertube_clients(): THIRD_PARTY = { 'embedUrl': 'https://www.youtube.com/', # Can be any valid URL @@ -260,14 +285,23 @@ def build_innertube_clients(): build_innertube_clients() +class BadgeType(enum.Enum): + AVAILABILITY_UNLISTED = enum.auto() + AVAILABILITY_PRIVATE = enum.auto() + AVAILABILITY_PUBLIC = enum.auto() + AVAILABILITY_PREMIUM = enum.auto() + AVAILABILITY_SUBSCRIPTION = enum.auto() + LIVE_NOW = enum.auto() + + class YoutubeBaseInfoExtractor(InfoExtractor): """Provide base functions for Youtube extractors""" _RESERVED_NAMES = ( - r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|' + r'channel|c|user|playlist|watch|w|v|embed|e|live|watch_popup|clip|' r'shorts|movies|results|search|shared|hashtag|trending|explore|feed|feeds|' - r'browse|oembed|get_video_info|iframe_api|s/player|' - r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout') + r'browse|oembed|get_video_info|iframe_api|s/player|source|' + r'storefront|oops|index|account|t/terms|about|upload|signin|logout') _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)' @@ -284,6 +318,40 @@ class YoutubeBaseInfoExtractor(InfoExtractor): r'(?:www\.)?invidious\.pussthecat\.org', r'(?:www\.)?invidious\.zee\.li', r'(?:www\.)?invidious\.ethibox\.fr', + r'(?:www\.)?iv\.ggtyler\.dev', + r'(?:www\.)?inv\.vern\.i2p', + r'(?:www\.)?am74vkcrjp2d5v36lcdqgsj2m6x36tbrkhsruoegwfcizzabnfgf5zyd\.onion', + r'(?:www\.)?inv\.riverside\.rocks', + r'(?:www\.)?invidious\.silur\.me', + r'(?:www\.)?inv\.bp\.projectsegfau\.lt', + r'(?:www\.)?invidious\.g4c3eya4clenolymqbpgwz3q3tawoxw56yhzk4vugqrl6dtu3ejvhjid\.onion', + r'(?:www\.)?invidious\.slipfox\.xyz', + r'(?:www\.)?invidious\.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd\.onion', + r'(?:www\.)?inv\.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad\.onion', + r'(?:www\.)?invidious\.tiekoetter\.com', + r'(?:www\.)?iv\.odysfvr23q5wgt7i456o5t3trw2cw5dgn56vbjfbq2m7xsc5vqbqpcyd\.onion', + r'(?:www\.)?invidious\.nerdvpn\.de', + r'(?:www\.)?invidious\.weblibre\.org', + r'(?:www\.)?inv\.odyssey346\.dev', + r'(?:www\.)?invidious\.dhusch\.de', + r'(?:www\.)?iv\.melmac\.space', + r'(?:www\.)?watch\.thekitty\.zone', + r'(?:www\.)?invidious\.privacydev\.net', + r'(?:www\.)?ng27owmagn5amdm7l5s3rsqxwscl5ynppnis5dqcasogkyxcfqn7psid\.onion', + r'(?:www\.)?invidious\.drivet\.xyz', + r'(?:www\.)?vid\.priv\.au', + r'(?:www\.)?euxxcnhsynwmfidvhjf6uzptsmh4dipkmgdmcmxxuo7tunp3ad2jrwyd\.onion', + r'(?:www\.)?inv\.vern\.cc', + r'(?:www\.)?invidious\.esmailelbob\.xyz', + r'(?:www\.)?invidious\.sethforprivacy\.com', + r'(?:www\.)?yt\.oelrichsgarcia\.de', + r'(?:www\.)?yt\.artemislena\.eu', + r'(?:www\.)?invidious\.flokinet\.to', + r'(?:www\.)?invidious\.baczek\.me', + r'(?:www\.)?y\.com\.sb', + r'(?:www\.)?invidious\.epicsite\.xyz', + r'(?:www\.)?invidious\.lidarshield\.cloud', + r'(?:www\.)?yt\.funami\.tech', r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion', r'(?:www\.)?osbivz6guyeahrwp2lnwyjk2xos342h4ocsxyqrlaopqjuhwn2djiiyd\.onion', r'(?:www\.)?u2cvlit75owumwpy4dj2hsmvkq7nvrclkpht7xgyye2pyoxhpmclkrad\.onion', @@ -342,16 +410,88 @@ class YoutubeBaseInfoExtractor(InfoExtractor): 
r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion', # piped instances from https://github.com/TeamPiped/Piped/wiki/Instances r'(?:www\.)?piped\.kavin\.rocks', - r'(?:www\.)?piped\.silkky\.cloud', r'(?:www\.)?piped\.tokhmi\.xyz', - r'(?:www\.)?piped\.moomoo\.me', - r'(?:www\.)?il\.ax', - r'(?:www\.)?piped\.syncpundit\.com', + r'(?:www\.)?piped\.syncpundit\.io', r'(?:www\.)?piped\.mha\.fi', + r'(?:www\.)?watch\.whatever\.social', + r'(?:www\.)?piped\.garudalinux\.org', + r'(?:www\.)?piped\.rivo\.lol', + r'(?:www\.)?piped-libre\.kavin\.rocks', + r'(?:www\.)?yt\.jae\.fi', r'(?:www\.)?piped\.mint\.lgbt', - r'(?:www\.)?piped\.privacy\.com\.de', + r'(?:www\.)?il\.ax', + r'(?:www\.)?piped\.esmailelbob\.xyz', + r'(?:www\.)?piped\.projectsegfau\.lt', + r'(?:www\.)?piped\.privacydev\.net', + r'(?:www\.)?piped\.palveluntarjoaja\.eu', + r'(?:www\.)?piped\.smnz\.de', + r'(?:www\.)?piped\.adminforge\.de', + r'(?:www\.)?watch\.whatevertinfoil\.de', + r'(?:www\.)?piped\.qdi\.fi', + r'(?:www\.)?piped\.video', + r'(?:www\.)?piped\.aeong\.one', + r'(?:www\.)?piped\.moomoo\.me', + r'(?:www\.)?piped\.chauvet\.pro', + r'(?:www\.)?watch\.leptons\.xyz', + r'(?:www\.)?pd\.vern\.cc', + r'(?:www\.)?piped\.hostux\.net', + r'(?:www\.)?piped\.lunar\.icu', + # Hyperpipe instances from https://hyperpipe.codeberg.page/ + r'(?:www\.)?hyperpipe\.surge\.sh', + r'(?:www\.)?hyperpipe\.esmailelbob\.xyz', + r'(?:www\.)?listen\.whatever\.social', + r'(?:www\.)?music\.adminforge\.de', ) + # extracted from account/account_menu ep + # XXX: These are the supported YouTube UI and API languages, + # which is slightly different from languages supported for translation in YouTube studio + _SUPPORTED_LANG_CODES = [ + 'af', 'az', 'id', 'ms', 'bs', 'ca', 'cs', 'da', 'de', 'et', 'en-IN', 'en-GB', 'en', 'es', + 'es-419', 'es-US', 'eu', 'fil', 'fr', 'fr-CA', 'gl', 'hr', 'zu', 'is', 'it', 'sw', 'lv', + 'lt', 'hu', 'nl', 'no', 'uz', 'pl', 'pt-PT', 'pt', 'ro', 'sq', 'sk', 'sl', 'sr-Latn', 'fi', + 'sv', 'vi', 'tr', 'be', 'bg', 'ky', 'kk', 'mk', 'mn', 'ru', 'sr', 'uk', 'el', 'hy', 'iw', + 'ur', 'ar', 'fa', 'ne', 'mr', 'hi', 'as', 'bn', 'pa', 'gu', 'or', 'ta', 'te', 'kn', 'ml', + 'si', 'th', 'lo', 'my', 'ka', 'am', 'km', 'zh-CN', 'zh-TW', 'zh-HK', 'ja', 'ko' + ] + + _IGNORED_WARNINGS = {'Unavailable videos will be hidden during playback'} + + _YT_HANDLE_RE = r'@[\w.-]{3,30}' # https://support.google.com/youtube/answer/11585688?hl=en + _YT_CHANNEL_UCID_RE = r'UC[\w-]{22}' + + def ucid_or_none(self, ucid): + return self._search_regex(rf'^({self._YT_CHANNEL_UCID_RE})$', ucid, 'UC-id', default=None) + + def handle_or_none(self, handle): + return self._search_regex(rf'^({self._YT_HANDLE_RE})$', handle, '@-handle', default=None) + + def handle_from_url(self, url): + return self._search_regex(rf'^(?:https?://(?:www\.)?youtube\.com)?/({self._YT_HANDLE_RE})', + url, 'channel handle', default=None) + + def ucid_from_url(self, url): + return self._search_regex(rf'^(?:https?://(?:www\.)?youtube\.com)?/({self._YT_CHANNEL_UCID_RE})', + url, 'channel id', default=None) + + @functools.cached_property + def _preferred_lang(self): + """ + Returns a language code supported by YouTube for the user preferred language. + Returns None if no preferred language set. + """ + preferred_lang = self._configuration_arg('lang', ie_key='Youtube', casesense=True, default=[''])[0] + if not preferred_lang: + return + if preferred_lang not in self._SUPPORTED_LANG_CODES: + raise ExtractorError( + f'Unsupported language code: {preferred_lang}. 
Supported language codes (case-sensitive): {join_nonempty(*self._SUPPORTED_LANG_CODES, delim=", ")}.', + expected=True) + elif preferred_lang != 'en': + self.report_warning( + f'Preferring "{preferred_lang}" translated fields. Note that some metadata extraction may fail or be incorrect.') + return preferred_lang + def _initialize_consent(self): cookies = self._get_cookies('https://www.youtube.com/') if cookies.get('__Secure-3PSID'): @@ -376,7 +516,7 @@ def _initialize_pref(self): pref = dict(urllib.parse.parse_qsl(pref_cookie.value)) except ValueError: self.report_warning('Failed to parse user PREF cookie' + bug_reports_message()) - pref.update({'hl': 'en', 'tz': 'UTC'}) + pref.update({'hl': self._preferred_lang or 'en', 'tz': 'UTC'}) self._set_cookie('.youtube.com', name='PREF', value=urllib.parse.urlencode(pref)) def _real_initialize(self): @@ -424,7 +564,7 @@ def _extract_context(self, ytcfg=None, default_client='web'): (ytcfg, self._get_default_ytcfg(default_client)), 'INNERTUBE_CONTEXT', expected_type=dict) # Enforce language and tz for extraction client_context = traverse_obj(context, 'client', expected_type=dict, default={}) - client_context.update({'hl': 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0}) + client_context.update({'hl': self._preferred_lang or 'en', 'timeZone': 'UTC', 'utcOffsetMinutes': 0}) return context _SAPISID = None @@ -550,7 +690,8 @@ def generate_api_headers( 'Origin': origin, 'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg), 'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg), - 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg) + 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg), + 'User-Agent': self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT']['client']['userAgent'], default_client=default_client) } if session_index is None: session_index = self._extract_session_index(ytcfg) @@ -561,7 +702,7 @@ def generate_api_headers( if auth is not None: headers['Authorization'] = auth headers['X-Origin'] = origin - return {h: v for h, v in headers.items() if v is not None} + return filter_dict(headers) def _download_ytcfg(self, client, video_id): url = { @@ -616,20 +757,10 @@ def _extract_continuation(cls, renderer): if next_continuation: return next_continuation - contents = [] - for key in ('contents', 'items'): - contents.extend(try_get(renderer, lambda x: x[key], list) or []) - - for content in contents: - if not isinstance(content, dict): - continue - continuation_ep = try_get( - content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'], - lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']), - dict) - continuation = cls._extract_continuation_ep_data(continuation_ep) - if continuation: - return continuation + return traverse_obj(renderer, ( + ('contents', 'items', 'rows'), ..., 'continuationItemRenderer', + ('continuationEndpoint', ('button', 'buttonRenderer', 'command')) + ), get_all=False, expected_type=cls._extract_continuation_ep_data) @classmethod def _extract_alerts(cls, data): @@ -645,12 +776,11 @@ def _extract_alerts(cls, data): yield alert_type, message def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False): - errors = [] - warnings = [] + errors, warnings = [], [] for alert_type, alert_message in alerts: if alert_type.lower() == 'error' and fatal: errors.append([alert_type, alert_message]) - else: + elif alert_message not in self._IGNORED_WARNINGS: warnings.append([alert_type, 
alert_message]) for alert_type, alert_message in (warnings + errors[:-1]): @@ -662,13 +792,49 @@ def _extract_and_report_alerts(self, data, *args, **kwargs): return self._report_alerts(self._extract_alerts(data), *args, **kwargs) def _extract_badges(self, renderer: dict): - badges = set() - for badge in try_get(renderer, lambda x: x['badges'], list) or []: - label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], str) - if label: - badges.add(label.lower()) + privacy_icon_map = { + 'PRIVACY_UNLISTED': BadgeType.AVAILABILITY_UNLISTED, + 'PRIVACY_PRIVATE': BadgeType.AVAILABILITY_PRIVATE, + 'PRIVACY_PUBLIC': BadgeType.AVAILABILITY_PUBLIC + } + + badge_style_map = { + 'BADGE_STYLE_TYPE_MEMBERS_ONLY': BadgeType.AVAILABILITY_SUBSCRIPTION, + 'BADGE_STYLE_TYPE_PREMIUM': BadgeType.AVAILABILITY_PREMIUM, + 'BADGE_STYLE_TYPE_LIVE_NOW': BadgeType.LIVE_NOW + } + + label_map = { + 'unlisted': BadgeType.AVAILABILITY_UNLISTED, + 'private': BadgeType.AVAILABILITY_PRIVATE, + 'members only': BadgeType.AVAILABILITY_SUBSCRIPTION, + 'live': BadgeType.LIVE_NOW, + 'premium': BadgeType.AVAILABILITY_PREMIUM + } + + badges = [] + for badge in traverse_obj(renderer, ('badges', ..., 'metadataBadgeRenderer')): + badge_type = ( + privacy_icon_map.get(traverse_obj(badge, ('icon', 'iconType'), expected_type=str)) + or badge_style_map.get(traverse_obj(badge, 'style')) + ) + if badge_type: + badges.append({'type': badge_type}) + continue + + # fallback, won't work in some languages + label = traverse_obj(badge, 'label', expected_type=str, default='') + for match, label_badge_type in label_map.items(): + if match in label.lower(): + badges.append({'type': badge_type}) + continue + return badges + @staticmethod + def _has_badge(badges, badge_type): + return bool(traverse_obj(badges, lambda _, v: v['type'] == badge_type)) + @staticmethod def _get_text(data, *path_list, max_runs=None): for path in path_list or [None]: @@ -687,7 +853,7 @@ def _get_text(data, *path_list, max_runs=None): runs = item runs = runs[:min(len(runs), max_runs or len(runs))] - text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[])) + text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str)) if text: return text @@ -707,7 +873,7 @@ def _extract_thumbnails(data, *path_list): """ thumbnails = [] for path in path_list or [()]: - for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...), default=[]): + for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)): thumbnail_url = url_or_none(thumbnail.get('url')) if not thumbnail_url: continue @@ -739,9 +905,9 @@ def extract_relative_time(relative_time_text): except ValueError: return None - def _extract_time_text(self, renderer, *path_list): - """@returns (timestamp, time_text)""" - text = self._get_text(renderer, *path_list) or '' + def _parse_time_text(self, text): + if not text: + return dt = self.extract_relative_time(text) timestamp = None if isinstance(dt, datetime.datetime): @@ -754,9 +920,10 @@ def _extract_time_text(self, renderer, *path_list): (r'([a-z]+\s*\d{1,2},?\s*20\d{2})', r'(?:.+|^)(?:live|premieres|ed|ing)(?:\s*(?:on|for))?\s*(.+\d)'), text.lower(), 'time text', default=None))) - if text and timestamp is None: - self.report_warning(f"Cannot parse localized time text '{text}'" + bug_reports_message(), only_once=True) - return timestamp, text + if text and timestamp is None and self._preferred_lang in (None, 'en'): + self.report_warning( + f'Cannot parse localized time text "{text}"', only_once=True) + return timestamp 
def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None, ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None, @@ -805,40 +972,49 @@ def _extract_response(self, item_id, query, note='Downloading API JSON', headers # Youtube sometimes sends incomplete data # See: https://github.com/ytdl-org/youtube-dl/issues/28194 if not traverse_obj(response, *variadic(check_get_keys)): - retry.error = ExtractorError('Incomplete data received') + retry.error = ExtractorError('Incomplete data received', expected=True) continue return response @staticmethod def is_music_url(url): - return re.match(r'https?://music\.youtube\.com/', url) is not None + return re.match(r'(https?://)?music\.youtube\.com/', url) is not None def _extract_video(self, renderer): video_id = renderer.get('videoId') - title = self._get_text(renderer, 'title') + + reel_header_renderer = traverse_obj(renderer, ( + 'navigationEndpoint', 'reelWatchEndpoint', 'overlay', 'reelPlayerOverlayRenderer', + 'reelPlayerHeaderSupportedRenderers', 'reelPlayerHeaderRenderer')) + + title = self._get_text(renderer, 'title', 'headline') or self._get_text(reel_header_renderer, 'reelTitleText') description = self._get_text(renderer, 'descriptionSnippet') - duration = parse_duration(self._get_text( - renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text'))) + + duration = int_or_none(renderer.get('lengthSeconds')) + if duration is None: + duration = parse_duration(self._get_text( + renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text'))) if duration is None: + # XXX: should write a parser to be more general to support more cases (e.g. shorts in shorts tab) duration = parse_duration(self._search_regex( r'(?i)(ago)(?!.*\1)\s+(?P[a-z0-9 ,]+?)(?:\s+[\d,]+\s+views)?(?:\s+-\s+play\s+short)?$', traverse_obj(renderer, ('title', 'accessibility', 'accessibilityData', 'label'), default='', expected_type=str), video_id, default=None, group='duration')) - view_count = self._get_count(renderer, 'viewCountText') - - uploader = self._get_text(renderer, 'ownerText', 'shortBylineText') channel_id = traverse_obj( renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'), expected_type=str, get_all=False) - timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText') - scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False)) + if not channel_id: + channel_id = traverse_obj(reel_header_renderer, ('channelNavigationEndpoint', 'browseEndpoint', 'browseId')) + + channel_id = self.ucid_or_none(channel_id) + overlay_style = traverse_obj( renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'), get_all=False, expected_type=str) badges = self._extract_badges(renderer) - thumbnails = self._extract_thumbnails(renderer, 'thumbnail') + navigation_url = urljoin('https://www.youtube.com/', traverse_obj( renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'), expected_type=str)) or '' @@ -846,6 +1022,22 @@ def _extract_video(self, renderer): if overlay_style == 'SHORTS' or '/shorts/' in navigation_url: url = f'https://www.youtube.com/shorts/{video_id}' + time_text = (self._get_text(renderer, 'publishedTimeText', 'videoInfo') + or self._get_text(reel_header_renderer, 'timestampText') or '') + scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False)) + + 
live_status = ( + 'is_upcoming' if scheduled_timestamp is not None + else 'was_live' if 'streamed' in time_text.lower() + else 'is_live' if overlay_style == 'LIVE' or self._has_badge(badges, BadgeType.LIVE_NOW) + else None) + + # videoInfo is a string like '50K views • 10 years ago'. + view_count_text = self._get_text(renderer, 'viewCountText', 'shortViewCountText', 'videoInfo') or '' + view_count = (0 if 'no views' in view_count_text.lower() + else self._get_count({'simpleText': view_count_text})) + view_count_field = 'concurrent_view_count' if live_status in ('is_live', 'is_upcoming') else 'view_count' + return { '_type': 'url', 'ie_key': YoutubeIE.ie_key(), @@ -854,19 +1046,24 @@ def _extract_video(self, renderer): 'title': title, 'description': description, 'duration': duration, - 'view_count': view_count, - 'uploader': uploader, 'channel_id': channel_id, - 'thumbnails': thumbnails, - 'upload_date': (strftime_or_none(timestamp, '%Y%m%d') - if self._configuration_arg('approximate_date', ie_key='youtubetab') - else None), - 'live_status': ('is_upcoming' if scheduled_timestamp is not None - else 'was_live' if 'streamed' in time_text.lower() - else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges - else None), + 'channel': (self._get_text(renderer, 'ownerText', 'shortBylineText') + or self._get_text(reel_header_renderer, 'channelTitleText')), + 'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None, + 'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'), + 'timestamp': (self._parse_time_text(time_text) + if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE) + else None), 'release_timestamp': scheduled_timestamp, - 'availability': self._availability(needs_premium='premium' in badges, needs_subscription='members only' in badges) + 'availability': + 'public' if self._has_badge(badges, BadgeType.AVAILABILITY_PUBLIC) + else self._availability( + is_private=self._has_badge(badges, BadgeType.AVAILABILITY_PRIVATE) or None, + needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None, + needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None, + is_unlisted=self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) or None), + view_count_field: view_count, + 'live_status': live_status } @@ -885,7 +1082,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains (?:.*?\#/)? # handle anchor (#/) redirect urls (?: # the various things that can precede the ID: - (?:(?:v|embed|e|shorts)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/ + (?:(?:v|embed|e|shorts|live)/(?!videoseries|live_stream)) # v/ or embed/ or e/ or shorts/ |(?: # or the v= param in all its forms (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) (?:\?|\#!?) # the params delimiter ? or # or #! @@ -907,19 +1104,27 @@ class YoutubeIE(YoutubeBaseInfoExtractor): (?:\#|$)""" % { 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES), } - _EMBED_REGEX = [r'''(?x) - (?: - ]+?src=| - data-video-url=| - ]+?src=| - embedSWF\(?:\s*| - ]+data=| - new\s+SWFObject\( - ) - (["\']) - (?P(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/ - (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?) 
- \1'''] + _EMBED_REGEX = [ + r'''(?x) + (?: + <(?:[0-9A-Za-z-]+?)?iframe[^>]+?src=| + data-video-url=| + ]+?src=| + embedSWF\(?:\s*| + ]+data=| + new\s+SWFObject\( + ) + (["\']) + (?P(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/ + (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?) + \1''', + # https://wordpress.org/plugins/lazy-load-for-videos/ + r'''(?xs) + ]*\bhref="(?Phttps://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})" + \s[^>]*\bclass="[^"]*\blazy-load-youtube''', + ] + _RETURN_TYPE = 'video' # XXX: How to handle multifeed? + _PLAYER_INFO_RE = ( r'/s/player/(?P[a-zA-Z0-9_-]{8,})/player', r'/(?P[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$', @@ -1046,9 +1251,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', - 'uploader': 'Philipp Hagemeister', - 'uploader_id': 'phihag', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'channel': 'Philipp Hagemeister', 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q', 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q', @@ -1067,7 +1269,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'start_time': 1, 'end_time': 9, 'comment_count': int, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Philipp Hagemeister', + 'uploader_url': 'https://www.youtube.com/@PhilippHagemeister', + 'uploader_id': '@PhilippHagemeister', + 'heatmap': 'count:100', } }, { @@ -1079,9 +1285,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'upload_date': '20120608', 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012', 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7', - 'uploader': 'SET India', - 'uploader_id': 'setindia', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia', 'age_limit': 18, }, 'skip': 'Private video', @@ -1093,9 +1296,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'BaW_jenozKc', 'ext': 'mp4', 'title': 'youtube-dl test video "\'/\\ä↭𝕐', - 'uploader': 'Philipp Hagemeister', - 'uploader_id': 'phihag', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag', 'channel': 'Philipp Hagemeister', 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q', 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q', @@ -1112,7 +1312,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'live_status': 'not_live', 'age_limit': 0, 'comment_count': int, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Philipp Hagemeister', + 'uploader_url': 'https://www.youtube.com/@PhilippHagemeister', + 'uploader_id': '@PhilippHagemeister', }, 'params': { 'skip_download': True, @@ -1125,10 +1328,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'a9LDPn-MO4I', 'ext': 'm4a', 'upload_date': '20121002', - 'uploader_id': '8KVIDEO', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO', 'description': '', - 'uploader': '8KVIDEO', 'title': 'UHDTV TEST 8K VIDEO.mp4' }, 'params': { @@ -1146,8 +1346,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. 
Spree Wilson', 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf', 'duration': 244, - 'uploader': 'AfrojackVEVO', - 'uploader_id': 'AfrojackVEVO', 'upload_date': '20131011', 'abr': 129.495, 'like_count': int, @@ -1159,13 +1357,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'live_status': 'not_live', 'thumbnail': 'https://i.ytimg.com/vi_webp/IB3lcPjvWLA/maxresdefault.webp', 'channel': 'Afrojack', - 'uploader_url': 'http://www.youtube.com/user/AfrojackVEVO', 'tags': 'count:19', 'availability': 'public', 'categories': ['Music'], 'age_limit': 0, 'alt_title': 'The Spark', - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Afrojack', + 'uploader_url': 'https://www.youtube.com/@Afrojack', + 'uploader_id': '@Afrojack', }, 'params': { 'youtube_include_dash_manifest': True, @@ -1182,9 +1382,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer', 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}', 'duration': 142, - 'uploader': 'The Witcher', - 'uploader_id': 'WitcherGame', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame', 'upload_date': '20140605', 'age_limit': 18, 'categories': ['Gaming'], @@ -1198,7 +1395,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'channel_id': 'UCzybXLxv08IApdjdN0mJhEg', 'playable_in_embed': True, 'view_count': int, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'The Witcher', + 'uploader_url': 'https://www.youtube.com/@thewitcher', + 'uploader_id': '@thewitcher', }, }, { @@ -1210,12 +1410,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'Godzilla 2 (Official Video)', 'description': 'md5:bf77e03fcae5529475e500129b05668a', 'upload_date': '20200408', - 'uploader_id': 'FlyingKitty900', - 'uploader': 'FlyingKitty', 'age_limit': 18, 'availability': 'needs_auth', 'channel_id': 'UCYQT13AtrJC0gsM1far_zJg', - 'uploader_url': 'http://www.youtube.com/user/FlyingKitty900', 'channel': 'FlyingKitty', 'channel_url': 'https://www.youtube.com/channel/UCYQT13AtrJC0gsM1far_zJg', 'view_count': int, @@ -1226,7 +1423,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'like_count': int, 'duration': 177, 'playable_in_embed': True, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'FlyingKitty', + 'uploader_url': 'https://www.youtube.com/@FlyingKitty900', + 'uploader_id': '@FlyingKitty900', + 'comment_count': int, }, }, { @@ -1237,13 +1438,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': '[MMD] Adios - EVERGLOW [+Motion DL]', 'ext': 'mp4', 'upload_date': '20191228', - 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', - 'uploader': 'Projekt Melody', 'description': 'md5:17eccca93a786d51bc67646756894066', 'age_limit': 18, 'like_count': int, 'availability': 'needs_auth', - 'uploader_url': 'http://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ', 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', 'view_count': int, 'thumbnail': 'https://i.ytimg.com/vi_webp/Tq92D6wQ1mg/sddefault.webp', @@ -1255,7 +1453,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'duration': 106, 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ', 'comment_count': int, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Projekt Melody', + 'uploader_url': 'https://www.youtube.com/@ProjektMelody', + 'uploader_id': '@ProjektMelody', }, }, { @@ -1265,8 +1466,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'MeJVWBSsPAY', 'ext': 'mp4', 'title': 'OOMPH! 
- Such Mich Find Mich (Lyrics)', - 'uploader': 'Herr Lurik', - 'uploader_id': 'st3in234', 'description': 'Fan Video. Music & Lyrics by OOMPH!.', 'upload_date': '20130730', 'track': 'Such mich find mich', @@ -1283,11 +1482,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'channel_id': 'UCdR3RSDPqub28LjZx0v9-aA', 'categories': ['Music'], 'availability': 'public', - 'uploader_url': 'http://www.youtube.com/user/st3in234', 'channel_url': 'https://www.youtube.com/channel/UCdR3RSDPqub28LjZx0v9-aA', 'live_status': 'not_live', 'artist': 'OOMPH!', - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Herr Lurik', + 'uploader_url': 'https://www.youtube.com/@HerrLurik', + 'uploader_id': '@HerrLurik', }, }, { @@ -1304,11 +1505,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'duration': 266, 'upload_date': '20100430', - 'uploader_id': 'deadmau5', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5', 'creator': 'deadmau5', 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336', - 'uploader': 'deadmau5', 'title': 'Deadmau5 - Some Chords (HD)', 'alt_title': 'Some Chords', 'availability': 'public', @@ -1326,7 +1524,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'channel_url': 'https://www.youtube.com/channel/UCYEK6xds6eo-3tr4xRdflmQ', 'categories': ['Music'], 'album': 'Some Chords', - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'deadmau5', + 'uploader_url': 'https://www.youtube.com/@deadmau5', + 'uploader_id': '@deadmau5', }, 'expected_warnings': [ 'DASH manifest missing', @@ -1340,10 +1541,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'ext': 'mp4', 'duration': 6085, 'upload_date': '20150827', - 'uploader_id': 'olympic', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic', 'description': 'md5:04bbbf3ccceb6795947572ca36f45904', - 'uploader': 'Olympics', 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games', 'like_count': int, 'release_timestamp': 1343767800, @@ -1359,7 +1557,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'live_status': 'was_live', 'view_count': int, 'channel_url': 'https://www.youtube.com/channel/UCTl3QQTvqHFjurroKxexy2Q', - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': 'Olympics', + 'uploader_url': 'https://www.youtube.com/@Olympics', + 'uploader_id': '@Olympics', }, 'params': { 'skip_download': 'requires avconv', @@ -1374,10 +1575,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'stretched_ratio': 16 / 9., 'duration': 85, 'upload_date': '20110310', - 'uploader_id': 'AllenMeow', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow', 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯', - 'uploader': '孫ᄋᄅ', 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人', 'playable_in_embed': True, 'channel': '孫ᄋᄅ', @@ -1392,7 +1590,10 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'live_status': 'not_live', 'availability': 'unlisted', 'comment_count': int, - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader': '孫ᄋᄅ', + 'uploader_url': 'https://www.youtube.com/@AllenMeow', + 'uploader_id': '@AllenMeow', }, }, # url_encoded_fmt_stream_map is empty string @@ -1404,8 +1605,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге', 'description': '', 'upload_date': '20150404', - 'uploader_id': 'spbelect', - 'uploader': 'Наблюдатели Петербурга', }, 'params': { 'skip_download': 'requires avconv', @@ -1422,9 
+1621,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'description': 'md5:116377fd2963b81ec4ce64b542173306', 'duration': 220, 'upload_date': '20150625', - 'uploader_id': 'dorappi2000', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000', - 'uploader': 'dorappi2000', 'formats': 'mincount:31', }, 'skip': 'not actual anymore', @@ -1437,9 +1633,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): 'id': 'CsmdDsKjzN8', 'ext': 'mp4', 'upload_date': '20150501', # According to ' embed - { - 'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/', - 'md5': '873c81d308b979f0e23ee7e620b312a3', + }, { + # Prefer primary title+description language metadata by default + # Do not prefer translated description if primary is empty + 'url': 'https://www.youtube.com/watch?v=el3E4MbxRqQ', 'info_dict': { - 'id': 'msN87y-iEx0', + 'id': 'el3E4MbxRqQ', 'ext': 'mp4', - 'title': 'Feynman: Mirrors FUN TO IMAGINE 6', - 'upload_date': '20080526', - 'description': 'md5:873c81d308b979f0e23ee7e620b312a3', - 'uploader': 'Christopher Sykes', - 'uploader_id': 'ChristopherJSykes', - 'age_limit': 0, - 'tags': ['feynman', 'mirror', 'science', 'physics', 'imagination', 'fun', 'cool', 'puzzle'], - 'channel_id': 'UCCeo--lls1vna5YJABWAcVA', - 'playable_in_embed': True, - 'thumbnail': 'https://i.ytimg.com/vi/msN87y-iEx0/hqdefault.jpg', + 'title': 'dlp test video 2 - primary sv no desc', + 'description': '', + 'channel': 'cole-dlp-test-acc', + 'tags': [], + 'view_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', 'like_count': int, - 'comment_count': int, - 'channel': 'Christopher Sykes', + 'playable_in_embed': True, + 'availability': 'unlisted', + 'thumbnail': r're:^https?://.*\.jpg', + 'age_limit': 0, + 'duration': 5, 'live_status': 'not_live', - 'channel_url': 'https://www.youtube.com/channel/UCCeo--lls1vna5YJABWAcVA', - 'availability': 'public', - 'duration': 195, + 'upload_date': '20220908', + 'categories': ['People & Blogs'], + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'params': {'skip_download': True} + }, { + # Extractor argument: prefer translated title+description + 'url': 'https://www.youtube.com/watch?v=gHKT4uU8Zng', + 'info_dict': { + 'id': 'gHKT4uU8Zng', + 'ext': 'mp4', + 'channel': 'cole-dlp-test-acc', + 'tags': [], + 'duration': 5, + 'live_status': 'not_live', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'upload_date': '20220728', 'view_count': int, - 'categories': ['Science & Technology'], + 'categories': ['People & Blogs'], + 'thumbnail': r're:^https?://.*\.jpg', + 'title': 'dlp test video title translated (fr)', + 'availability': 'public', + 'age_limit': 0, + 'description': 'dlp test video description translated (fr)', + 'playable_in_embed': True, + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'params': {'skip_download': True, 'extractor_args': {'youtube': {'lang': ['fr']}}}, + 'expected_warnings': [r'Preferring "fr" translated fields'], + }, { + 'note': '6 channel audio', + 'url': 'https://www.youtube.com/watch?v=zgdo7-RRjgo', + 'only_matching': True, + }, { + 'note': 'Multiple HLS formats with same itag', + 'url': 'https://www.youtube.com/watch?v=kX3nB4PpJko', + 'info_dict': { + 'id': 'kX3nB4PpJko', + 'ext': 'mp4', + 
'categories': ['Entertainment'], + 'description': 'md5:e8031ff6e426cdb6a77670c9b81f6fa6', + 'live_status': 'not_live', + 'duration': 937, 'channel_follower_count': int, - 'uploader_url': 'http://www.youtube.com/user/ChristopherJSykes', + 'thumbnail': 'https://i.ytimg.com/vi_webp/kX3nB4PpJko/maxresdefault.webp', + 'title': 'Last To Take Hand Off Jet, Keeps It!', + 'channel': 'MrBeast', + 'playable_in_embed': True, + 'view_count': int, + 'upload_date': '20221112', + 'channel_url': 'https://www.youtube.com/channel/UCX6OQ3DkcsbYNE6H8uQQuVA', + 'age_limit': 0, + 'availability': 'public', + 'channel_id': 'UCX6OQ3DkcsbYNE6H8uQQuVA', + 'like_count': int, + 'tags': [], + 'uploader': 'MrBeast', + 'uploader_url': 'https://www.youtube.com/@MrBeast', + 'uploader_id': '@MrBeast', }, - 'params': { - 'skip_download': True, - } - }, - ] - - @classmethod - def suitable(cls, url): - from ..utils import parse_qs - - qs = parse_qs(url) - if qs.get('list', [None])[0]: - return False - return super().suitable(url) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._code_cache = {} + 'params': {'extractor_args': {'youtube': {'player_client': ['ios']}}, 'format': '233-1'}, + }, { + 'note': 'Audio formats with Dynamic Range Compression', + 'url': 'https://www.youtube.com/watch?v=Tq92D6wQ1mg', + 'info_dict': { + 'id': 'Tq92D6wQ1mg', + 'ext': 'webm', + 'title': '[MMD] Adios - EVERGLOW [+Motion DL]', + 'channel_url': 'https://www.youtube.com/channel/UC1yoRdFoFJaCY-AGfD9W0wQ', + 'channel_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ', + 'channel_follower_count': int, + 'description': 'md5:17eccca93a786d51bc67646756894066', + 'upload_date': '20191228', + 'tags': ['mmd', 'dance', 'mikumikudance', 'kpop', 'vtuber'], + 'playable_in_embed': True, + 'like_count': int, + 'categories': ['Entertainment'], + 'thumbnail': 'https://i.ytimg.com/vi/Tq92D6wQ1mg/sddefault.jpg', + 'age_limit': 18, + 'channel': 'Projekt Melody', + 'view_count': int, + 'availability': 'needs_auth', + 'comment_count': int, + 'live_status': 'not_live', + 'duration': 106, + 'uploader': 'Projekt Melody', + 'uploader_id': '@ProjektMelody', + 'uploader_url': 'https://www.youtube.com/@ProjektMelody', + }, + 'params': {'extractor_args': {'youtube': {'player_client': ['tv_embedded']}}, 'format': '251-drc'}, + }, + { + 'url': 'https://www.youtube.com/live/qVv6vCqciTM', + 'info_dict': { + 'id': 'qVv6vCqciTM', + 'ext': 'mp4', + 'age_limit': 0, + 'comment_count': int, + 'chapters': 'count:13', + 'upload_date': '20221223', + 'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg', + 'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA', + 'like_count': int, + 'release_date': '20221223', + 'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'], + 'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】', + 'view_count': int, + 'playable_in_embed': True, + 'duration': 4438, + 'availability': 'public', + 'channel_follower_count': int, + 'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA', + 'categories': ['Entertainment'], + 'live_status': 'was_live', + 'release_timestamp': 1671793345, + 'channel': 'さなちゃんねる', + 'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d', + 'uploader': 'さなちゃんねる', + 'uploader_url': 'https://www.youtube.com/@sana_natori', + 'uploader_id': '@sana_natori', + }, + }, + { + # Fallbacks when webpage and web client is unavailable + 'url': 'https://www.youtube.com/watch?v=wSSmNUl9Snw', + 'info_dict': { + 'id': 'wSSmNUl9Snw', + 'ext': 'mp4', + # 'categories': ['Science & Technology'], + 'view_count': int, 
+ 'chapters': 'count:2', + 'channel': 'Scott Manley', + 'like_count': int, + 'age_limit': 0, + # 'availability': 'public', + 'channel_follower_count': int, + 'live_status': 'not_live', + 'upload_date': '20170831', + 'duration': 682, + 'tags': 'count:8', + 'uploader_url': 'https://www.youtube.com/@scottmanley', + 'description': 'md5:f4bed7b200404b72a394c2f97b782c02', + 'uploader': 'Scott Manley', + 'uploader_id': '@scottmanley', + 'title': 'The Computer Hack That Saved Apollo 14', + 'channel_id': 'UCxzC4EngIsMrPmbm6Nxvb-A', + 'thumbnail': r're:^https?://.*\.webp', + 'channel_url': 'https://www.youtube.com/channel/UCxzC4EngIsMrPmbm6Nxvb-A', + 'playable_in_embed': True, + }, + 'params': { + 'extractor_args': {'youtube': {'player_client': ['android'], 'player_skip': ['webpage']}}, + }, + }, + ] + + _WEBPAGE_TESTS = [ + # YouTube embed + { + 'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/', + 'md5': '873c81d308b979f0e23ee7e620b312a3', + 'info_dict': { + 'id': 'msN87y-iEx0', + 'ext': 'mp4', + 'title': 'Feynman: Mirrors FUN TO IMAGINE 6', + 'upload_date': '20080526', + 'description': 'md5:873c81d308b979f0e23ee7e620b312a3', + 'age_limit': 0, + 'tags': ['feynman', 'mirror', 'science', 'physics', 'imagination', 'fun', 'cool', 'puzzle'], + 'channel_id': 'UCCeo--lls1vna5YJABWAcVA', + 'playable_in_embed': True, + 'thumbnail': 'https://i.ytimg.com/vi/msN87y-iEx0/hqdefault.jpg', + 'like_count': int, + 'comment_count': int, + 'channel': 'Christopher Sykes', + 'live_status': 'not_live', + 'channel_url': 'https://www.youtube.com/channel/UCCeo--lls1vna5YJABWAcVA', + 'availability': 'public', + 'duration': 195, + 'view_count': int, + 'categories': ['Science & Technology'], + 'channel_follower_count': int, + 'uploader': 'Christopher Sykes', + 'uploader_url': 'https://www.youtube.com/@ChristopherSykesDocumentaries', + 'uploader_id': '@ChristopherSykesDocumentaries', + }, + 'params': { + 'skip_download': True, + } + }, + ] + + @classmethod + def suitable(cls, url): + from ..utils import parse_qs + + qs = parse_qs(url) + if qs.get('list', [None])[0]: + return False + return super().suitable(url) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._code_cache = {} self._player_cache = {} - def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data): + def _prepare_live_from_start_formats(self, formats, video_id, live_start_time, url, webpage_url, smuggled_data, is_live): lock = threading.Lock() - - is_live = True start_time = time.time() formats = [f for f in formats if f.get('is_from_start')] @@ -2320,38 +2736,44 @@ def refetch_manifest(format_id, delay): return _, _, prs, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url) - video_details = traverse_obj( - prs, (..., 'videoDetails'), expected_type=dict, default=[]) + video_details = traverse_obj(prs, (..., 'videoDetails'), expected_type=dict) microformats = traverse_obj( prs, (..., 'microformat', 'playerMicroformatRenderer'), - expected_type=dict, default=[]) - _, is_live, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url) + expected_type=dict) + _, live_status, _, formats, _ = self._list_formats(video_id, microformats, video_details, prs, player_url) + is_live = live_status == 'is_live' start_time = time.time() def mpd_feed(format_id, delay): """ @returns (manifest_url, manifest_stream_number, is_live) or None """ - with lock: - 
refetch_manifest(format_id, delay) - - f = next((f for f in formats if f['format_id'] == format_id), None) - if not f: - if not is_live: - self.to_screen(f'{video_id}: Video is no longer live') - else: - self.report_warning( - f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}') - return None - return f['manifest_url'], f['manifest_stream_number'], is_live + for retry in self.RetryManager(fatal=False): + with lock: + refetch_manifest(format_id, delay) + + f = next((f for f in formats if f['format_id'] == format_id), None) + if not f: + if not is_live: + retry.error = f'{video_id}: Video is no longer live' + else: + retry.error = f'Cannot find refreshed manifest for format {format_id}{bug_reports_message()}' + continue + return f['manifest_url'], f['manifest_stream_number'], is_live + return None for f in formats: - f['is_live'] = True - f['protocol'] = 'http_dash_segments_generator' - f['fragments'] = functools.partial( - self._live_dash_fragments, f['format_id'], live_start_time, mpd_feed) + f['is_live'] = is_live + gen = functools.partial(self._live_dash_fragments, video_id, f['format_id'], + live_start_time, mpd_feed, not is_live and f.copy()) + if is_live: + f['fragments'] = gen + f['protocol'] = 'http_dash_segments_generator' + else: + f['fragments'] = LazyList(gen({})) + del f['is_from_start'] - def _live_dash_fragments(self, format_id, live_start_time, mpd_feed, ctx): + def _live_dash_fragments(self, video_id, format_id, live_start_time, mpd_feed, manifestless_orig_fmt, ctx): FETCH_SPAN, MAX_DURATION = 5, 432000 mpd_url, stream_number, is_live = None, None, True @@ -2382,15 +2804,18 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate): return False, last_seq elif old_mpd_url == mpd_url: return True, last_seq - try: - fmts, _ = self._extract_mpd_formats_and_subtitles( - mpd_url, None, note=False, errnote=False, fatal=False) - except ExtractorError: - fmts = None - if not fmts: - no_fragment_score += 2 - return False, last_seq - fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number) + if manifestless_orig_fmt: + fmt_info = manifestless_orig_fmt + else: + try: + fmts, _ = self._extract_mpd_formats_and_subtitles( + mpd_url, None, note=False, errnote=False, fatal=False) + except ExtractorError: + fmts = None + if not fmts: + no_fragment_score += 2 + return False, last_seq + fmt_info = next(x for x in fmts if x['manifest_stream_number'] == stream_number) fragments = fmt_info['fragments'] fragment_base_url = fmt_info['fragment_base_url'] assert fragment_base_url @@ -2398,6 +2823,7 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate): _last_seq = int(re.search(r'(?:/|^)sq/(\d+)', fragments[-1]['path']).group(1)) return True, _last_seq + self.write_debug(f'[{video_id}] Generating fragments for format {format_id}') while is_live: fetch_time = time.time() if no_fragment_score > 30: @@ -2451,6 +2877,11 @@ def _extract_sequence_from_mpd(refresh_sequence, immediate): except ExtractorError: continue + if manifestless_orig_fmt: + # Stop at the first iteration if running for post-live manifestless; + # fragment count no longer increase since it starts + break + time.sleep(max(0, FETCH_SPAN + fetch_time - time.time())) def _extract_player_url(self, *ytcfgs, webpage=None): @@ -2504,20 +2935,17 @@ def _extract_signature_function(self, video_id, player_url, example_sig): assert os.path.basename(func_id) == func_id self.write_debug(f'Extracting signature function {func_id}') - cache_spec = self.cache.load('youtube-sigfuncs', 
func_id) - if cache_spec is not None: - return lambda s: ''.join(s[i] for i in cache_spec) + cache_spec, code = self.cache.load('youtube-sigfuncs', func_id), None - code = self._load_player(video_id, player_url) + if not cache_spec: + code = self._load_player(video_id, player_url) if code: res = self._parse_sig_js(code) - test_string = ''.join(map(chr, range(len(example_sig)))) - cache_res = res(test_string) - cache_spec = [ord(c) for c in cache_res] - + cache_spec = [ord(c) for c in res(test_string)] self.cache.store('youtube-sigfuncs', func_id, cache_spec) - return res + + return lambda s: ''.join(s[i] for i in cache_spec) def _print_sig_code(self, func, example_sig): if not self.get_param('youtube_print_sig_code'): @@ -2567,17 +2995,14 @@ def _parse_sig_js(self, jscode): r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P[a-zA-Z0-9$]+)\(', r'\bm=(?P[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)', r'\bc&&\(c=(?P[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)', - r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)', - r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', + r'(?:\b|[^a-zA-Z0-9$])(?P[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?', r'(?P[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # Obsolete patterns - r'(["\'])signature\1\s*,\s*(?P[a-zA-Z0-9$]+)\(', + r'("|\')signature\1\s*,\s*(?P[a-zA-Z0-9$]+)\(', r'\.sig\|\|(?P[a-zA-Z0-9$]+)\(', r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P[a-zA-Z0-9$]+)\(', r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P[a-zA-Z0-9$]+)\(', r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P[a-zA-Z0-9$]+)\(', - r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P[a-zA-Z0-9$]+)\(', - r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P[a-zA-Z0-9$]+)\(', r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P[a-zA-Z0-9$]+)\('), jscode, 'Initial JS player signature function name', group='sig') @@ -2585,18 +3010,29 @@ def _parse_sig_js(self, jscode): initial_function = jsi.extract_function(funcname) return lambda s: initial_function([s]) + def _cached(self, func, *cache_id): + def inner(*args, **kwargs): + if cache_id not in self._player_cache: + try: + self._player_cache[cache_id] = func(*args, **kwargs) + except ExtractorError as e: + self._player_cache[cache_id] = e + except Exception as e: + self._player_cache[cache_id] = ExtractorError(traceback.format_exc(), cause=e) + + ret = self._player_cache[cache_id] + if isinstance(ret, Exception): + raise ret + return ret + return inner + def _decrypt_signature(self, s, video_id, player_url): """Turn the encrypted s field into a working signature""" - try: - player_id = (player_url, self._signature_cache_id(s)) - if player_id not in self._player_cache: - func = self._extract_signature_function(video_id, player_url, s) - self._player_cache[player_id] = func - func = self._player_cache[player_id] - self._print_sig_code(func, s) - return func(s) - except Exception as e: - raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id) + extract_sig = self._cached( + self._extract_signature_function, 'sig', player_url, self._signature_cache_id(s)) + func = extract_sig(video_id, player_url, s) + self._print_sig_code(func, s) + return 
func(s) def _decrypt_nsig(self, s, video_id, player_url): """Turn the encrypted n field into a working signature""" @@ -2604,48 +3040,87 @@ def _decrypt_nsig(self, s, video_id, player_url): raise ExtractorError('Cannot decrypt nsig without player_url') player_url = urljoin('https://www.youtube.com', player_url) - sig_id = ('nsig_value', s) - if sig_id in self._player_cache: - return self._player_cache[sig_id] + try: + jsi, player_id, func_code = self._extract_n_function_code(video_id, player_url) + except ExtractorError as e: + raise ExtractorError('Unable to extract nsig function code', cause=e) + if self.get_param('youtube_print_sig_code'): + self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n') try: - player_id = ('nsig', player_url) - if player_id not in self._player_cache: - self._player_cache[player_id] = self._extract_n_function(video_id, player_url) - func = self._player_cache[player_id] - self._player_cache[sig_id] = func(s) - self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}') - return self._player_cache[sig_id] - except Exception as e: - raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id) + extract_nsig = self._cached(self._extract_n_function_from_code, 'nsig func', player_url) + ret = extract_nsig(jsi, func_code)(s) + except JSInterpreter.Exception as e: + try: + jsi = PhantomJSwrapper(self, timeout=5000) + except ExtractorError: + raise e + self.report_warning( + f'Native nsig extraction failed: Trying with PhantomJS\n' + f' n = {s} ; player = {player_url}', video_id) + self.write_debug(e, only_once=True) + + args, func_body = func_code + ret = jsi.execute( + f'console.log(function({", ".join(args)}) {{ {func_body} }}({s!r}));', + video_id=video_id, note='Executing signature code').strip() + + self.write_debug(f'Decrypted nsig {s} => {ret}') + return ret def _extract_n_function_name(self, jscode): - nfunc, idx = self._search_regex( + funcname, idx = self._search_regex( r'\.get\("n"\)\)&&\(b=(?P[a-zA-Z0-9$]+)(?:\[(?P\d+)\])?\([a-zA-Z0-9]\)', jscode, 'Initial JS player n function name', group=('nfunc', 'idx')) if not idx: - return nfunc + return funcname + return json.loads(js_to_json(self._search_regex( - rf'var {re.escape(nfunc)}\s*=\s*(\[.+?\]);', jscode, - f'Initial JS player n function list ({nfunc}.{idx})')))[int(idx)] + rf'var {re.escape(funcname)}\s*=\s*(\[.+?\]);', jscode, + f'Initial JS player n function list ({funcname}.{idx})')))[int(idx)] - def _extract_n_function(self, video_id, player_url): + def _extract_n_function_code(self, video_id, player_url): player_id = self._extract_player_info(player_url) - func_code = self.cache.load('youtube-nsig', player_id) + func_code = self.cache.load('youtube-nsig', player_id, min_ver='2022.09.1') + jscode = func_code or self._load_player(video_id, player_url) + jsi = JSInterpreter(jscode) if func_code: - jsi = JSInterpreter(func_code) + return jsi, player_id, func_code + + func_name = self._extract_n_function_name(jscode) + + # For redundancy + func_code = self._search_regex( + r'''(?xs)%s\s*=\s*function\s*\((?P[\w$]+)\)\s* + # NB: The end of the regex is intentionally kept strict + {(?P.+?}\s*return\ [\w$]+.join\(""\))};''' % func_name, + jscode, 'nsig function', group=('var', 'code'), default=None) + if func_code: + func_code = ([func_code[0]], func_code[1]) else: - jscode = self._load_player(video_id, player_url) - funcname = self._extract_n_function_name(jscode) - jsi = JSInterpreter(jscode) - func_code = jsi.extract_function_code(funcname) - 
self.cache.store('youtube-nsig', player_id, func_code) + self.write_debug('Extracting nsig function with jsinterp') + func_code = jsi.extract_function_code(func_name) - if self.get_param('youtube_print_sig_code'): - self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n') + self.cache.store('youtube-nsig', player_id, func_code) + return jsi, player_id, func_code + + def _extract_n_function_from_code(self, jsi, func_code): + func = jsi.extract_function_from_code(*func_code) - return lambda s: jsi.extract_function_from_code(*func_code)([s]) + def extract_nsig(s): + try: + ret = func([s]) + except JSInterpreter.Exception: + raise + except Exception as e: + raise JSInterpreter.Exception(traceback.format_exc(), cause=e) + + if ret.startswith('enhanced_except_'): + raise JSInterpreter.Exception('Signature function returned an exception') + return ret + + return extract_nsig def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False): """ @@ -2701,7 +3176,7 @@ def _mark_watched(self, video_id, player_responses): # these seem to mark watchtime "history" in the real world # they're required, so send in a single value qs.update({ - 'st': video_length, + 'st': 0, 'et': video_length, }) @@ -2750,11 +3225,11 @@ def _extract_chapters_from_json(self, data, duration): 'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters' ), expected_type=list) - return self._extract_chapters( + return self._extract_chapters_helper( chapter_list, - chapter_time=lambda chapter: float_or_none( + start_function=lambda chapter: float_or_none( traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000), - chapter_title=lambda chapter: traverse_obj( + title_function=lambda chapter: traverse_obj( chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str), duration=duration) @@ -2762,40 +3237,25 @@ def _extract_chapters_from_engagement_panel(self, data, duration): content_list = traverse_obj( data, ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'), - expected_type=list, default=[]) + expected_type=list) chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription')) chapter_title = lambda chapter: self._get_text(chapter, 'title') return next(filter(None, ( - self._extract_chapters(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')), - chapter_time, chapter_title, duration) + self._extract_chapters_helper(traverse_obj(contents, (..., 'macroMarkersListItemRenderer')), + chapter_time, chapter_title, duration) for contents in content_list)), []) - def _extract_chapters_from_description(self, description, duration): - return self._extract_chapters( - re.findall(r'(?m)^((?:\d+:)?\d{1,2}:\d{2})\b\W*\s(.+?)\s*$', description or ''), - chapter_time=lambda x: parse_duration(x[0]), chapter_title=lambda x: x[1], - duration=duration, strict=False) - - def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration, strict=True): - if not duration: - return - chapter_list = [{ - 'start_time': chapter_time(chapter), - 'title': chapter_title(chapter), - } for chapter in chapter_list or []] - if not strict: - chapter_list.sort(key=lambda c: c['start_time'] or 0) - - chapters = [{'start_time': 0}] - for idx, chapter in enumerate(chapter_list): - if chapter['start_time'] is None: - self.report_warning(f'Incomplete chapter {idx}') - elif chapters[-1]['start_time'] <= chapter['start_time'] <= duration: - 
chapters.append(chapter) - else: - self.report_warning(f'Invalid start time for chapter "{chapter["title"]}"') - return chapters[1:] + def _extract_heatmap_from_player_overlay(self, data): + content_list = traverse_obj(data, ( + 'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer', 'decoratedPlayerBarRenderer', 'playerBar', + 'multiMarkersPlayerBarRenderer', 'markersMap', ..., 'value', 'heatmap', 'heatmapRenderer', 'heatMarkers', {list})) + return next(filter(None, ( + traverse_obj(contents, (..., 'heatMarkerRenderer', { + 'start_time': ('timeRangeStartMillis', {functools.partial(float_or_none, scale=1000)}), + 'end_time': {lambda x: (x['timeRangeStartMillis'] + x['markerDurationMillis']) / 1000}, + 'value': ('heatMarkerIntensityScoreNormalized', {float_or_none}), + })) for contents in content_list)), None) def _extract_comment(self, comment_renderer, parent=None): comment_id = comment_renderer.get('commentId') @@ -2804,8 +3264,10 @@ def _extract_comment(self, comment_renderer, parent=None): text = self._get_text(comment_renderer, 'contentText') - # note: timestamp is an estimate calculated from the current time and time_text - timestamp, time_text = self._extract_time_text(comment_renderer, 'publishedTimeText') + # Timestamp is an estimate calculated from the current time and time_text + time_text = self._get_text(comment_renderer, 'publishedTimeText') or '' + timestamp = self._parse_time_text(time_text) + author = self._get_text(comment_renderer, 'authorText') author_id = try_get(comment_renderer, lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], str) @@ -2878,6 +3340,21 @@ def extract_thread(contents): comment = self._extract_comment(comment_renderer, parent) if not comment: continue + is_pinned = bool(traverse_obj(comment_renderer, 'pinnedCommentBadge')) + comment_id = comment['id'] + if is_pinned: + tracker['pinned_comment_ids'].add(comment_id) + # Sometimes YouTube may break and give us infinite looping comments. + # See: https://github.com/yt-dlp/yt-dlp/issues/6290 + if comment_id in tracker['seen_comment_ids']: + if comment_id in tracker['pinned_comment_ids'] and not is_pinned: + # Pinned comments may appear a second time in newest first sort + # See: https://github.com/yt-dlp/yt-dlp/issues/6712 + continue + self.report_warning('Detected YouTube comments looping. Stopping comment extraction as we probably cannot get any more.') + yield + else: + tracker['seen_comment_ids'].add(comment['id']) tracker['running_total'] += 1 tracker['total_reply_comments' if parent else 'total_parent_comments'] += 1 @@ -2902,14 +3379,17 @@ def extract_thread(contents): est_total=0, current_page_thread=0, total_parent_comments=0, - total_reply_comments=0) + total_reply_comments=0, + seen_comment_ids=set(), + pinned_comment_ids=set() + ) # TODO: Deprecated # YouTube comments have a max depth of 2 max_depth = int_or_none(get_single_config_arg('max_comment_depth')) if max_depth: - self._downloader.deprecation_warning( - '[youtube] max_comment_depth extractor argument is deprecated. Set max replies in the max-comments extractor argument instead.') + self._downloader.deprecated_feature('[youtube] max_comment_depth extractor argument is deprecated. 
' + 'Set max replies in the max-comments extractor argument instead') if max_depth == 1 and parent: return @@ -2944,11 +3424,21 @@ def extract_thread(contents): note_prefix = '%sDownloading comment%s API JSON page %d %s' % ( ' ' if parent else '', ' replies' if parent else '', page_num, comment_prog_str) - - response = self._extract_response( - item_id=None, query=continuation, - ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, - check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None) + try: + response = self._extract_response( + item_id=None, query=continuation, + ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix, + check_get_keys='onResponseReceivedEndpoints' if not is_forced_continuation else None) + except ExtractorError as e: + # Ignore incomplete data error for replies if retries didn't work. + # This is to allow any other parent comments and comment threads to be downloaded. + # See: https://github.com/yt-dlp/yt-dlp/issues/4669 + if 'incomplete data' in str(e).lower() and parent and self.get_param('ignoreerrors') is True: + self.report_warning( + 'Received incomplete data for a comment reply thread and retrying did not help. ' + 'Ignoring to let other comments be downloaded.') + else: + raise is_forced_continuation = False continuation_contents = traverse_obj( response, 'onResponseReceivedEndpoints', expected_type=list, default=[]) @@ -2977,6 +3467,7 @@ def extract_thread(contents): message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1) if message and not parent and tracker['running_total'] == 0: self.report_warning(f'Youtube said: {message}', video_id=video_id, only_once=True) + raise self.CommentsDisabled @staticmethod def _generate_comment_continuation(video_id): @@ -3020,7 +3511,7 @@ def _is_agegated(player_response): if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')): return True - reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[]) + reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason'))) AGE_GATE_REASONS = ( 'confirm your age', 'age-restricted', 'inappropriate', # reason 'age_verification_required', 'age_check_required', # status @@ -3031,7 +3522,9 @@ def _is_agegated(player_response): def _is_unplayable(player_response): return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE' - def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr): + _STORY_PLAYER_PARAMS = '8AEB' + + def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr, smuggled_data): session_index = self._extract_session_index(player_ytcfg, master_ytcfg) syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr) @@ -3041,8 +3534,10 @@ def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, yt_query = { 'videoId': video_id, - 'params': '8AEB' # enable stories } + if smuggled_data.get('is_story') or _split_innertube_client(client)[0] == 'android': + yt_query['params'] = self._STORY_PLAYER_PARAMS + yt_query.update(self._generate_player_context(sts)) return self._extract_response( item_id=video_id, ep='player', query=yt_query, @@ -3075,7 +3570,7 @@ def _get_requested_clients(self, url, smuggled_data): return orderedSet(requested_clients) - def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg): + def 
_extract_player_responses(self, clients, video_id, webpage, master_ytcfg, smuggled_data): initial_pr = None if webpage: initial_pr = self._search_json( @@ -3125,7 +3620,7 @@ def append_client(*client_names): try: pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response( - client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr) + client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr, smuggled_data) except ExtractorError as e: if last_error: self.report_warning(last_error) @@ -3133,7 +3628,20 @@ def append_client(*client_names): continue if pr: - prs.append(pr) + # YouTube may return a different video player response than expected. + # See: https://github.com/TeamNewPipe/NewPipe/issues/8713 + pr_video_id = traverse_obj(pr, ('videoDetails', 'videoId')) + if pr_video_id and pr_video_id != video_id: + self.report_warning( + f'Skipping player response from {client} client (got player response for video "{pr_video_id}" instead of "{video_id}")' + bug_reports_message()) + else: + # Save client name for introspection later + name = short_client_name(client) + sd = traverse_obj(pr, ('streamingData', {dict})) or {} + sd[STREAMING_DATA_CLIENT_NAME] = name + for f in traverse_obj(sd, (('formats', 'adaptiveFormats'), ..., {dict})): + f[STREAMING_DATA_CLIENT_NAME] = name + prs.append(pr) # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in if variant == 'embedded' and self._is_unplayable(pr) and self.is_authenticated: @@ -3150,9 +3658,15 @@ def append_client(*client_names): self.report_warning(last_error) return prs, player_url - def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, is_live, duration): - itags, stream_ids = {}, [] - itag_qualities, res_qualities = {}, {} + def _needs_live_processing(self, live_status, duration): + if (live_status == 'is_live' and self.get_param('live_from_start') + or live_status == 'post_live' and (duration or 0) > 4 * 3600): + return live_status + + def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, live_status, duration): + CHUNK_SIZE = 10 << 20 + itags, stream_ids = collections.defaultdict(set), [] + itag_qualities, res_qualities = {}, {0: None} q = qualities([ # Normally tiny is the smallest video-only formats. 
But # audio-only formats with unknown quality may get tagged as tiny @@ -3160,7 +3674,15 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i 'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres' ]) - streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[]) + streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...)) + all_formats = self._configuration_arg('include_duplicate_formats') + + def build_fragments(f): + return LazyList({ + 'url': update_url_query(f['url'], { + 'range': f'{range_start}-{min(range_start + CHUNK_SIZE - 1, f["filesize"])}' + }) + } for range_start in range(0, f['filesize'], CHUNK_SIZE)) for fmt in streaming_formats: if fmt.get('targetDurationSec'): @@ -3168,9 +3690,10 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i itag = str_or_none(fmt.get('itag')) audio_track = fmt.get('audioTrack') or {} - stream_id = '%s.%s' % (itag or '', audio_track.get('id', '')) - if stream_id in stream_ids: - continue + stream_id = (itag, audio_track.get('id'), fmt.get('isDrc')) + if not all_formats: + if stream_id in stream_ids: + continue quality = fmt.get('quality') height = int_or_none(fmt.get('height')) @@ -3204,7 +3727,8 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i self._decrypt_signature(encrypted_sig, video_id, player_url) ) except ExtractorError as e: - self.report_warning('Signature extraction failed: Some formats may be missing', only_once=True) + self.report_warning('Signature extraction failed: Some formats may be missing', + video_id=video_id, only_once=True) self.write_debug(e, only_once=True) continue @@ -3212,52 +3736,67 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i throttled = False if query.get('n'): try: + decrypt_nsig = self._cached(self._decrypt_nsig, 'nsig', query['n'][0]) fmt_url = update_url_query(fmt_url, { - 'n': self._decrypt_nsig(query['n'][0], video_id, player_url)}) + 'n': decrypt_nsig(query['n'][0], video_id, player_url) + }) except ExtractorError as e: - self.report_warning( - 'nsig extraction failed: You may experience throttling for some formats\n' - f'n = {query["n"][0]} ; player = {player_url}', only_once=True) - self.write_debug(e, only_once=True) + phantomjs_hint = '' + if isinstance(e, JSInterpreter.Exception): + phantomjs_hint = (f' Install {self._downloader._format_err("PhantomJS", self._downloader.Styles.EMPHASIS)} ' + f'to workaround the issue. 
{PhantomJSwrapper.INSTALL_HINT}\n') + if player_url: + self.report_warning( + f'nsig extraction failed: You may experience throttling for some formats\n{phantomjs_hint}' + f' n = {query["n"][0]} ; player = {player_url}', video_id=video_id, only_once=True) + self.write_debug(e, only_once=True) + else: + self.report_warning( + 'Cannot decrypt nsig without player_url: You may experience throttling for some formats', + video_id=video_id, only_once=True) throttled = True - if itag: - itags[itag] = 'https' - stream_ids.append(stream_id) - tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) language_preference = ( 10 if audio_track.get('audioIsDefault') and 10 else -10 if 'descriptive' in (audio_track.get('displayName') or '').lower() and -10 else -1) # Some formats may have much smaller duration than others (possibly damaged during encoding) - # Eg: 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823 + # E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823 # Make sure to avoid false positives with small duration differences. - # Eg: __2ABJjxzNo, ySuUZEjARPY + # E.g. __2ABJjxzNo, ySuUZEjARPY is_damaged = try_get(fmt, lambda x: float(x['approxDurationMs']) / duration < 500) if is_damaged: self.report_warning( f'{video_id}: Some formats are possibly damaged. They will be deprioritized', only_once=True) + + client_name = fmt.get(STREAMING_DATA_CLIENT_NAME) dct = { 'asr': int_or_none(fmt.get('audioSampleRate')), 'filesize': int_or_none(fmt.get('contentLength')), - 'format_id': itag, + 'format_id': f'{itag}{"-drc" if fmt.get("isDrc") else ""}', 'format_note': join_nonempty( - '%s%s' % (audio_track.get('displayName') or '', - ' (default)' if language_preference > 0 else ''), + join_nonempty(audio_track.get('displayName'), + language_preference > 0 and ' (default)', delim=''), fmt.get('qualityLabel') or quality.replace('audio_quality_', ''), - throttled and 'THROTTLED', is_damaged and 'DAMAGED', delim=', '), + fmt.get('isDrc') and 'DRC', + try_get(fmt, lambda x: x['projectionType'].replace('RECTANGULAR', '').lower()), + try_get(fmt, lambda x: x['spatialAudioType'].replace('SPATIAL_AUDIO_TYPE_', '').lower()), + throttled and 'THROTTLED', is_damaged and 'DAMAGED', + (self.get_param('verbose') or all_formats) and client_name, + delim=', '), # Format 22 is likely to be damaged. 
See https://github.com/yt-dlp/yt-dlp/issues/3372 'source_preference': -10 if throttled else -5 if itag == '22' else -1, 'fps': int_or_none(fmt.get('fps')) or None, + 'audio_channels': fmt.get('audioChannels'), 'height': height, - 'quality': q(quality), + 'quality': q(quality) - bool(fmt.get('isDrc')) / 2, 'has_drm': bool(fmt.get('drmFamilies')), 'tbr': tbr, 'url': fmt_url, 'width': int_or_none(fmt.get('width')), 'language': join_nonempty(audio_track.get('id', '').split('.')[0], - 'desc' if language_preference < -1 else ''), + 'desc' if language_preference < -1 else '') or None, 'language_preference': language_preference, # Strictly de-prioritize damaged and 3gp formats 'preference': -10 if is_damaged else -2 if itag == '17' else None, @@ -3267,66 +3806,83 @@ def _extract_formats_and_subtitles(self, streaming_data, video_id, player_url, i if mime_mobj: dct['ext'] = mimetype2ext(mime_mobj.group(1)) dct.update(parse_codecs(mime_mobj.group(2))) - no_audio = dct.get('acodec') == 'none' - no_video = dct.get('vcodec') == 'none' - if no_audio: - dct['vbr'] = tbr - if no_video: - dct['abr'] = tbr - if no_audio or no_video: - dct['downloader_options'] = { - # Youtube throttles chunks >~10M - 'http_chunk_size': 10485760, + if itag: + itags[itag].add(('https', dct.get('language'))) + stream_ids.append(stream_id) + single_stream = 'none' in (dct.get('acodec'), dct.get('vcodec')) + if single_stream and dct.get('ext'): + dct['container'] = dct['ext'] + '_dash' + + if all_formats and dct['filesize']: + yield { + **dct, + 'format_id': f'{dct["format_id"]}-dashy' if all_formats else dct['format_id'], + 'protocol': 'http_dash_segments', + 'fragments': build_fragments(dct), } - if dct.get('ext'): - dct['container'] = dct['ext'] + '_dash' + dct['downloader_options'] = {'http_chunk_size': CHUNK_SIZE} yield dct - live_from_start = is_live and self.get_param('live_from_start') - skip_manifests = self._configuration_arg('skip') - if not self.get_param('youtube_include_hls_manifest', True): - skip_manifests.append('hls') + needs_live_processing = self._needs_live_processing(live_status, duration) + skip_bad_formats = not self._configuration_arg('include_incomplete_formats') + + skip_manifests = set(self._configuration_arg('skip')) + if (not self.get_param('youtube_include_hls_manifest', True) + or needs_live_processing == 'is_live' # These will be filtered out by YoutubeDL anyway + or needs_live_processing and skip_bad_formats): + skip_manifests.add('hls') + if not self.get_param('youtube_include_dash_manifest', True): - skip_manifests.append('dash') - get_dash = 'dash' not in skip_manifests and ( - not is_live or live_from_start or self._configuration_arg('include_live_dash')) - get_hls = not live_from_start and 'hls' not in skip_manifests - - def process_manifest_format(f, proto, itag): - if itag in itags: - if itags[itag] == proto or f'{itag}-{proto}' in itags: - return False - itag = f'{itag}-{proto}' - if itag: + skip_manifests.add('dash') + if self._configuration_arg('include_live_dash'): + self._downloader.deprecated_feature('[youtube] include_live_dash extractor argument is deprecated. 
' + 'Use include_incomplete_formats extractor argument instead') + elif skip_bad_formats and live_status == 'is_live' and needs_live_processing != 'is_live': + skip_manifests.add('dash') + + def process_manifest_format(f, proto, client_name, itag): + key = (proto, f.get('language')) + if not all_formats and key in itags[itag]: + return False + itags[itag].add(key) + + if itag and all_formats: + f['format_id'] = f'{itag}-{proto}' + elif any(p != proto for p, _ in itags[itag]): + f['format_id'] = f'{itag}-{proto}' + elif itag: f['format_id'] = itag - itags[itag] = proto - f['quality'] = next(( - q(qdict[val]) - for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities)) - if val in qdict), -1) + f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1)) + if f['quality'] == -1 and f.get('height'): + f['quality'] = q(res_qualities[min(res_qualities, key=lambda x: abs(x - f['height']))]) + if self.get_param('verbose'): + f['format_note'] = join_nonempty(f.get('format_note'), client_name, delim=', ') return True subtitles = {} for sd in streaming_data: - hls_manifest_url = get_hls and sd.get('hlsManifestUrl') + client_name = sd.get(STREAMING_DATA_CLIENT_NAME) + + hls_manifest_url = 'hls' not in skip_manifests and sd.get('hlsManifestUrl') if hls_manifest_url: - fmts, subs = self._extract_m3u8_formats_and_subtitles(hls_manifest_url, video_id, 'mp4', fatal=False, live=is_live) + fmts, subs = self._extract_m3u8_formats_and_subtitles( + hls_manifest_url, video_id, 'mp4', fatal=False, live=live_status == 'is_live') subtitles = self._merge_subtitles(subs, subtitles) for f in fmts: - if process_manifest_format(f, 'hls', self._search_regex( + if process_manifest_format(f, 'hls', client_name, self._search_regex( r'/itag/(\d+)', f['url'], 'itag', default=None)): yield f - dash_manifest_url = get_dash and sd.get('dashManifestUrl') + dash_manifest_url = 'dash' not in skip_manifests and sd.get('dashManifestUrl') if dash_manifest_url: formats, subs = self._extract_mpd_formats_and_subtitles(dash_manifest_url, video_id, fatal=False) subtitles = self._merge_subtitles(subs, subtitles) # Prioritize HLS subs over DASH for f in formats: - if process_manifest_format(f, 'dash', f['format_id']): + if process_manifest_format(f, 'dash', client_name, f['format_id']): f['filesize'] = int_or_none(self._search_regex( r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None)) - if live_from_start: + if needs_live_processing: f['is_from_start'] = True yield f @@ -3373,14 +3929,17 @@ def _extract_storyboard(self, player_responses, duration): def _download_player_responses(self, url, smuggled_data, video_id, webpage_url): webpage = None if 'webpage' not in self._configuration_arg('player_skip'): + query = {'bpctr': '9999999999', 'has_verified': '1'} + if smuggled_data.get('is_story'): + query['pp'] = self._STORY_PLAYER_PARAMS webpage = self._download_webpage( - webpage_url + '&bpctr=9999999999&has_verified=1&pp=8AEB', video_id, fatal=False) + webpage_url, video_id, fatal=False, query=query) master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg() player_responses, player_url = self._extract_player_responses( self._get_requested_clients(url, smuggled_data), - video_id, webpage, master_ytcfg) + video_id, webpage, master_ytcfg, smuggled_data) return webpage, master_ytcfg, player_responses, player_url @@ -3389,11 +3948,19 @@ def _list_formats(self, video_id, microformats, video_details, 
player_responses, is_live = get_first(video_details, 'isLive') if is_live is None: is_live = get_first(live_broadcast_details, 'isLiveNow') + live_content = get_first(video_details, 'isLiveContent') + is_upcoming = get_first(video_details, 'isUpcoming') + post_live = get_first(video_details, 'isPostLiveDvr') + live_status = ('post_live' if post_live + else 'is_live' if is_live + else 'is_upcoming' if is_upcoming + else 'was_live' if live_content + else 'not_live' if False in (is_live, live_content) + else None) + streaming_data = traverse_obj(player_responses, (..., 'streamingData')) + *formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, live_status, duration) - streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[]) - *formats, subtitles = self._extract_formats_and_subtitles(streaming_data, video_id, player_url, is_live, duration) - - return live_broadcast_details, is_live, streaming_data, formats, subtitles + return live_broadcast_details, live_status, streaming_data, formats, subtitles def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) @@ -3405,7 +3972,7 @@ def _real_extract(self, url): webpage, master_ytcfg, player_responses, player_url = self._download_player_responses(url, smuggled_data, video_id, webpage_url) playability_statuses = traverse_obj( - player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[]) + player_responses, (..., 'playabilityStatus'), expected_type=dict) trailer_video_id = get_first( playability_statuses, @@ -3418,16 +3985,23 @@ def _real_extract(self, url): search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None)) - video_details = traverse_obj( - player_responses, (..., 'videoDetails'), expected_type=dict, default=[]) + video_details = traverse_obj(player_responses, (..., 'videoDetails'), expected_type=dict) microformats = traverse_obj( player_responses, (..., 'microformat', 'playerMicroformatRenderer'), - expected_type=dict, default=[]) - video_title = ( - get_first(video_details, 'title') - or self._get_text(microformats, (..., 'title')) - or search_meta(['og:title', 'twitter:title', 'title'])) - video_description = get_first(video_details, 'shortDescription') + expected_type=dict) + + translated_title = self._get_text(microformats, (..., 'title')) + video_title = (self._preferred_lang and translated_title + or get_first(video_details, 'title') # primary + or translated_title + or search_meta(['og:title', 'twitter:title', 'title'])) + translated_description = self._get_text(microformats, (..., 'description')) + original_description = get_first(video_details, 'shortDescription') + video_description = ( + self._preferred_lang and translated_description + # If original description is blank, it will be an empty string. + # Do not prefer translated description in this case. 
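# --- Illustrative sketch, not part of this patch ----------------------------
# Paraphrase of the live_status cascade in _list_formats above, written as a
# standalone function to make the precedence explicit (post_live wins over
# is_live, and so on). The inputs mirror isPostLiveDvr / isLiveNow /
# isUpcoming / isLiveContent from the player response; None means "missing".

def derive_live_status(post_live, is_live, is_upcoming, live_content):
    if post_live:
        return 'post_live'
    if is_live:
        return 'is_live'
    if is_upcoming:
        return 'is_upcoming'
    if live_content:
        return 'was_live'
    if False in (is_live, live_content):
        return 'not_live'
    return None  # not enough information


assert derive_live_status(True, True, None, True) == 'post_live'
assert derive_live_status(None, None, None, False) == 'not_live'
assert derive_live_status(None, None, None, None) is None
# -----------------------------------------------------------------------------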
+ or original_description if original_description is not None else translated_description) multifeed_metadata_list = get_first( player_responses, @@ -3472,13 +4046,14 @@ def feed_entry(name): return self.playlist_result( entries, video_id, video_title, video_description) - duration = int_or_none( - get_first(video_details, 'lengthSeconds') - or get_first(microformats, 'lengthSeconds') - or parse_duration(search_meta('duration'))) or None + duration = (int_or_none(get_first(video_details, 'lengthSeconds')) + or int_or_none(get_first(microformats, 'lengthSeconds')) + or parse_duration(search_meta('duration')) or None) - live_broadcast_details, is_live, streaming_data, formats, automatic_captions = \ - self._list_formats(video_id, microformats, video_details, player_responses, player_url) + live_broadcast_details, live_status, streaming_data, formats, automatic_captions = \ + self._list_formats(video_id, microformats, video_details, player_responses, player_url, duration) + if live_status == 'post_live': + self.write_debug(f'{video_id}: Video is in Post-Live Manifestless mode') if not formats: if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')): @@ -3537,7 +4112,7 @@ def feed_entry(name): thumbnails.extend({ 'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format( video_id=video_id, name=name, ext=ext, - webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''), + webp='_webp' if ext == 'webp' else '', live='_live' if live_status == 'is_live' else ''), } for name in thumbnail_names for ext in ('webp', 'jpg')) for thumb in thumbnails: i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names) @@ -3546,31 +4121,37 @@ def feed_entry(name): self._downloader._sort_thumbnails(original_thumbnails) category = get_first(microformats, 'category') or search_meta('genre') - channel_id = str_or_none( + channel_id = self.ucid_or_none(str_or_none( get_first(video_details, 'channelId') or get_first(microformats, 'externalChannelId') - or search_meta('channelId')) + or search_meta('channelId'))) owner_profile_url = get_first(microformats, 'ownerProfileUrl') - live_content = get_first(video_details, 'isLiveContent') - is_upcoming = get_first(video_details, 'isUpcoming') - if is_live is None: - if is_upcoming or live_content is False: - is_live = False - if is_upcoming is None and (live_content or is_live): - is_upcoming = False live_start_time = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp')) live_end_time = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp')) if not duration and live_end_time and live_start_time: duration = live_end_time - live_start_time - if is_live and self.get_param('live_from_start'): - self._prepare_live_from_start_formats(formats, video_id, live_start_time, url, webpage_url, smuggled_data) + needs_live_processing = self._needs_live_processing(live_status, duration) + + def is_bad_format(fmt): + if needs_live_processing and not fmt.get('is_from_start'): + return True + elif (live_status == 'is_live' and needs_live_processing != 'is_live' + and fmt.get('protocol') == 'http_dash_segments'): + return True + + for fmt in filter(is_bad_format, formats): + fmt['preference'] = (fmt.get('preference') or -1) - 10 + fmt['format_note'] = join_nonempty(fmt.get('format_note'), '(Last 4 hours)', delim=' ') + + if needs_live_processing: + self._prepare_live_from_start_formats( + formats, video_id, live_start_time, url, webpage_url, 
smuggled_data, live_status == 'is_live') formats.extend(self._extract_storyboard(player_responses, duration)) - # source_preference is lower for throttled/potentially damaged formats - self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto')) + channel_handle = self.handle_from_url(owner_profile_url) info = { 'id': video_id, @@ -3581,11 +4162,8 @@ def feed_entry(name): # URL checking if user don't care about getting the best possible thumbnail 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')), 'description': video_description, - 'uploader': get_first(video_details, 'author'), - 'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None, - 'uploader_url': owner_profile_url, 'channel_id': channel_id, - 'channel_url': format_field(channel_id, None, 'https://www.youtube.com/channel/%s'), + 'channel_url': format_field(channel_id, None, 'https://www.youtube.com/channel/%s', default=None), 'duration': duration, 'view_count': int_or_none( get_first((video_details, microformats), (..., 'viewCount')) @@ -3599,22 +4177,12 @@ def feed_entry(name): 'categories': [category] if category else None, 'tags': keywords, 'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'), - 'is_live': is_live, - 'was_live': (False if is_live or is_upcoming or live_content is False - else None if is_live is None or is_upcoming is None - else live_content), - 'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL + 'live_status': live_status, 'release_timestamp': live_start_time, + '_format_sort_fields': ( # source_preference is lower for throttled/potentially damaged formats + 'quality', 'res', 'fps', 'hdr:12', 'source', 'vcodec:vp9.2', 'channels', 'acodec', 'lang', 'proto') } - if get_first(video_details, 'isPostLiveDvr'): - self.write_debug('Video is in Post-Live Manifestless mode') - info['live_status'] = 'post_live' - if (duration or 0) > 4 * 3600: - self.report_warning( - 'The livestream has not finished processing. Only 4 hours of the video can be currently downloaded. 
' - 'This is a known issue and patches are welcome') - subtitles = {} pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict) if pctr: @@ -3625,10 +4193,10 @@ def get_lang_code(track): # Converted into dicts to remove duplicates captions = { get_lang_code(sub): sub - for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])} + for sub in traverse_obj(pctr, (..., 'captionTracks', ...))} translation_languages = { lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1) - for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])} + for lang in traverse_obj(pctr, (..., 'translationLanguages', ...))} def process_language(container, base_url, lang_code, sub_name, query): lang_subs = container.setdefault(lang_code, []) @@ -3662,7 +4230,7 @@ def process_language(container, base_url, lang_code, sub_name, query): if not trans_code: continue orig_trans_code = trans_code - if caption_track.get('kind') != 'asr': + if caption_track.get('kind') != 'asr' and trans_code != 'und': if not get_translated_subs: continue trans_code += f'-{lang_code}' @@ -3717,22 +4285,25 @@ def process_language(container, base_url, lang_code, sub_name, query): initial_data = None if webpage: initial_data = self.extract_yt_initial_data(video_id, webpage, fatal=False) + if not traverse_obj(initial_data, 'contents'): + self.report_warning('Incomplete data received in embedded initial data; re-fetching using API.') + initial_data = None if not initial_data: query = {'videoId': video_id} query.update(self._get_checkok_params()) initial_data = self._extract_response( item_id=video_id, ep='next', fatal=False, - ytcfg=master_ytcfg, query=query, + ytcfg=master_ytcfg, query=query, check_get_keys='contents', headers=self.generate_api_headers(ytcfg=master_ytcfg), note='Downloading initial data API JSON') info['comment_count'] = traverse_obj(initial_data, ( 'contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'itemSectionRenderer', - 'contents', ..., 'commentsEntryPointHeaderRenderer', 'commentCount', 'simpleText' + 'contents', ..., 'commentsEntryPointHeaderRenderer', 'commentCount' ), ( 'engagementPanels', lambda _, v: v['engagementPanelSectionListRenderer']['panelIdentifier'] == 'comment-item-section', - 'engagementPanelSectionListRenderer', 'header', 'engagementPanelTitleHeaderRenderer', 'contextualInfo', 'runs', ..., 'text' - ), expected_type=int_or_none, get_all=False) + 'engagementPanelSectionListRenderer', 'header', 'engagementPanelTitleHeaderRenderer', 'contextualInfo' + ), expected_type=self._get_count, get_all=False) try: # This will error if there is no livechat initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation'] @@ -3744,7 +4315,8 @@ def process_language(container, base_url, lang_code, sub_name, query): 'url': f'https://www.youtube.com/watch?v={video_id}&bpctr=9999999999&has_verified=1', 'video_id': video_id, 'ext': 'json', - 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay', + 'protocol': ('youtube_live_chat' if live_status in ('is_live', 'is_upcoming') + else 'youtube_live_chat_replay'), }] if initial_data: @@ -3754,6 +4326,8 @@ def process_language(container, base_url, lang_code, sub_name, query): or self._extract_chapters_from_description(video_description, duration) or None) + info['heatmap'] = 
self._extract_heatmap_from_player_overlay(initial_data) + contents = traverse_obj( initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents'), expected_type=list, default=[]) @@ -3779,19 +4353,23 @@ def process_language(container, base_url, lang_code, sub_name, query): vpir, lambda x: x['videoActions']['menuRenderer']['topLevelButtons'], list) or []): - tbr = tlb.get('toggleButtonRenderer') or {} - for getter, regex in [( - lambda x: x['defaultText']['accessibility']['accessibilityData'], - r'(?P[\d,]+)\s*(?P(?:dis)?like)'), ([ - lambda x: x['accessibility'], - lambda x: x['accessibilityData']['accessibilityData'], - ], r'(?P(?:dis)?like) this video along with (?P[\d,]+) other people')]: - label = (try_get(tbr, getter, dict) or {}).get('label') - if label: - mobj = re.match(regex, label) - if mobj: - info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count')) - break + tbrs = variadic( + traverse_obj( + tlb, ('toggleButtonRenderer', ...), + ('segmentedLikeDislikeButtonRenderer', ..., 'toggleButtonRenderer'))) + for tbr in tbrs: + for getter, regex in [( + lambda x: x['defaultText']['accessibility']['accessibilityData'], + r'(?P[\d,]+)\s*(?P(?:dis)?like)'), ([ + lambda x: x['accessibility'], + lambda x: x['accessibilityData']['accessibilityData'], + ], r'(?P(?:dis)?like) this video along with (?P[\d,]+) other people')]: + label = (try_get(tbr, getter, dict) or {}).get('label') + if label: + mobj = re.match(regex, label) + if mobj: + info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count')) + break sbr_tooltip = try_get( vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip']) if sbr_tooltip: @@ -3800,6 +4378,15 @@ def process_language(container, base_url, lang_code, sub_name, query): 'like_count': str_to_int(like_count), 'dislike_count': str_to_int(dislike_count), }) + vcr = traverse_obj(vpir, ('viewCount', 'videoViewCountRenderer')) + if vcr: + vc = self._get_count(vcr, 'viewCount') + # Upcoming premieres with waiting count are treated as live here + if vcr.get('isLive'): + info['concurrent_view_count'] = vc + elif info.get('view_count') is None: + info['view_count'] = vc + vsir = get_first(contents, 'videoSecondaryInfoRenderer') if vsir: vor = traverse_obj(vsir, ('owner', 'videoOwnerRenderer')) @@ -3807,6 +4394,13 @@ def process_language(container, base_url, lang_code, sub_name, query): 'channel': self._get_text(vor, 'title'), 'channel_follower_count': self._get_count(vor, 'subscriberCountText')}) + if not channel_handle: + channel_handle = self.handle_from_url( + traverse_obj(vor, ( + ('navigationEndpoint', ('title', 'runs', ..., 'navigationEndpoint')), + (('commandMetadata', 'webCommandMetadata', 'url'), ('browseEndpoint', 'canonicalBaseUrl')), + {str}), get_all=False)) + rows = try_get( vsir, lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'], @@ -3833,58 +4427,49 @@ def process_language(container, base_url, lang_code, sub_name, query): elif mrr_title == 'Song': info['track'] = mrr_contents_text - fallbacks = { - 'channel': 'uploader', - 'channel_id': 'uploader_id', - 'channel_url': 'uploader_url', - } - + info.update({ + 'uploader': info.get('channel'), + 'uploader_id': channel_handle, + 'uploader_url': format_field(channel_handle, None, 'https://www.youtube.com/%s', default=None), + }) # The upload date for scheduled, live and past live streams / premieres in microformats # may be different from the stream date. Although not in UTC, we will prefer it in this case. 
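# --- Illustrative sketch, not part of this patch ----------------------------
# The like/dislike counts recovered above come from accessibility labels on
# the toggle buttons. Written out with their named groups, the two patterns
# look roughly like this; the sample labels below are made up for
# illustration, and str_to_int from the real code is approximated inline.
import re

PATTERNS = [
    r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)',
    r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people',
]


def counts_from_label(label):
    for pattern in PATTERNS:
        mobj = re.match(pattern, label)
        if mobj:
            return {mobj.group('type') + '_count': int(mobj.group('count').replace(',', ''))}
    return {}


assert counts_from_label('1,234 likes') == {'like_count': 1234}
assert counts_from_label('like this video along with 1,234 other people') == {'like_count': 1234}
# -----------------------------------------------------------------------------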
# See: https://github.com/yt-dlp/yt-dlp/pull/2223#issuecomment-1008485139 upload_date = ( unified_strdate(get_first(microformats, 'uploadDate')) or unified_strdate(search_meta('uploadDate'))) - if not upload_date or (not info.get('is_live') and not info.get('was_live') and info.get('live_status') != 'is_upcoming'): - upload_date = strftime_or_none(self._extract_time_text(vpir, 'dateText')[0], '%Y%m%d') or upload_date + if not upload_date or ( + live_status in ('not_live', None) + and 'no-youtube-prefer-utc-upload-date' not in self.get_param('compat_opts', []) + ): + upload_date = strftime_or_none( + self._parse_time_text(self._get_text(vpir, 'dateText')), '%Y%m%d') or upload_date info['upload_date'] = upload_date - for to, frm in fallbacks.items(): - if not info.get(to): - info[to] = info.get(frm) - for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]: v = info.get(s_k) if v: info[d_k] = v - is_private = get_first(video_details, 'isPrivate', expected_type=bool) - is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool) - is_membersonly = None - is_premium = None - if initial_data and is_private is not None: - is_membersonly = False - is_premium = False - contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or [] - badge_labels = set() - for content in contents: - if not isinstance(content, dict): - continue - badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer'))) - for badge_label in badge_labels: - if badge_label.lower() == 'members only': - is_membersonly = True - elif badge_label.lower() == 'premium': - is_premium = True - elif badge_label.lower() == 'unlisted': - is_unlisted = True - - info['availability'] = self._availability( - is_private=is_private, - needs_premium=is_premium, - needs_subscription=is_membersonly, - needs_auth=info['age_limit'] >= 18, - is_unlisted=None if is_private is None else is_unlisted) + badges = self._extract_badges(traverse_obj(contents, (..., 'videoPrimaryInfoRenderer'), get_all=False)) + + is_private = (self._has_badge(badges, BadgeType.AVAILABILITY_PRIVATE) + or get_first(video_details, 'isPrivate', expected_type=bool)) + + info['availability'] = ( + 'public' if self._has_badge(badges, BadgeType.AVAILABILITY_PUBLIC) + else self._availability( + is_private=is_private, + needs_premium=( + self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) + or False if initial_data and is_private is not None else None), + needs_subscription=( + self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) + or False if initial_data and is_private is not None else None), + needs_auth=info['age_limit'] >= 18, + is_unlisted=None if is_private is None else ( + self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) + or get_first(microformats, 'isUnlisted', expected_type=bool)))) info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage) @@ -3894,15 +4479,19 @@ def process_language(container, base_url, lang_code, sub_name, query): class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor): - @staticmethod def passthrough_smuggled_data(func): - def _smuggle(entries, smuggled_data): - for entry in entries: - # TODO: Convert URL to music.youtube instead. - # Do we need to passthrough any other smuggled_data? 
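# --- Illustrative sketch, not part of this patch ----------------------------
# Background for the smuggled_data handling here: smuggle_url/unsmuggle_url
# (from yt_dlp.utils, already used by this module) round-trip a small dict
# through the URL fragment, so flags like is_music_url survive the hop from
# the tab extractor to the video extractor. Minimal demonstration (the video
# id is just a placeholder):
from yt_dlp.utils import smuggle_url, unsmuggle_url

url = smuggle_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ', {'is_music_url': True})
plain_url, data = unsmuggle_url(url, {})

assert plain_url == 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
assert data == {'is_music_url': True}
# -----------------------------------------------------------------------------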
- entry['url'] = smuggle_url(entry['url'], smuggled_data) - yield entry + def _smuggle(info, smuggled_data): + if info.get('_type') not in ('url', 'url_transparent'): + return info + if smuggled_data.get('is_music_url'): + parsed_url = urllib.parse.urlparse(info['url']) + if parsed_url.netloc in ('www.youtube.com', 'music.youtube.com'): + smuggled_data.pop('is_music_url') + info['url'] = urllib.parse.urlunparse(parsed_url._replace(netloc='music.youtube.com')) + if smuggled_data: + info['url'] = smuggle_url(info['url'], smuggled_data) + return info @functools.wraps(func) def wrapper(self, url): @@ -3910,24 +4499,13 @@ def wrapper(self, url): if self.is_music_url(url): smuggled_data['is_music_url'] = True info_dict = func(self, url, smuggled_data) - if smuggled_data and info_dict.get('entries'): - info_dict['entries'] = _smuggle(info_dict['entries'], smuggled_data) + if smuggled_data: + _smuggle(info_dict, smuggled_data) + if info_dict.get('entries'): + info_dict['entries'] = (_smuggle(i, smuggled_data.copy()) for i in info_dict['entries']) return info_dict return wrapper - def _extract_channel_id(self, webpage): - channel_id = self._html_search_meta( - 'channelId', webpage, 'channel id', default=None) - if channel_id: - return channel_id - channel_url = self._html_search_meta( - ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url', - 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad', - 'twitter:app:url:googleplay'), webpage, 'channel url') - return self._search_regex( - r'https?://(?:www\.)?youtube\.com/channel/([^/?#&])+', - channel_url, 'channel id') - @staticmethod def _extract_basic_item_renderer(item): # Modified from _extract_grid_item_renderer @@ -3942,6 +4520,35 @@ def _extract_basic_item_renderer(item): elif key.startswith('grid') and key.endswith('Renderer'): return renderer + def _extract_channel_renderer(self, renderer): + channel_id = self.ucid_or_none(renderer['channelId']) + title = self._get_text(renderer, 'title') + channel_url = format_field(channel_id, None, 'https://www.youtube.com/channel/%s', default=None) + # As of 2023-03-01 YouTube doesn't use the channel handles on these renderers yet. + # However we can expect them to change that in the future. 
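# --- Illustrative sketch, not part of this patch ----------------------------
# handle_from_url and ucid_or_none used above are helpers defined elsewhere in
# this file; the intent is to pull an "@handle" (or validate a UC... channel
# id) out of a canonical channel URL. A rough standalone approximation, for
# illustration only:
import re


def handle_from_url(url):
    mobj = re.search(r'youtube\.com/(?P<handle>@[\w.-]+)', url or '')
    return mobj.group('handle') if mobj else None


assert handle_from_url('https://www.youtube.com/@ExampleChannel') == '@ExampleChannel'
assert handle_from_url('https://www.youtube.com/channel/UC1234') is None
# -----------------------------------------------------------------------------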
+ channel_handle = self.handle_from_url( + traverse_obj(renderer, ( + 'navigationEndpoint', (('commandMetadata', 'webCommandMetadata', 'url'), + ('browseEndpoint', 'canonicalBaseUrl')), + {str}), get_all=False)) + return { + '_type': 'url', + 'url': channel_url, + 'id': channel_id, + 'ie_key': YoutubeTabIE.ie_key(), + 'channel': title, + 'uploader': title, + 'channel_id': channel_id, + 'channel_url': channel_url, + 'title': title, + 'uploader_id': channel_handle, + 'uploader_url': format_field(channel_handle, None, 'https://www.youtube.com/%s', default=None), + 'channel_follower_count': self._get_count(renderer, 'subscriberCountText'), + 'thumbnails': self._extract_thumbnails(renderer, 'thumbnail'), + 'playlist_count': self._get_count(renderer, 'videoCountText'), + 'description': self._get_text(renderer, 'descriptionSnippet'), + } + def _grid_entries(self, grid_renderer): for item in grid_renderer['items']: if not isinstance(item, dict): @@ -3967,9 +4574,7 @@ def _grid_entries(self, grid_renderer): # channel channel_id = renderer.get('channelId') if channel_id: - yield self.url_result( - 'https://www.youtube.com/channel/%s' % channel_id, - ie=YoutubeTabIE.ie_key(), video_title=title) + yield self._extract_channel_renderer(renderer) continue # generic endpoint URL support ep_url = urljoin('https://www.youtube.com/', try_get( @@ -3985,8 +4590,11 @@ def _grid_entries(self, grid_renderer): def _music_reponsive_list_entry(self, renderer): video_id = traverse_obj(renderer, ('playlistItemData', 'videoId')) if video_id: + title = traverse_obj(renderer, ( + 'flexColumns', 0, 'musicResponsiveListItemFlexColumnRenderer', + 'text', 'runs', 0, 'text')) return self.url_result(f'https://music.youtube.com/watch?v={video_id}', - ie=YoutubeIE.ie_key(), video_id=video_id) + ie=YoutubeIE.ie_key(), video_id=video_id, title=title) playlist_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'playlistId')) if playlist_id: video_id = traverse_obj(renderer, ('navigationEndpoint', 'watchEndpoint', 'videoId')) @@ -4044,12 +4652,20 @@ def _playlist_entries(self, video_list_renderer): yield self._extract_video(renderer) def _rich_entries(self, rich_grid_renderer): - renderer = try_get( - rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {} + renderer = traverse_obj( + rich_grid_renderer, + ('content', ('videoRenderer', 'reelItemRenderer', 'playlistRenderer')), get_all=False) or {} video_id = renderer.get('videoId') - if not video_id: + if video_id: + yield self._extract_video(renderer) + return + playlist_id = renderer.get('playlistId') + if playlist_id: + yield self.url_result( + f'https://www.youtube.com/playlist?list={playlist_id}', + ie=YoutubeTabIE.ie_key(), video_id=playlist_id, + video_title=self._get_text(renderer, 'title')) return - yield self._extract_video(renderer) def _video_entry(self, video_renderer): video_id = video_renderer.get('videoId') @@ -4122,6 +4738,13 @@ def _rich_grid_entries(self, contents): yield entry ''' + def _report_history_entries(self, renderer): + for url in traverse_obj(renderer, ( + 'rows', ..., 'reportHistoryTableRowRenderer', 'cells', ..., + 'reportHistoryTableCellRenderer', 'cell', 'reportHistoryTableTextCellRenderer', 'text', 'runs', ..., + 'navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url')): + yield self.url_result(urljoin('https://www.youtube.com', url), YoutubeIE) + def _extract_entries(self, parent_renderer, continuation_list): # continuation_list is modified in-place with continuation_list = [continuation_token] 
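# --- Illustrative sketch, not part of this patch ----------------------------
# The continuation_list idiom used in _extract_entries here: a one-element
# list is passed into a generator, which overwrites its contents in place so
# the caller can read the continuation token after (or while) consuming the
# entries. Toy version:

def extract_entries(items, continuation_list):
    continuation_list[:] = [None]
    for item in items:
        if isinstance(item, dict) and 'continuation' in item:
            continuation_list[0] = item['continuation']
            continue
        yield item


continuation_list = [None]
entries = list(extract_entries(['video1', 'video2', {'continuation': 'TOKEN'}], continuation_list))
assert entries == ['video1', 'video2']
assert continuation_list[0] == 'TOKEN'
# -----------------------------------------------------------------------------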
continuation_list[:] = [None] @@ -4133,12 +4756,16 @@ def _extract_entries(self, parent_renderer, continuation_list): content, 'itemSectionRenderer', 'musicShelfRenderer', 'musicShelfContinuation', expected_type=dict) if not is_renderer: - renderer = content.get('richItemRenderer') - if renderer: - for entry in self._rich_entries(renderer): + if content.get('richItemRenderer'): + for entry in self._rich_entries(content['richItemRenderer']): yield entry continuation_list[0] = self._extract_continuation(parent_renderer) + elif content.get('reportHistorySectionRenderer'): # https://www.youtube.com/reporthistory + table = traverse_obj(content, ('reportHistorySectionRenderer', 'table', 'tableRenderer')) + yield from self._report_history_entries(table) + continuation_list[0] = self._extract_continuation(table) continue + isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or [] for isr_content in isr_contents: if not isinstance(isr_content, dict): @@ -4199,26 +4826,6 @@ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data): # See: https://github.com/ytdl-org/youtube-dl/issues/28702 visitor_data = self._extract_visitor_data(response) or visitor_data - known_continuation_renderers = { - 'playlistVideoListContinuation': self._playlist_entries, - 'gridContinuation': self._grid_entries, - 'itemSectionContinuation': self._post_thread_continuation_entries, - 'sectionListContinuation': extract_entries, # for feeds - } - continuation_contents = try_get( - response, lambda x: x['continuationContents'], dict) or {} - continuation_renderer = None - for key, value in continuation_contents.items(): - if key not in known_continuation_renderers: - continue - continuation_renderer = value - continuation_list = [None] - yield from known_continuation_renderers[key](continuation_renderer) - continuation = continuation_list[0] or self._extract_continuation(continuation_renderer) - break - if continuation_renderer: - continue - known_renderers = { 'videoRenderer': (self._grid_entries, 'items'), # for membership tab 'gridPlaylistRenderer': (self._grid_entries, 'items'), @@ -4227,79 +4834,82 @@ def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data): 'playlistVideoRenderer': (self._playlist_entries, 'contents'), 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds 'richItemRenderer': (extract_entries, 'contents'), # for hashtag - 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents') + 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents'), + 'reportHistoryTableRowRenderer': (self._report_history_entries, 'rows'), + 'playlistVideoListContinuation': (self._playlist_entries, None), + 'gridContinuation': (self._grid_entries, None), + 'itemSectionContinuation': (self._post_thread_continuation_entries, None), + 'sectionListContinuation': (extract_entries, None), # for feeds } - on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints')) - continuation_items = try_get( - on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list) - continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {} + + continuation_items = traverse_obj(response, ( + ('onResponseReceivedActions', 'onResponseReceivedEndpoints'), ..., + 'appendContinuationItemsAction', 'continuationItems' + ), 'continuationContents', get_all=False) + continuation_item = traverse_obj(continuation_items, 0, None, expected_type=dict, default={}) + 
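# --- Illustrative sketch, not part of this patch ----------------------------
# traverse_obj (from yt_dlp.utils) does the heavy lifting in the continuation
# lookups above: a path is a tuple of keys/indices, `...` branches over every
# element, and default/get_all control what comes back when nothing (or
# several things) match. A few small examples on toy data:
from yt_dlp.utils import traverse_obj

data = {'items': [{'id': 'a', 'n': 1}, {'id': 'b', 'n': 2}], 'meta': {'title': 'demo'}}

assert traverse_obj(data, ('meta', 'title')) == 'demo'
assert traverse_obj(data, ('items', ..., 'id')) == ['a', 'b']
assert traverse_obj(data, ('items', ..., 'id'), get_all=False) == 'a'
assert traverse_obj(data, ('meta', 'missing'), default='fallback') == 'fallback'
# -----------------------------------------------------------------------------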
video_items_renderer = None - for key, value in continuation_item.items(): + for key in continuation_item.keys(): if key not in known_renderers: continue - video_items_renderer = {known_renderers[key][1]: continuation_items} + func, parent_key = known_renderers[key] + video_items_renderer = {parent_key: continuation_items} if parent_key else continuation_items continuation_list = [None] - yield from known_renderers[key][0](video_items_renderer) + yield from func(video_items_renderer) continuation = continuation_list[0] or self._extract_continuation(video_items_renderer) + + if not video_items_renderer: break - if video_items_renderer: - continue - break @staticmethod def _extract_selected_tab(tabs, fatal=True): - for tab in tabs: - renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {} - if renderer.get('selected') is True: - return renderer - else: - if fatal: - raise ExtractorError('Unable to find selected tab') - - def _extract_uploader(self, data): - uploader = {} - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {} - owner = try_get( - renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict) - if owner: - owner_text = owner.get('text') - uploader['uploader'] = self._search_regex( - r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text) - uploader['uploader_id'] = try_get( - owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], str) - uploader['uploader_url'] = urljoin( - 'https://www.youtube.com/', - try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], str)) - return {k: v for k, v in uploader.items() if v is not None} + for tab_renderer in tabs: + if tab_renderer.get('selected'): + return tab_renderer + if fatal: + raise ExtractorError('Unable to find selected tab') + + @staticmethod + def _extract_tab_renderers(response): + return traverse_obj( + response, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., ('tabRenderer', 'expandableTabRenderer')), expected_type=dict) def _extract_from_tabs(self, item_id, ytcfg, data, tabs): - playlist_id = title = description = channel_url = channel_name = channel_id = None - tags = [] + metadata = self._extract_metadata_from_tabs(item_id, data) selected_tab = self._extract_selected_tab(tabs) - primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') - renderer = try_get( - data, lambda x: x['metadata']['channelMetadataRenderer'], dict) - if renderer: - channel_name = renderer.get('title') - channel_url = renderer.get('channelUrl') - channel_id = renderer.get('externalId') - else: - renderer = try_get( - data, lambda x: x['metadata']['playlistMetadataRenderer'], dict) + metadata['title'] += format_field(selected_tab, 'title', ' - %s') + metadata['title'] += format_field(selected_tab, 'expandedText', ' - %s') - if renderer: - title = renderer.get('title') - description = renderer.get('description', '') - playlist_id = channel_id - tags = renderer.get('keywords', '').split() + return self.playlist_result( + self._entries( + selected_tab, metadata['id'], ytcfg, + self._extract_account_syncid(ytcfg, data), + self._extract_visitor_data(data, ytcfg)), + **metadata) + + def _extract_metadata_from_tabs(self, item_id, data): + info = {'id': item_id} + + metadata_renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'), expected_type=dict) + if metadata_renderer: + channel_id = traverse_obj(metadata_renderer, ('externalId', 
{self.ucid_or_none}), + ('channelUrl', {self.ucid_from_url})) + info.update({ + 'channel': metadata_renderer.get('title'), + 'channel_id': channel_id, + }) + if info['channel_id']: + info['id'] = info['channel_id'] + else: + metadata_renderer = traverse_obj(data, ('metadata', 'playlistMetadataRenderer'), expected_type=dict) # We can get the uncropped banner/avatar by replacing the crop params with '=s0' # See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714 def _get_uncropped(url): return url_or_none((url or '').split('=')[0] + '=s0') - avatar_thumbnails = self._extract_thumbnails(renderer, 'avatar') + avatar_thumbnails = self._extract_thumbnails(metadata_renderer, 'avatar') if avatar_thumbnails: uncropped_avatar = _get_uncropped(avatar_thumbnails[0]['url']) if uncropped_avatar: @@ -4310,7 +4920,7 @@ def _get_uncropped(url): }) channel_banners = self._extract_thumbnails( - data, ('header', ..., ['banner', 'mobileBanner', 'tvBanner'])) + data, ('header', ..., ('banner', 'mobileBanner', 'tvBanner'))) for banner in channel_banners: banner['preference'] = -10 @@ -4323,46 +4933,78 @@ def _get_uncropped(url): 'preference': -5 }) + # Deprecated - remove primary_sidebar_renderer when layout discontinued + primary_sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') + playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer'), expected_type=dict) + primary_thumbnails = self._extract_thumbnails( primary_sidebar_renderer, ('thumbnailRenderer', ('playlistVideoThumbnailRenderer', 'playlistCustomThumbnailRenderer'), 'thumbnail')) + playlist_thumbnails = self._extract_thumbnails( + playlist_header_renderer, ('playlistHeaderBanner', 'heroPlaylistThumbnailRenderer', 'thumbnail')) - if playlist_id is None: - playlist_id = item_id - - playlist_stats = traverse_obj(primary_sidebar_renderer, 'stats') - last_updated_unix, _ = self._extract_time_text(playlist_stats, 2) - if title is None: - title = self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or playlist_id - title += format_field(selected_tab, 'title', ' - %s') - title += format_field(selected_tab, 'expandedText', ' - %s') - - metadata = { - 'playlist_id': playlist_id, - 'playlist_title': title, - 'playlist_description': description, - 'uploader': channel_name, - 'uploader_id': channel_id, - 'uploader_url': channel_url, - 'thumbnails': primary_thumbnails + avatar_thumbnails + channel_banners, - 'tags': tags, - 'view_count': self._get_count(playlist_stats, 1), + info.update({ + 'title': (traverse_obj(metadata_renderer, 'title') + or self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) + or info['id']), 'availability': self._extract_availability(data), - 'modified_date': strftime_or_none(last_updated_unix, '%Y%m%d'), - 'playlist_count': self._get_count(playlist_stats, 0), 'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')), - } - if not channel_id: - metadata.update(self._extract_uploader(data)) - metadata.update({ - 'channel': metadata['uploader'], - 'channel_id': metadata['uploader_id'], - 'channel_url': metadata['uploader_url']}) - return self.playlist_result( - self._entries( - selected_tab, playlist_id, ytcfg, - self._extract_account_syncid(ytcfg, data), - self._extract_visitor_data(data, ytcfg)), - **metadata) + 'description': try_get(metadata_renderer, lambda x: x.get('description', '')), + 'tags': try_get(metadata_renderer or {}, lambda x: x.get('keywords', '').split()), + 'thumbnails': 
(primary_thumbnails or playlist_thumbnails) + avatar_thumbnails + channel_banners, + }) + + channel_handle = ( + traverse_obj(metadata_renderer, (('vanityChannelUrl', ('ownerUrls', ...)), {self.handle_from_url}), get_all=False) + or traverse_obj(data, ('header', ..., 'channelHandleText', {self.handle_or_none}), get_all=False)) + + if channel_handle: + info.update({ + 'uploader_id': channel_handle, + 'uploader_url': format_field(channel_handle, None, 'https://www.youtube.com/%s', default=None), + }) + # Playlist stats is a text runs array containing [video count, view count, last updated]. + # last updated or (view count and last updated) may be missing. + playlist_stats = get_first( + (primary_sidebar_renderer, playlist_header_renderer), (('stats', 'briefStats', 'numVideosText'), )) + + last_updated_unix = self._parse_time_text( + self._get_text(playlist_stats, 2) # deprecated, remove when old layout discontinued + or self._get_text(playlist_header_renderer, ('byline', 1, 'playlistBylineRenderer', 'text'))) + info['modified_date'] = strftime_or_none(last_updated_unix, '%Y%m%d') + + info['view_count'] = self._get_count(playlist_stats, 1) + if info['view_count'] is None: # 0 is allowed + info['view_count'] = self._get_count(playlist_header_renderer, 'viewCountText') + if info['view_count'] is None: + info['view_count'] = self._get_count(data, ( + 'contents', 'twoColumnBrowseResultsRenderer', 'tabs', ..., 'tabRenderer', 'content', 'sectionListRenderer', + 'contents', ..., 'itemSectionRenderer', 'contents', ..., 'channelAboutFullMetadataRenderer', 'viewCountText')) + + info['playlist_count'] = self._get_count(playlist_stats, 0) + if info['playlist_count'] is None: # 0 is allowed + info['playlist_count'] = self._get_count(playlist_header_renderer, ('byline', 0, 'playlistBylineRenderer', 'text')) + + if not info.get('channel_id'): + owner = traverse_obj(playlist_header_renderer, 'ownerText') + if not owner: # Deprecated + owner = traverse_obj( + self._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer'), + ('videoOwner', 'videoOwnerRenderer', 'title')) + owner_text = self._get_text(owner) + browse_ep = traverse_obj(owner, ('runs', 0, 'navigationEndpoint', 'browseEndpoint')) or {} + info.update({ + 'channel': self._search_regex(r'^by (.+) and \d+ others?$', owner_text, 'uploader', default=owner_text), + 'channel_id': self.ucid_or_none(browse_ep.get('browseId')), + 'uploader_id': self.handle_from_url(urljoin('https://www.youtube.com', browse_ep.get('canonicalBaseUrl'))) + }) + + info.update({ + 'uploader': info['channel'], + 'channel_url': format_field(info.get('channel_id'), None, 'https://www.youtube.com/channel/%s', default=None), + 'uploader_url': format_field(info.get('uploader_id'), None, 'https://www.youtube.com/%s', default=None), + }) + + return info def _extract_inline_playlist(self, playlist, playlist_id, data, ytcfg): first_id = last_id = response = None @@ -4424,31 +5066,40 @@ def _extract_availability(self, data): Note: Unless YouTube tells us explicitly, we do not assume it is public @param data: response """ - is_private = is_unlisted = None - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {} - badge_labels = self._extract_badges(renderer) + sidebar_renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {} + playlist_header_renderer = traverse_obj(data, ('header', 'playlistHeaderRenderer')) or {} + player_header_privacy = playlist_header_renderer.get('privacy') + + badges = 
self._extract_badges(sidebar_renderer) # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge - privacy_dropdown_entries = try_get( - renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or [] - for renderer_dict in privacy_dropdown_entries: - is_selected = try_get( - renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False - if not is_selected: - continue - label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label')) - if label: - badge_labels.add(label.lower()) - break + privacy_setting_icon = get_first( + (playlist_header_renderer, sidebar_renderer), + ('privacyForm', 'dropdownFormFieldRenderer', 'dropdown', 'dropdownRenderer', 'entries', + lambda _, v: v['privacyDropdownItemRenderer']['isSelected'], 'privacyDropdownItemRenderer', 'icon', 'iconType'), + expected_type=str) - for badge_label in badge_labels: - if badge_label == 'unlisted': - is_unlisted = True - elif badge_label == 'private': - is_private = True - elif badge_label == 'public': - is_unlisted = is_private = False - return self._availability(is_private, False, False, False, is_unlisted) + microformats_is_unlisted = traverse_obj( + data, ('microformat', 'microformatDataRenderer', 'unlisted'), expected_type=bool) + + return ( + 'public' if ( + self._has_badge(badges, BadgeType.AVAILABILITY_PUBLIC) + or player_header_privacy == 'PUBLIC' + or privacy_setting_icon == 'PRIVACY_PUBLIC') + else self._availability( + is_private=( + self._has_badge(badges, BadgeType.AVAILABILITY_PRIVATE) + or player_header_privacy == 'PRIVATE' if player_header_privacy is not None + else privacy_setting_icon == 'PRIVACY_PRIVATE' if privacy_setting_icon is not None else None), + is_unlisted=( + self._has_badge(badges, BadgeType.AVAILABILITY_UNLISTED) + or player_header_privacy == 'UNLISTED' if player_header_privacy is not None + else privacy_setting_icon == 'PRIVACY_UNLISTED' if privacy_setting_icon is not None + else microformats_is_unlisted if microformats_is_unlisted is not None else None), + needs_subscription=self._has_badge(badges, BadgeType.AVAILABILITY_SUBSCRIPTION) or None, + needs_premium=self._has_badge(badges, BadgeType.AVAILABILITY_PREMIUM) or None, + needs_auth=False)) @staticmethod def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict): @@ -4461,39 +5112,23 @@ def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict): def _reload_with_unavailable_videos(self, item_id, data, ytcfg): """ - Get playlist with unavailable videos if the 'show unavailable videos' button exists. + Reload playlists with unavailable videos (e.g. private videos, region blocked, etc.) 
""" - browse_id = params = None - renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') - if not renderer: + is_playlist = bool(traverse_obj( + data, ('metadata', 'playlistMetadataRenderer'), ('header', 'playlistHeaderRenderer'))) + if not is_playlist: return - menu_renderer = try_get( - renderer, lambda x: x['menu']['menuRenderer']['items'], list) or [] - for menu_item in menu_renderer: - if not isinstance(menu_item, dict): - continue - nav_item_renderer = menu_item.get('menuNavigationItemRenderer') - text = try_get( - nav_item_renderer, lambda x: x['text']['simpleText'], str) - if not text or text.lower() != 'show unavailable videos': - continue - browse_endpoint = try_get( - nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {} - browse_id = browse_endpoint.get('browseId') - params = browse_endpoint.get('params') - break - headers = self.generate_api_headers( ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data), visitor_data=self._extract_visitor_data(data, ytcfg)) query = { - 'params': params or 'wgYCCAA=', - 'browseId': browse_id or 'VL%s' % item_id + 'params': 'wgYCCAA=', + 'browseId': f'VL{item_id}' } return self._extract_response( item_id=item_id, headers=headers, query=query, check_get_keys='contents', fatal=False, ytcfg=ytcfg, - note='Downloading API JSON with unavailable videos') + note='Redownloading playlist API JSON with unavailable videos') @functools.cached_property def skip_webpage(self): @@ -4545,8 +5180,7 @@ def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=Fals webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal) ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage) # Reject webpage data if redirected to home page without explicitly requesting - selected_tab = self._extract_selected_tab(traverse_obj( - data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {} + selected_tab = self._extract_selected_tab(self._extract_tab_renderers(data), fatal=False) or {} if (url != 'https://www.youtube.com/feed/recommended' and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch' # Home page and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])): @@ -4617,7 +5251,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): IE_DESC = 'YouTube Tabs' _VALID_URL = r'''(?x: https?:// - (?:\w+\.)? + (?!consent\.)(?:\w+\.)? 
(?: youtube(?:kids)?\.com| %(invidious)s @@ -4646,12 +5280,12 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'Igor Kleiner - Playlists', 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2', 'uploader': 'Igor Kleiner', - 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg', + 'uploader_id': '@IgorDataScience', + 'uploader_url': 'https://www.youtube.com/@IgorDataScience', 'channel': 'Igor Kleiner', 'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg', 'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'], 'channel_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg', - 'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg', 'channel_follower_count': int }, }, { @@ -4662,9 +5296,9 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCqj7Cz7revf5maW9g5pgNcg', 'title': 'Igor Kleiner - Playlists', 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2', - 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg', 'uploader': 'Igor Kleiner', - 'uploader_url': 'https://www.youtube.com/channel/UCqj7Cz7revf5maW9g5pgNcg', + 'uploader_id': '@IgorDataScience', + 'uploader_url': 'https://www.youtube.com/@IgorDataScience', 'tags': ['"критическое', 'мышление"', '"наука', 'просто"', 'математика', '"анализ', 'данных"'], 'channel_id': 'UCqj7Cz7revf5maW9g5pgNcg', 'channel': 'Igor Kleiner', @@ -4679,12 +5313,12 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCYO_jab_esuFRV4b17AJtAw', 'title': '3Blue1Brown - Playlists', 'description': 'md5:e1384e8a133307dd10edee76e875d62f', - 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw', - 'uploader': '3Blue1Brown', 'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', - 'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', 'channel': '3Blue1Brown', 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw', + 'uploader_id': '@3blue1brown', + 'uploader_url': 'https://www.youtube.com/@3blue1brown', + 'uploader': '3Blue1Brown', 'tags': ['Mathematics'], 'channel_follower_count': int }, @@ -4697,10 +5331,10 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'ThirstForScience - Playlists', 'description': 'md5:609399d937ea957b0f53cbffb747a14c', 'uploader': 'ThirstForScience', - 'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ', - 'uploader_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ', - 'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ', + 'uploader_url': 'https://www.youtube.com/@ThirstForScience', + 'uploader_id': '@ThirstForScience', 'channel_id': 'UCAEtajcuhQ6an9WEzY9LEMQ', + 'channel_url': 'https://www.youtube.com/channel/UCAEtajcuhQ6an9WEzY9LEMQ', 'tags': 'count:13', 'channel': 'ThirstForScience', 'channel_follower_count': int @@ -4712,8 +5346,6 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'note': 'basic, single video playlist', 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'info_dict': { - 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', - 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc', 'title': 'youtube-dl public playlist', 'description': '', @@ -4722,16 +5354,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'modified_date': '20201130', 'channel': 'Sergey M.', 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA', - 'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA', 'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA', + 'availability': 'public', + 'uploader': 'Sergey M.', + 'uploader_url': 'https://www.youtube.com/@sergeym.6173', + 'uploader_id': 
'@sergeym.6173', }, 'playlist_count': 1, }, { 'note': 'empty playlist', 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'info_dict': { - 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA', - 'uploader': 'Sergey M.', 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf', 'title': 'youtube-dl empty playlist', 'tags': [], @@ -4740,7 +5373,10 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'modified_date': '20160902', 'channel_id': 'UCmlqkdCBesrv2Lak1mF_MxA', 'channel_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA', - 'uploader_url': 'https://www.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA', + 'availability': 'public', + 'uploader_url': 'https://www.youtube.com/@sergeym.6173', + 'uploader_id': '@sergeym.6173', + 'uploader': 'Sergey M.', }, 'playlist_count': 0, }, { @@ -4751,10 +5387,10 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'lex will - Home', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_id': '@lexwill718', 'channel': 'lex will', 'tags': ['bible', 'history', 'prophesy'], - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_url': 'https://www.youtube.com/@lexwill718', 'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_follower_count': int @@ -4768,11 +5404,11 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'lex will - Videos', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_id': '@lexwill718', 'tags': ['bible', 'history', 'prophesy'], 'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_url': 'https://www.youtube.com/@lexwill718', 'channel': 'lex will', 'channel_follower_count': int }, @@ -4785,9 +5421,9 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'lex will - Videos', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_id': '@lexwill718', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_url': 'https://www.youtube.com/@lexwill718', 'channel': 'lex will', 'tags': ['bible', 'history', 'prophesy'], 'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', @@ -4802,8 +5438,8 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': 'lex will - Playlists', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', + 'uploader_id': '@lexwill718', + 'uploader_url': 'https://www.youtube.com/@lexwill718', 'channel': 'lex will', 'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', @@ -4818,14 +5454,14 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', 'title': 'lex will - Community', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', - 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel': 'lex will', 'channel_url': 
'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', 'tags': ['bible', 'history', 'prophesy'], - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader_url': 'https://www.youtube.com/@lexwill718', + 'uploader_id': '@lexwill718', + 'uploader': 'lex will', }, 'playlist_mincount': 18, }, { @@ -4835,14 +5471,14 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w', 'title': 'lex will - Channels', 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488', - 'uploader': 'lex will', - 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', - 'uploader_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel': 'lex will', 'channel_url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w', 'channel_id': 'UCKfVa3S1e4PHvxWcwyMMg8w', 'tags': ['bible', 'history', 'prophesy'], - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader_url': 'https://www.youtube.com/@lexwill718', + 'uploader_id': '@lexwill718', + 'uploader': 'lex will', }, 'playlist_mincount': 12, }, { @@ -4853,14 +5489,14 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCYO_jab_esuFRV4b17AJtAw', 'title': '3Blue1Brown - Search - linear algebra', 'description': 'md5:e1384e8a133307dd10edee76e875d62f', - 'uploader': '3Blue1Brown', - 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw', 'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', - 'uploader_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', 'tags': ['Mathematics'], 'channel': '3Blue1Brown', 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw', - 'channel_follower_count': int + 'channel_follower_count': int, + 'uploader_url': 'https://www.youtube.com/@3blue1brown', + 'uploader_id': '@3blue1brown', + 'uploader': '3Blue1Brown', }, }, { 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA', @@ -4877,16 +5513,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': '29C3: Not my department', 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC', - 'uploader': 'Christiaan008', - 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg', 'description': 'md5:a14dc1a8ef8307a9807fe136a0660268', 'tags': [], - 'uploader_url': 'https://www.youtube.com/c/ChRiStIaAn008', 'view_count': int, 'modified_date': '20150605', 'channel_id': 'UCEPzS1rYsrkqzSLNp76nrcg', - 'channel_url': 'https://www.youtube.com/c/ChRiStIaAn008', + 'channel_url': 'https://www.youtube.com/channel/UCEPzS1rYsrkqzSLNp76nrcg', 'channel': 'Christiaan008', + 'availability': 'public', + 'uploader_id': '@ChRiStIaAn008', + 'uploader': 'Christiaan008', + 'uploader_url': 'https://www.youtube.com/@ChRiStIaAn008', }, 'playlist_count': 96, }, { @@ -4895,16 +5532,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': 'Uploads from Cauchemar', 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q', - 'uploader': 'Cauchemar', - 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q', - 'channel_url': 'https://www.youtube.com/c/Cauchemar89', + 'channel_url': 'https://www.youtube.com/channel/UCBABnxM4Ar9ten8Mdjj1j0Q', 'tags': [], 'modified_date': r're:\d{8}', 'channel': 'Cauchemar', - 'uploader_url': 'https://www.youtube.com/c/Cauchemar89', 'view_count': int, 'description': '', 'channel_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q', + 'availability': 'public', + 'uploader_id': '@Cauchemar89', + 'uploader': 'Cauchemar', + 'uploader_url': 'https://www.youtube.com/@Cauchemar89', }, 'playlist_mincount': 1123, 'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'], @@ -4918,16 +5556,17 @@ class 
YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': 'Uploads from Interstellar Movie', 'id': 'UUXw-G3eDE9trcvY2sBMM_aA', - 'uploader': 'Interstellar Movie', - 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA', - 'uploader_url': 'https://www.youtube.com/c/InterstellarMovie', 'tags': [], 'view_count': int, 'channel_id': 'UCXw-G3eDE9trcvY2sBMM_aA', - 'channel_url': 'https://www.youtube.com/c/InterstellarMovie', + 'channel_url': 'https://www.youtube.com/channel/UCXw-G3eDE9trcvY2sBMM_aA', 'channel': 'Interstellar Movie', 'description': '', 'modified_date': r're:\d{8}', + 'availability': 'public', + 'uploader_id': '@InterstellarMovie', + 'uploader': 'Interstellar Movie', + 'uploader_url': 'https://www.youtube.com/@InterstellarMovie', }, 'playlist_mincount': 21, }, { @@ -4936,16 +5575,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': 'Uploads from Phim Siêu Nhân Nhật Bản', 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q', - 'uploader': 'Phim Siêu Nhân Nhật Bản', - 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q', 'view_count': int, 'channel': 'Phim Siêu Nhân Nhật Bản', 'tags': [], - 'uploader_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q', 'description': '', 'channel_url': 'https://www.youtube.com/channel/UCTYLiWFZy8xtPwxFwX9rV7Q', 'channel_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q', 'modified_date': r're:\d{8}', + 'availability': 'public', + 'uploader_url': 'https://www.youtube.com/@phimsieunhannhatban', + 'uploader_id': '@phimsieunhannhatban', + 'uploader': 'Phim Siêu Nhân Nhật Bản', }, 'playlist_mincount': 200, 'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'], @@ -4955,16 +5595,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': 'Uploads from BlankTV', 'id': 'UU8l9frL61Yl5KFOl87nIm2w', - 'uploader': 'BlankTV', - 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w', 'channel': 'BlankTV', - 'channel_url': 'https://www.youtube.com/c/blanktv', + 'channel_url': 'https://www.youtube.com/channel/UC8l9frL61Yl5KFOl87nIm2w', 'channel_id': 'UC8l9frL61Yl5KFOl87nIm2w', 'view_count': int, 'tags': [], - 'uploader_url': 'https://www.youtube.com/c/blanktv', 'modified_date': r're:\d{8}', 'description': '', + 'availability': 'public', + 'uploader_id': '@blanktv', + 'uploader': 'BlankTV', + 'uploader_url': 'https://www.youtube.com/@blanktv', }, 'playlist_mincount': 1000, 'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'], @@ -4974,15 +5615,17 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'info_dict': { 'title': 'Data Analysis with Dr Mike Pound', 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba', - 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA', - 'uploader': 'Computerphile', 'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487', - 'uploader_url': 'https://www.youtube.com/user/Computerphile', 'tags': [], 'view_count': int, 'channel_id': 'UC9-y-6csu5WGm29I7JiwpnA', - 'channel_url': 'https://www.youtube.com/user/Computerphile', + 'channel_url': 'https://www.youtube.com/channel/UC9-y-6csu5WGm29I7JiwpnA', 'channel': 'Computerphile', + 'availability': 'public', + 'modified_date': '20190712', + 'uploader_id': '@Computerphile', + 'uploader': 'Computerphile', + 'uploader_url': 'https://www.youtube.com/@Computerphile', }, 'playlist_mincount': 11, }, { @@ -4995,9 +5638,6 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'FqZTN594JQw', 'ext': 'webm', 'title': "Smiley's People 01 detective, Adventure Series, Action", - 'uploader': 'STREEM', - 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng', - 'uploader_url': 
r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng', 'upload_date': '20150526', 'license': 'Standard YouTube License', 'description': 'md5:507cdcb5a49ac0da37a920ece610be80', @@ -5020,29 +5660,30 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): }, { 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live', 'info_dict': { - 'id': 'Wq15eF5vCbI', # This will keep changing + 'id': 'AlTsmyW4auo', # This will keep changing 'ext': 'mp4', 'title': str, - 'uploader': 'Sky News', - 'uploader_id': 'skynews', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews', 'upload_date': r're:\d{8}', 'description': str, 'categories': ['News & Politics'], 'tags': list, 'like_count': int, - 'release_timestamp': 1642502819, + 'release_timestamp': int, 'channel': 'Sky News', 'channel_id': 'UCoMdktPbSTixAyNGwb-UYkQ', 'age_limit': 0, 'view_count': int, - 'thumbnail': 'https://i.ytimg.com/vi/GgL890LIznQ/maxresdefault_live.jpg', + 'thumbnail': r're:https?://i\.ytimg\.com/vi/[^/]+/maxresdefault(?:_live)?\.jpg', 'playable_in_embed': True, - 'release_date': '20220118', + 'release_date': r're:\d+', 'availability': 'public', 'live_status': 'is_live', 'channel_url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ', - 'channel_follower_count': int + 'channel_follower_count': int, + 'concurrent_view_count': int, + 'uploader_url': 'https://www.youtube.com/@SkyNews', + 'uploader_id': '@SkyNews', + 'uploader': 'Sky News', }, 'params': { 'skip_download': True, @@ -5054,9 +5695,6 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'a48o2S1cPoo', 'ext': 'mp4', 'title': 'The Young Turks - Live Main Show', - 'uploader': 'The Young Turks', - 'uploader_id': 'TheYoungTurks', - 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks', 'upload_date': '20150715', 'license': 'Standard YouTube License', 'description': 'md5:438179573adcdff3c97ebb1ee632b891', @@ -5120,7 +5758,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'title': '#cctv9', 'tags': [], }, - 'playlist_mincount': 350, + 'playlist_mincount': 300, # not consistent but should be over 300 }, { 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU', 'only_matching': True, @@ -5137,54 +5775,49 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq', 'info_dict': { 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq', - 'uploader': 'NoCopyrightSounds', 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!', - 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg', 'title': 'NCS : All Releases 💿', - 'uploader_url': 'https://www.youtube.com/c/NoCopyrightSounds', - 'channel_url': 'https://www.youtube.com/c/NoCopyrightSounds', + 'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg', 'modified_date': r're:\d{8}', 'view_count': int, 'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg', 'tags': [], 'channel': 'NoCopyrightSounds', + 'availability': 'public', + 'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds', + 'uploader': 'NoCopyrightSounds', + 'uploader_id': '@NoCopyrightSounds', }, 'playlist_mincount': 166, - 'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'], + 'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden', 'YouTube Music is not directly supported'], }, { + # TODO: fix 'unviewable' issue with this playlist when reloading with unavailable videos 'note': 'Topic, should redirect to 
playlist?list=UU...', 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw', 'info_dict': { 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw', - 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', 'title': 'Uploads from Royalty Free Music - Topic', - 'uploader': 'Royalty Free Music - Topic', 'tags': [], 'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', 'channel': 'Royalty Free Music - Topic', 'view_count': int, 'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw', - 'channel_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw', 'modified_date': r're:\d{8}', - 'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw', 'description': '', + 'availability': 'public', + 'uploader': 'Royalty Free Music - Topic', }, - 'expected_warnings': [ - 'The URL does not have a videos tab', - r'[Uu]navailable videos (are|will be) hidden', - ], 'playlist_mincount': 101, + 'expected_warnings': ['YouTube Music is not directly supported', r'[Uu]navailable videos (are|will be) hidden'], }, { - 'note': 'Topic without a UU playlist', + # Destination channel with only a hidden self tab (tab id is UCtFRv9O2AHqOZjjynzrv-xg) + # Treat as a general feed 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg', 'info_dict': { 'id': 'UCtFRv9O2AHqOZjjynzrv-xg', 'title': 'UCtFRv9O2AHqOZjjynzrv-xg', 'tags': [], }, - 'expected_warnings': [ - 'the playlist redirect gave error', - ], 'playlist_mincount': 9, }, { 'note': 'Youtube music Album', @@ -5199,12 +5832,11 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'modified_date': r're:\d{8}', }, 'playlist_count': 50, + 'expected_warnings': ['YouTube Music is not directly supported'], }, { 'note': 'unlisted single video playlist', 'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf', 'info_dict': { - 'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q', - 'uploader': 'colethedj', 'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf', 'title': 'yt-dlp unlisted playlist test', 'availability': 'unlisted', @@ -5213,9 +5845,11 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'channel': 'colethedj', 'view_count': int, 'description': '', - 'uploader_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q', 'channel_id': 'UC9zHu_mHU96r19o-wV5Qs1Q', 'channel_url': 'https://www.youtube.com/channel/UC9zHu_mHU96r19o-wV5Qs1Q', + 'uploader_url': 'https://www.youtube.com/@colethedj1894', + 'uploader_id': '@colethedj1894', + 'uploader': 'colethedj', }, 'playlist_count': 1, }, { @@ -5238,13 +5872,10 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw', 'title': 'Cody\'sLab - Videos', 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa', - 'uploader': 'Cody\'sLab', - 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw', 'channel': 'Cody\'sLab', 'channel_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw', 'tags': [], 'channel_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw', - 'uploader_url': 'https://www.youtube.com/channel/UCu6mSoMNzHQiBIOCkHUa2Aw', 'channel_follower_count': int }, 'playlist_mincount': 650, @@ -5252,14 +5883,13 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'skip_download': True, 'extractor_args': {'youtubetab': {'skip': ['webpage']}} }, + 'skip': 'Query for sorting no longer works', }, { 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...', 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw', 'info_dict': { 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw', - 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', 'title': 'Uploads from Royalty Free Music - Topic', - 'uploader': 'Royalty Free 
Music - Topic', 'modified_date': r're:\d{8}', 'channel_id': 'UC9ALqqC4aIeG5iDs7i90Bfw', 'description': '', @@ -5267,17 +5897,15 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'tags': [], 'channel': 'Royalty Free Music - Topic', 'view_count': int, - 'uploader_url': 'https://www.youtube.com/channel/UC9ALqqC4aIeG5iDs7i90Bfw', + 'availability': 'public', + 'uploader': 'Royalty Free Music - Topic', }, - 'expected_warnings': [ - 'does not have a videos tab', - r'[Uu]navailable videos (are|will be) hidden', - ], 'playlist_mincount': 101, 'params': { 'skip_download': True, 'extractor_args': {'youtubetab': {'skip': ['webpage']}} }, + 'expected_warnings': ['YouTube Music is not directly supported', r'[Uu]navailable videos (are|will be) hidden'], }, { 'note': 'non-standard redirect to regional channel', 'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ', @@ -5290,24 +5918,366 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor): 'modified_date': '20220407', 'channel_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q', 'tags': [], - 'uploader_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q', - 'uploader': 'pukkandan', 'availability': 'unlisted', 'channel_id': 'UCKcqXmCcyqnhgpA5P0oHH_Q', 'channel': 'pukkandan', 'description': 'Test for collaborative playlist', 'title': 'yt-dlp test - collaborative playlist', 'view_count': int, - 'uploader_url': 'https://www.youtube.com/channel/UCKcqXmCcyqnhgpA5P0oHH_Q', + 'uploader_url': 'https://www.youtube.com/@pukkandan', + 'uploader_id': '@pukkandan', + 'uploader': 'pukkandan', }, 'playlist_mincount': 2 + }, { + 'note': 'translated tab name', + 'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/playlists', + 'info_dict': { + 'id': 'UCiu-3thuViMebBjw_5nWYrA', + 'tags': [], + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'description': 'test description', + 'title': 'cole-dlp-test-acc - 再生リスト', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel': 'cole-dlp-test-acc', + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'playlist_mincount': 1, + 'params': {'extractor_args': {'youtube': {'lang': ['ja']}}}, + 'expected_warnings': ['Preferring "ja"'], + }, { + # XXX: this should really check flat playlist entries, but the test suite doesn't support that + 'note': 'preferred lang set with playlist with translated video titles', + 'url': 'https://www.youtube.com/playlist?list=PLt5yu3-wZAlQAaPZ5Z-rJoTdbT-45Q7c0', + 'info_dict': { + 'id': 'PLt5yu3-wZAlQAaPZ5Z-rJoTdbT-45Q7c0', + 'tags': [], + 'view_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'channel': 'cole-dlp-test-acc', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'description': 'test', + 'title': 'dlp test playlist', + 'availability': 'public', + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'playlist_mincount': 1, + 'params': {'extractor_args': {'youtube': {'lang': ['ja']}}}, + 'expected_warnings': ['Preferring "ja"'], + }, { + # shorts audio pivot for 2GtVksBMYFM. 
+ 'url': 'https://www.youtube.com/feed/sfv_audio_pivot?bp=8gUrCikSJwoLMkd0VmtzQk1ZRk0SCzJHdFZrc0JNWUZNGgsyR3RWa3NCTVlGTQ==', + 'info_dict': { + 'id': 'sfv_audio_pivot', + 'title': 'sfv_audio_pivot', + 'tags': [], + }, + 'playlist_mincount': 50, + + }, { + # Channel with a real live tab (not to be mistaken with streams tab) + # Do not treat like it should redirect to live stream + 'url': 'https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live', + 'info_dict': { + 'id': 'UCEH7P7kyJIkS_gJf93VYbmg', + 'title': 'UCEH7P7kyJIkS_gJf93VYbmg - Live', + 'tags': [], + }, + 'playlist_mincount': 20, + }, { + # Tab name is not the same as tab id + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/letsplay', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Let\'s play', + 'tags': [], + }, + 'playlist_mincount': 8, + }, { + # Home tab id is literally home. Not to get mistaken with featured + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/home', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Home', + 'tags': [], + }, + 'playlist_mincount': 8, + }, { + # Should get three playlists for videos, shorts and streams tabs + 'url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA', + 'info_dict': { + 'id': 'UCK9V2B22uJYu3N7eR_BT9QA', + 'title': 'Polka Ch. 尾丸ポルカ', + 'channel_follower_count': int, + 'channel_id': 'UCK9V2B22uJYu3N7eR_BT9QA', + 'channel_url': 'https://www.youtube.com/channel/UCK9V2B22uJYu3N7eR_BT9QA', + 'description': 'md5:e56b74b5bb7e9c701522162e9abfb822', + 'channel': 'Polka Ch. 尾丸ポルカ', + 'tags': 'count:35', + 'uploader_url': 'https://www.youtube.com/@OmaruPolka', + 'uploader': 'Polka Ch. 尾丸ポルカ', + 'uploader_id': '@OmaruPolka', + }, + 'playlist_count': 3, + }, { + # Shorts tab with channel with handle + # TODO: fix channel description + 'url': 'https://www.youtube.com/@NotJustBikes/shorts', + 'info_dict': { + 'id': 'UC0intLFzLaudFG-xAvUEO-A', + 'title': 'Not Just Bikes - Shorts', + 'tags': 'count:12', + 'channel_url': 'https://www.youtube.com/channel/UC0intLFzLaudFG-xAvUEO-A', + 'description': 'md5:26bc55af26855a608a5cf89dfa595c8d', + 'channel_follower_count': int, + 'channel_id': 'UC0intLFzLaudFG-xAvUEO-A', + 'channel': 'Not Just Bikes', + 'uploader_url': 'https://www.youtube.com/@NotJustBikes', + 'uploader': 'Not Just Bikes', + 'uploader_id': '@NotJustBikes', + }, + 'playlist_mincount': 10, + }, { + # Streams tab + 'url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig/streams', + 'info_dict': { + 'id': 'UC3eYAvjCVwNHgkaGbXX3sig', + 'title': '中村悠一 - Live', + 'tags': 'count:7', + 'channel_id': 'UC3eYAvjCVwNHgkaGbXX3sig', + 'channel_url': 'https://www.youtube.com/channel/UC3eYAvjCVwNHgkaGbXX3sig', + 'channel': '中村悠一', + 'channel_follower_count': int, + 'description': 'md5:e744f6c93dafa7a03c0c6deecb157300', + 'uploader_url': 'https://www.youtube.com/@Yuichi-Nakamura', + 'uploader_id': '@Yuichi-Nakamura', + 'uploader': '中村悠一', + }, + 'playlist_mincount': 60, + }, { + # Channel with no uploads and hence no videos, streams, shorts tabs or uploads playlist. This should fail. + # See test_youtube_lists + 'url': 'https://www.youtube.com/channel/UC2yXPzFejc422buOIzn_0CA', + 'only_matching': True, + }, { + # No uploads and no UCID given. 
Should fail with no uploads error + # See test_youtube_lists + 'url': 'https://www.youtube.com/news', + 'only_matching': True + }, { + # No videos tab but has a shorts tab + 'url': 'https://www.youtube.com/c/TKFShorts', + 'info_dict': { + 'id': 'UCgJ5_1F6yJhYLnyMszUdmUg', + 'title': 'Shorts Break - Shorts', + 'tags': 'count:48', + 'channel_id': 'UCgJ5_1F6yJhYLnyMszUdmUg', + 'channel': 'Shorts Break', + 'description': 'md5:6de33c5e7ba686e5f3efd4e19c7ef499', + 'channel_follower_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCgJ5_1F6yJhYLnyMszUdmUg', + 'uploader_url': 'https://www.youtube.com/@ShortsBreak_Official', + 'uploader': 'Shorts Break', + 'uploader_id': '@ShortsBreak_Official', + }, + 'playlist_mincount': 30, + }, { + # Trending Now Tab. tab id is empty + 'url': 'https://www.youtube.com/feed/trending', + 'info_dict': { + 'id': 'trending', + 'title': 'trending - Now', + 'tags': [], + }, + 'playlist_mincount': 30, + }, { + # Trending Gaming Tab. tab id is empty + 'url': 'https://www.youtube.com/feed/trending?bp=4gIcGhpnYW1pbmdfY29ycHVzX21vc3RfcG9wdWxhcg%3D%3D', + 'info_dict': { + 'id': 'trending', + 'title': 'trending - Gaming', + 'tags': [], + }, + 'playlist_mincount': 30, + }, { + # Shorts url result in shorts tab + # TODO: Fix channel id extraction + 'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/shorts', + 'info_dict': { + 'id': 'UCiu-3thuViMebBjw_5nWYrA', + 'title': 'cole-dlp-test-acc - Shorts', + 'channel': 'cole-dlp-test-acc', + 'description': 'test description', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'tags': [], + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'playlist': [{ + 'info_dict': { + # Channel data is not currently available for short renderers (as of 2023-03-01) + '_type': 'url', + 'ie_key': 'Youtube', + 'url': 'https://www.youtube.com/shorts/sSM9J5YH_60', + 'id': 'sSM9J5YH_60', + 'title': 'SHORT short', + 'view_count': int, + 'thumbnails': list, + } + }], + 'params': {'extract_flat': True}, + }, { + # Live video status should be extracted + 'url': 'https://www.youtube.com/channel/UCQvWX73GQygcwXOTSf_VDVg/live', + 'info_dict': { + 'id': 'UCQvWX73GQygcwXOTSf_VDVg', + 'title': 'UCQvWX73GQygcwXOTSf_VDVg - Live', # TODO, should be Minecraft - Live or Minecraft - Topic - Live + 'tags': [] + }, + 'playlist': [{ + 'info_dict': { + '_type': 'url', + 'ie_key': 'Youtube', + 'url': 'startswith:https://www.youtube.com/watch?v=', + 'id': str, + 'title': str, + 'live_status': 'is_live', + 'channel_id': str, + 'channel_url': str, + 'concurrent_view_count': int, + 'channel': str, + } + }], + 'params': {'extract_flat': True, 'playlist_items': '1'}, + 'playlist_mincount': 1 + }, { + # Channel renderer metadata. 
Contains number of videos on the channel + 'url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA/channels', + 'info_dict': { + 'id': 'UCiu-3thuViMebBjw_5nWYrA', + 'title': 'cole-dlp-test-acc - Channels', + 'channel': 'cole-dlp-test-acc', + 'description': 'test description', + 'channel_id': 'UCiu-3thuViMebBjw_5nWYrA', + 'channel_url': 'https://www.youtube.com/channel/UCiu-3thuViMebBjw_5nWYrA', + 'tags': [], + 'uploader_url': 'https://www.youtube.com/@coletdjnz', + 'uploader_id': '@coletdjnz', + 'uploader': 'cole-dlp-test-acc', + }, + 'playlist': [{ + 'info_dict': { + '_type': 'url', + 'ie_key': 'YoutubeTab', + 'url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'title': 'PewDiePie', + 'channel': 'PewDiePie', + 'channel_url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'thumbnails': list, + 'channel_follower_count': int, + 'playlist_count': int, + 'uploader': 'PewDiePie', + 'uploader_url': 'https://www.youtube.com/@PewDiePie', + 'uploader_id': '@PewDiePie', + } + }], + 'params': {'extract_flat': True}, + }, { + 'url': 'https://www.youtube.com/@3blue1brown/about', + 'info_dict': { + 'id': 'UCYO_jab_esuFRV4b17AJtAw', + 'tags': ['Mathematics'], + 'title': '3Blue1Brown - About', + 'channel_follower_count': int, + 'channel_id': 'UCYO_jab_esuFRV4b17AJtAw', + 'channel': '3Blue1Brown', + 'view_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCYO_jab_esuFRV4b17AJtAw', + 'description': 'md5:e1384e8a133307dd10edee76e875d62f', + 'uploader_url': 'https://www.youtube.com/@3blue1brown', + 'uploader_id': '@3blue1brown', + 'uploader': '3Blue1Brown', + }, + 'playlist_count': 0, + }, { + # Podcasts tab, with rich entry playlistRenderers + 'url': 'https://www.youtube.com/@99percentinvisiblepodcast/podcasts', + 'info_dict': { + 'id': 'UCVMF2HD4ZgC0QHpU9Yq5Xrw', + 'channel_id': 'UCVMF2HD4ZgC0QHpU9Yq5Xrw', + 'uploader_url': 'https://www.youtube.com/@99percentinvisiblepodcast', + 'description': 'md5:3a0ed38f1ad42a68ef0428c04a15695c', + 'title': '99 Percent Invisible - Podcasts', + 'uploader': '99 Percent Invisible', + 'channel_follower_count': int, + 'channel_url': 'https://www.youtube.com/channel/UCVMF2HD4ZgC0QHpU9Yq5Xrw', + 'tags': [], + 'channel': '99 Percent Invisible', + 'uploader_id': '@99percentinvisiblepodcast', + }, + 'playlist_count': 1, + }, { + # Releases tab, with rich entry playlistRenderers (same as Podcasts tab) + 'url': 'https://www.youtube.com/@AHimitsu/releases', + 'info_dict': { + 'id': 'UCgFwu-j5-xNJml2FtTrrB3A', + 'channel': 'A Himitsu', + 'uploader_url': 'https://www.youtube.com/@AHimitsu', + 'title': 'A Himitsu - Releases', + 'uploader_id': '@AHimitsu', + 'uploader': 'A Himitsu', + 'channel_id': 'UCgFwu-j5-xNJml2FtTrrB3A', + 'tags': 'count:16', + 'description': 'I make music', + 'channel_url': 'https://www.youtube.com/channel/UCgFwu-j5-xNJml2FtTrrB3A', + 'channel_follower_count': int, + }, + 'playlist_mincount': 10, }] @classmethod def suitable(cls, url): return False if YoutubeIE.suitable(url) else super().suitable(url) - _URL_RE = re.compile(rf'(?P
<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/\w+))?(?P<post>.*)$')
+    _URL_RE = re.compile(rf'(?P<pre>{_VALID_URL})(?(not_channel)|(?P<tab>/[^?#/]+))?(?P<post>.*)$')
+
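+    # Convenience wrapper around _URL_RE. For e.g. 'https://www.youtube.com/@3blue1brown/videos'
+    # the groups come out roughly as {'pre': 'https://www.youtube.com/@3blue1brown', 'tab': '/videos', 'post': ''}
+    # (illustrative values, assuming the usual _VALID_URL named groups); missing groups are normalised to ''.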
+    def _get_url_mobj(self, url):
+        mobj = self._URL_RE.match(url).groupdict()
+        mobj.update((k, '') for k, v in mobj.items() if v is None)
+        return mobj
+
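+    # Best-effort resolution of a tabRenderer into a stable (tab_id, tab_name) pair.
+    # e.g. an English "Live" tab whose webCommandMetadata url ends in '/streams' is expected to
+    # resolve to ('streams', 'live'), and the sponsorships tab id is normalised to 'membership'
+    # (illustrative example, not exhaustive).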
+    def _extract_tab_id_and_name(self, tab, base_url='https://www.youtube.com'):
+        tab_name = (tab.get('title') or '').lower()
+        tab_url = urljoin(base_url, traverse_obj(
+            tab, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url')))
+
+        tab_id = (tab_url and self._get_url_mobj(tab_url)['tab'][1:]
+                  or traverse_obj(tab, 'tabIdentifier', expected_type=str))
+        if tab_id:
+            return {
+                'TAB_ID_SPONSORSHIPS': 'membership',
+            }.get(tab_id, tab_id), tab_name
+
+        # Fallback to tab name if we cannot get the tab id.
+        # XXX: should we strip non-ascii letters? e.g. in case of 'let's play' tab example on special gaming channel
+        # Note that in the case of translated tab name this may result in an empty string, which we don't want.
+        if tab_name:
+            self.write_debug(f'Falling back to selected tab name: {tab_name}')
+        return {
+            'home': 'featured',
+            'live': 'streams',
+        }.get(tab_name, tab_name), tab_name
+
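+    # e.g. self._has_tab(tabs, 'shorts') reports whether any of the rendered tabs resolves to the Shorts tab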
+    def _has_tab(self, tabs, tab_id):
+        return any(self._extract_tab_id_and_name(tab)[0] == tab_id for tab in tabs)
 
     @YoutubeTabBaseInfoExtractor.passthrough_smuggled_data
     def _real_extract(self, url, smuggled_data):
@@ -5316,113 +6286,137 @@ def _real_extract(self, url, smuggled_data):
             urllib.parse.urlparse(url)._replace(netloc='www.youtube.com'))
         compat_opts = self.get_param('compat_opts', [])
 
-        def get_mobj(url):
-            mobj = self._URL_RE.match(url).groupdict()
-            mobj.update((k, '') for k, v in mobj.items() if v is None)
-            return mobj
-
-        mobj, redirect_warning = get_mobj(url), None
-        # Youtube returns incomplete data if tabname is not lower case
-        pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
-        if is_channel:
-            if smuggled_data.get('is_music_url'):
-                if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
-                    item_id = item_id[2:]
-                    pre, tab, post, is_channel = f'https://www.youtube.com/playlist?list={item_id}', '', '', False
-                elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
-                    mdata = self._extract_tab_endpoint(
-                        f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
-                    murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
-                                        get_all=False, expected_type=str)
-                    if not murl:
-                        raise ExtractorError('Failed to resolve album to playlist')
-                    return self.url_result(murl, ie=YoutubeTabIE.ie_key())
-                elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
-                    pre = f'https://www.youtube.com/channel/{item_id}'
-
-        original_tab_name = tab
+        mobj = self._get_url_mobj(url)
+        pre, tab, post, is_channel = mobj['pre'], mobj['tab'], mobj['post'], not mobj['not_channel']
+        if is_channel and smuggled_data.get('is_music_url'):
+            if item_id[:2] == 'VL':  # Youtube music VL channels have an equivalent playlist
+                return self.url_result(
+                    f'https://music.youtube.com/playlist?list={item_id[2:]}', YoutubeTabIE, item_id[2:])
+            elif item_id[:2] == 'MP':  # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
+                mdata = self._extract_tab_endpoint(
+                    f'https://music.youtube.com/channel/{item_id}', item_id, default_client='web_music')
+                murl = traverse_obj(mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'),
+                                    get_all=False, expected_type=str)
+                if not murl:
+                    raise ExtractorError('Failed to resolve album to playlist')
+                return self.url_result(murl, YoutubeTabIE)
+            elif mobj['channel_type'] == 'browse':  # Youtube music /browse/ should be changed to /channel/
+                return self.url_result(
+                    f'https://music.youtube.com/channel/{item_id}{tab}{post}', YoutubeTabIE, item_id)
+
+        original_tab_id, display_id = tab[1:], f'{item_id}{tab}'
         if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
-            # Home URLs should redirect to /videos/
-            redirect_warning = ('A channel/user page was given. All the channel\'s videos will be downloaded. '
-                                'To download only the videos in the home page, add a "/featured" to the URL')
-            tab = '/videos'
-
-        url = ''.join((pre, tab, post))
-        mobj = get_mobj(url)
+            url = f'{pre}/videos{post}'
+        if smuggled_data.get('is_music_url'):
+            self.report_warning(f'YouTube Music is not directly supported. Redirecting to {url}')
 
         # Handle both video/playlist URLs
         qs = parse_qs(url)
-        video_id, playlist_id = (qs.get(key, [None])[0] for key in ('v', 'list'))
-
+        video_id, playlist_id = [traverse_obj(qs, (key, 0)) for key in ('v', 'list')]
         if not video_id and mobj['not_channel'].startswith('watch'):
             if not playlist_id:
                 # If there is neither video or playlist ids, youtube redirects to home page, which is undesirable
-                raise ExtractorError('Unable to recognize tab page')
+                raise ExtractorError('A video URL was given without video ID', expected=True)
             # Common mistake: https://www.youtube.com/watch?list=playlist_id
             self.report_warning(f'A video URL was given without video ID. Trying to download playlist {playlist_id}')
-            url = f'https://www.youtube.com/playlist?list={playlist_id}'
-            mobj = get_mobj(url)
+            return self.url_result(
+                f'https://www.youtube.com/playlist?list={playlist_id}', YoutubeTabIE, playlist_id)
 
-        if video_id and playlist_id:
-            if self.get_param('noplaylist'):
-                self.to_screen(f'Downloading just video {video_id} because of --no-playlist')
-                return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                       ie=YoutubeIE.ie_key(), video_id=video_id)
-            self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
+        if not self._yes_playlist(playlist_id, video_id):
+            return self.url_result(
+                f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
 
-        data, ytcfg = self._extract_data(url, item_id)
+        data, ytcfg = self._extract_data(url, display_id)
 
         # YouTube may provide a non-standard redirect to the regional channel
         # See: https://github.com/yt-dlp/yt-dlp/issues/2694
+        # https://support.google.com/youtube/answer/2976814#zippy=,conditional-redirects
         redirect_url = traverse_obj(
             data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
         if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
-            redirect_url = ''.join((
-                urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
-            self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
-            return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
+            redirect_url = ''.join((urljoin('https://www.youtube.com', redirect_url), tab, post))
+            self.to_screen(f'This playlist is likely not available in your region. Following conditional redirect to {redirect_url}')
+            return self.url_result(redirect_url, YoutubeTabIE)
 
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
+        tabs, extra_tabs = self._extract_tab_renderers(data), []
+        if is_channel and tabs and 'no-youtube-channel-redirect' not in compat_opts:
             selected_tab = self._extract_selected_tab(tabs)
-            selected_tab_name = selected_tab.get('title', '').lower()
-            if selected_tab_name == 'home':
-                selected_tab_name = 'featured'
-            requested_tab_name = mobj['tab'][1:]
-            if 'no-youtube-channel-redirect' not in compat_opts:
-                if requested_tab_name == 'live':  # Live tab should have redirected to the video
-                    raise UserNotLive(video_id=mobj['id'])
-                if requested_tab_name not in ('', selected_tab_name):
-                    redirect_warning = f'The channel does not have a {requested_tab_name} tab'
-                    if not original_tab_name:
-                        if item_id[:2] == 'UC':
-                            # Topic channels don't have /videos. Use the equivalent playlist instead
-                            pl_id = f'UU{item_id[2:]}'
-                            pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
-                            try:
-                                data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
-                            except ExtractorError:
-                                redirect_warning += ' and the playlist redirect gave error'
-                            else:
-                                item_id, url, selected_tab_name = pl_id, pl_url, requested_tab_name
-                                redirect_warning += f'. Redirecting to playlist {pl_id} instead'
-                        if selected_tab_name and selected_tab_name != requested_tab_name:
-                            redirect_warning += f'. {selected_tab_name} tab is being downloaded instead'
+            selected_tab_id, selected_tab_name = self._extract_tab_id_and_name(selected_tab, url)  # NB: Name may be translated
+            self.write_debug(f'Selected tab: {selected_tab_id!r} ({selected_tab_name}), Requested tab: {original_tab_id!r}')
+
+            if not original_tab_id and selected_tab_name:
+                self.to_screen('Downloading all uploads of the channel. '
+                               'To download only the videos in a specific tab, pass the tab\'s URL')
+                if self._has_tab(tabs, 'streams'):
+                    extra_tabs.append(''.join((pre, '/streams', post)))
+                if self._has_tab(tabs, 'shorts'):
+                    extra_tabs.append(''.join((pre, '/shorts', post)))
+                # XXX: Members-only tab should also be extracted
+
+                if not extra_tabs and selected_tab_id != 'videos':
+                    # Channel does not have streams, shorts or videos tabs
+                    if item_id[:2] != 'UC':
+                        raise ExtractorError('This channel has no uploads', expected=True)
+
+                    # Topic channels don't have /videos. Use the equivalent playlist instead
+                    pl_id = f'UU{item_id[2:]}'
+                    pl_url = f'https://www.youtube.com/playlist?list={pl_id}'
+                    try:
+                        data, ytcfg = self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True, webpage_fatal=True)
+                    except ExtractorError:
+                        raise ExtractorError('This channel has no uploads', expected=True)
                     else:
-                        raise ExtractorError(redirect_warning, expected=True)
-
-        if redirect_warning:
-            self.to_screen(redirect_warning)
-        self.write_debug(f'Final URL: {url}')
+                        item_id, url = pl_id, pl_url
+                        self.to_screen(
+                            f'The channel does not have a videos, shorts, or live tab. Redirecting to playlist {pl_id} instead')
+
+                elif extra_tabs and selected_tab_id != 'videos':
+                    # When there are shorts/live tabs but not videos tab
+                    url, data = f'{pre}{post}', None
+
+            elif (original_tab_id or 'videos') != selected_tab_id:
+                if original_tab_id == 'live':
+                    # Live tab should have redirected to the video
+                    # Except in the case the channel has an actual live tab
+                    # Example: https://www.youtube.com/channel/UCEH7P7kyJIkS_gJf93VYbmg/live
+                    raise UserNotLive(video_id=item_id)
+                elif selected_tab_name:
+                    raise ExtractorError(f'This channel does not have a {original_tab_id} tab', expected=True)
+
+                # For channels such as https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg
+                url = f'{pre}{post}'
 
         # YouTube sometimes provides a button to reload playlist with unavailable videos.
         if 'no-youtube-unavailable-videos' not in compat_opts:
-            data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
+            data = self._reload_with_unavailable_videos(display_id, data, ytcfg) or data
         self._extract_and_report_alerts(data, only_once=True)
-        tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
-        if tabs:
-            return self._extract_from_tabs(item_id, ytcfg, data, tabs)
 
+        tabs, entries = self._extract_tab_renderers(data), []
+        if tabs:
+            entries = [self._extract_from_tabs(item_id, ytcfg, data, tabs)]
+            entries[0].update({
+                'extractor_key': YoutubeTabIE.ie_key(),
+                'extractor': YoutubeTabIE.IE_NAME,
+                'webpage_url': url,
+            })
+        if self.get_param('playlist_items') == '0':
+            entries.extend(self.url_result(u, YoutubeTabIE) for u in extra_tabs)
+        else:  # Users expect to get all `video_id`s even with `--flat-playlist`. So don't return `url_result`
+            entries.extend(map(self._real_extract, extra_tabs))
+
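+        # A single resolved tab is returned as-is; when videos/shorts/streams were split into
+        # several tab playlists above, wrap them in one outer playlist so each tab remains its own entry.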
+        if len(entries) == 1:
+            return entries[0]
+        elif entries:
+            metadata = self._extract_metadata_from_tabs(item_id, data)
+            uploads_url = 'the Uploads (UU) playlist URL'
+            if try_get(metadata, lambda x: x['channel_id'].startswith('UC')):
+                uploads_url = f'https://www.youtube.com/playlist?list=UU{metadata["channel_id"][2:]}'
+            self.to_screen(
+                'Downloading as multiple playlists, separated by tabs. '
+                f'To download as a single playlist instead, pass {uploads_url}')
+            return self.playlist_result(entries, item_id, **metadata)
+
+        # Inline playlist
         playlist = traverse_obj(
             data, ('contents', 'twoColumnWatchNextResults', 'playlist', 'playlist'), expected_type=dict)
         if playlist:
@@ -5431,10 +6425,9 @@ def get_mobj(url):
         video_id = traverse_obj(
             data, ('currentVideoEndpoint', 'watchEndpoint', 'videoId'), expected_type=str) or video_id
         if video_id:
-            if mobj['tab'] != '/live':  # live tab is expected to redirect to video
+            if tab != '/live':  # live tab is expected to redirect to video
                 self.report_warning(f'Unable to recognize playlist. Downloading just video {video_id}')
-            return self.url_result(f'https://www.youtube.com/watch?v={video_id}',
-                                   ie=YoutubeIE.ie_key(), video_id=video_id)
+            return self.url_result(f'https://www.youtube.com/watch?v={video_id}', YoutubeIE, video_id)
 
         raise ExtractorError('Unable to recognize tab page')
 
@@ -5464,15 +6457,16 @@ class YoutubePlaylistIE(InfoExtractor):
             'title': '[OLD]Team Fortress 2 (Class-based LP)',
             'id': 'PLBB231211A4F62143',
             'uploader': 'Wickman',
-            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
+            'uploader_id': '@WickmanVT',
             'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
             'view_count': int,
-            'uploader_url': 'https://www.youtube.com/user/Wickydoo',
+            'uploader_url': 'https://www.youtube.com/@WickmanVT',
             'modified_date': r're:\d{8}',
             'channel_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
             'channel': 'Wickman',
             'tags': [],
-            'channel_url': 'https://www.youtube.com/user/Wickydoo',
+            'channel_url': 'https://www.youtube.com/channel/UCKSpbfbl5kRQpTdL7kMc-1Q',
+            'availability': 'public',
         },
         'playlist_mincount': 29,
     }, {
@@ -5491,7 +6485,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'title': 'JODA15',
             'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
             'uploader': 'milan',
-            'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
+            'uploader_id': '@milan5503',
             'description': '',
             'channel_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
             'tags': [],
@@ -5499,9 +6493,10 @@ class YoutubePlaylistIE(InfoExtractor):
             'view_count': int,
             'channel': 'milan',
             'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
-            'uploader_url': 'https://www.youtube.com/channel/UCEI1-PVPcYXjB73Hfelbmaw',
+            'uploader_url': 'https://www.youtube.com/@milan5503',
+            'availability': 'public',
         },
-        'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
+        'expected_warnings': [r'[Uu]navailable videos? (is|are|will be) hidden'],
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
         'playlist_mincount': 455,
@@ -5509,15 +6504,16 @@ class YoutubePlaylistIE(InfoExtractor):
             'title': '2018 Chinese New Singles (11/6 updated)',
             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
             'uploader': 'LBK',
-            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
+            'uploader_id': '@music_king',
             'description': 'md5:da521864744d60a198e3a88af4db0d9d',
             'channel': 'LBK',
             'view_count': int,
-            'channel_url': 'https://www.youtube.com/c/愛低音的國王',
+            'channel_url': 'https://www.youtube.com/channel/UC21nz3_MesPLqtDqwdvnoxA',
             'tags': [],
-            'uploader_url': 'https://www.youtube.com/c/愛低音的國王',
+            'uploader_url': 'https://www.youtube.com/@music_king',
             'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
             'modified_date': r're:\d{8}',
+            'availability': 'public',
         },
         'expected_warnings': [r'[Uu]navailable videos (are|will be) hidden'],
     }, {
@@ -5560,8 +6556,8 @@ class YoutubeYtBeIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Small Scale Baler and Braiding Rugs',
             'uploader': 'Backus-Page House Museum',
-            'uploader_id': 'backuspagemuseum',
-            'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
+            'uploader_id': '@backuspagemuseum',
+            'uploader_url': r're:https?://(?:www\.)?youtube\.com/@backuspagemuseum',
             'upload_date': '20161008',
             'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
             'categories': ['Nonprofits & Activism'],
@@ -5569,7 +6565,7 @@ class YoutubeYtBeIE(InfoExtractor):
             'like_count': int,
             'age_limit': 0,
             'playable_in_embed': True,
-            'thumbnail': 'https://i.ytimg.com/vi_webp/yeWKywCrFtk/maxresdefault.webp',
+            'thumbnail': r're:^https?://.*\.webp',
             'channel': 'Backus-Page House Museum',
             'channel_id': 'UCEfMCQ9bs3tjvjy1s451zaw',
             'live_status': 'not_live',
@@ -5627,9 +6623,7 @@ class YoutubeYtUserIE(InfoExtractor):
 
     def _real_extract(self, url):
         user_id = self._match_id(url)
-        return self.url_result(
-            'https://www.youtube.com/user/%s/videos' % user_id,
-            ie=YoutubeTabIE.ie_key(), video_id=user_id)
+        return self.url_result(f'https://www.youtube.com/user/{user_id}', YoutubeTabIE, user_id)
 
 
 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
@@ -5687,7 +6681,7 @@ def _extract_notification_renderer(self, notification):
         if not video_id:
             browse_ep = traverse_obj(
                 notification, ('navigationEndpoint', 'browseEndpoint'), expected_type=dict)
-            channel_id = traverse_obj(browse_ep, 'browseId', expected_type=str)
+            channel_id = self.ucid_or_none(traverse_obj(browse_ep, 'browseId', expected_type=str))
             post_id = self._search_regex(
                 r'/post/(.+)', traverse_obj(browse_ep, 'canonicalBaseUrl', expected_type=str),
                 'post id', default=None)
@@ -5706,9 +6700,9 @@ def _extract_notification_renderer(self, notification):
         title = self._search_regex(
             rf'{re.escape(channel or "")}[^:]+: (.+)', notification_title,
             'video title', default=None)
-        upload_date = (strftime_or_none(self._extract_time_text(notification, 'sentTimeText')[0], '%Y%m%d')
-                       if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE.ie_key())
-                       else None)
+        timestamp = (self._parse_time_text(self._get_text(notification, 'sentTimeText'))
+                     if self._configuration_arg('approximate_date', ie_key=YoutubeTabIE)
+                     else None)
         return {
             '_type': 'url',
             'url': url,
@@ -5717,8 +6711,9 @@ def _extract_notification_renderer(self, notification):
             'title': title,
             'channel_id': channel_id,
             'channel': channel,
+            'uploader': channel,
             'thumbnails': self._extract_thumbnails(notification, 'videoThumbnail'),
-            'upload_date': upload_date,
+            'timestamp': timestamp,
         }
 
     def _notification_menu_entries(self, ytcfg):
@@ -5802,6 +6797,33 @@ class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
             #     'title': '#cats',
             # }],
         },
+    }, {
+        # Channel results
+        'url': 'https://www.youtube.com/results?search_query=kurzgesagt&sp=EgIQAg%253D%253D',
+        'info_dict': {
+            'id': 'kurzgesagt',
+            'title': 'kurzgesagt',
+        },
+        'playlist': [{
+            'info_dict': {
+                '_type': 'url',
+                'id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'ie_key': 'YoutubeTab',
+                'channel': 'Kurzgesagt – In a Nutshell',
+                'description': 'md5:4ae48dfa9505ffc307dad26342d06bfc',
+                'title': 'Kurzgesagt – In a Nutshell',
+                'channel_id': 'UCsXVk37bltHxD1rDPwtNM8Q',
+                'playlist_count': int,  # XXX: should have a way of saying > 1
+                'channel_url': 'https://www.youtube.com/channel/UCsXVk37bltHxD1rDPwtNM8Q',
+                'thumbnails': list,
+                'uploader_id': '@kurzgesagt',
+                'uploader_url': 'https://www.youtube.com/@kurzgesagt',
+                'uploader': 'Kurzgesagt – In a Nutshell',
+            }
+        }],
+        'params': {'extract_flat': True, 'playlist_items': '1'},
+        'playlist_mincount': 1,
     }, {
         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
         'only_matching': True,
@@ -5814,7 +6836,7 @@ def _real_extract(self, url):
 
 
 class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
-    IE_DESC = 'YouTube music search URLs with selectable sections (Eg: #songs)'
+    IE_DESC = 'YouTube music search URLs with selectable sections, e.g. #songs'
     IE_NAME = 'youtube:music:search_url'
     _VALID_URL = r'https?://music\.youtube\.com/search\?([^#]+&)?(?:search_query|q)=(?:[^&]+)(?:[&#]|$)'
     _TESTS = [{
@@ -5952,10 +6974,34 @@ class YoutubeStoriesIE(InfoExtractor):
     def _real_extract(self, url):
         playlist_id = f'RLTD{self._match_id(url)}'
         return self.url_result(
-            f'https://www.youtube.com/playlist?list={playlist_id}&playnext=1',
+            smuggle_url(f'https://www.youtube.com/playlist?list={playlist_id}&playnext=1', {'is_story': True}),
             ie=YoutubeTabIE, video_id=playlist_id)
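[Editor's sketch, not part of the patch: the change above smuggles {'is_story': True} onto the playlist URL so YoutubeTabIE can see it. yt-dlp's smuggle_url stores the dict in the URL fragment and unsmuggle_url recovers it; the RLTD... playlist id below is a placeholder.]

    from yt_dlp.utils import smuggle_url, unsmuggle_url

    url = smuggle_url('https://www.youtube.com/playlist?list=RLTDexample0000&playnext=1',
                      {'is_story': True})
    plain_url, data = unsmuggle_url(url)
    assert data == {'is_story': True}
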
 
 
+class YoutubeShortsAudioPivotIE(InfoExtractor):
+    IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video)'
+    IE_NAME = 'youtube:shorts:pivot:audio'
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/source/(?P<id>[\w-]{11})/shorts'
+    _TESTS = [{
+        'url': 'https://www.youtube.com/source/Lyj-MZSAA9o/shorts',
+        'only_matching': True,
+    }]
+
+    @staticmethod
+    def _generate_audio_pivot_params(video_id):
+        """
+        Generates sfv_audio_pivot browse params for this video id
+        """
+        pb_params = b'\xf2\x05+\n)\x12\'\n\x0b%b\x12\x0b%b\x1a\x0b%b' % ((video_id.encode(),) * 3)
+        return urllib.parse.quote(base64.b64encode(pb_params).decode())
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        return self.url_result(
+            f'https://www.youtube.com/feed/sfv_audio_pivot?bp={self._generate_audio_pivot_params(video_id)}',
+            ie=YoutubeTabIE)
+
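[Editor's sketch, not part of the patch, of what _generate_audio_pivot_params builds: the 11-character video id is embedded three times into a serialized protobuf template, then base64- and URL-encoded to form the `bp` browse parameter used by the sfv_audio_pivot feed.]

    import base64
    import urllib.parse

    video_id = 'Lyj-MZSAA9o'  # the id from the test URL above
    pb_params = b'\xf2\x05+\n)\x12\'\n\x0b%b\x12\x0b%b\x1a\x0b%b' % ((video_id.encode(),) * 3)
    bp = urllib.parse.quote(base64.b64encode(pb_params).decode())
    print(f'https://www.youtube.com/feed/sfv_audio_pivot?bp={bp}')
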
+
 class YoutubeTruncatedURLIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_url'
     IE_DESC = False  # Do not list
@@ -6031,11 +7077,12 @@ class YoutubeClipIE(YoutubeTabBaseInfoExtractor):
             'title': 'Mobile Games on Console - Scott The Woz',
             'upload_date': '20210920',
             'uploader': 'Scott The Woz',
-            'uploader_id': 'scottthewoz',
-            'uploader_url': 'http://www.youtube.com/user/scottthewoz',
+            'uploader_id': '@ScottTheWoz',
+            'uploader_url': 'https://www.youtube.com/@ScottTheWoz',
             'view_count': int,
             'live_status': 'not_live',
-            'channel_follower_count': int
+            'channel_follower_count': int,
+            'chapters': 'count:20',
         }
     }]
 
@@ -6063,6 +7110,51 @@ def _real_extract(self, url):
         }
 
 
+class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
+    IE_NAME = 'youtube:consent'
+    IE_DESC = False  # Do not list
+    _VALID_URL = r'https?://consent\.youtube\.com/m\?'
+    _TESTS = [{
+        'url': 'https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1',
+        'info_dict': {
+            'id': 'qVv6vCqciTM',
+            'ext': 'mp4',
+            'age_limit': 0,
+            'uploader_id': '@sana_natori',
+            'comment_count': int,
+            'chapters': 'count:13',
+            'upload_date': '20221223',
+            'thumbnail': 'https://i.ytimg.com/vi/qVv6vCqciTM/maxresdefault.jpg',
+            'channel_url': 'https://www.youtube.com/channel/UCIdEIHpS0TdkqRkHL5OkLtA',
+            'uploader_url': 'https://www.youtube.com/@sana_natori',
+            'like_count': int,
+            'release_date': '20221223',
+            'tags': ['Vtuber', '月ノ美兎', '名取さな', 'にじさんじ', 'クリスマス', '3D配信'],
+            'title': '【 #インターネット女クリスマス 】3Dで歌ってはしゃぐインターネットの女たち【月ノ美兎/名取さな】',
+            'view_count': int,
+            'playable_in_embed': True,
+            'duration': 4438,
+            'availability': 'public',
+            'channel_follower_count': int,
+            'channel_id': 'UCIdEIHpS0TdkqRkHL5OkLtA',
+            'categories': ['Entertainment'],
+            'live_status': 'was_live',
+            'release_timestamp': 1671793345,
+            'channel': 'さなちゃんねる',
+            'description': 'md5:6aebf95cc4a1d731aebc01ad6cc9806d',
+            'uploader': 'さなちゃんねる',
+        },
+        'add_ie': ['Youtube'],
+        'params': {'skip_download': 'Youtube'},
+    }]
+
+    def _real_extract(self, url):
+        redirect_url = url_or_none(parse_qs(url).get('continue', [None])[-1])
+        if not redirect_url:
+            raise ExtractorError('Invalid cookie consent redirect URL', expected=True)
+        return self.url_result(redirect_url)
+
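[Editor's sketch, not part of the patch: extracting the 'continue' target from a consent.youtube.com URL with only the standard library; yt-dlp's parse_qs and url_or_none helpers used above do the equivalent, plus URL validation.]

    from urllib.parse import parse_qs, urlparse

    url = ('https://consent.youtube.com/m?continue=https%3A%2F%2Fwww.youtube.com'
           '%2Flive%2FqVv6vCqciTM%3Fcbrd%3D1&gl=NL&m=0&pc=yt&hl=en&src=1')
    redirect_url = parse_qs(urlparse(url).query).get('continue', [None])[-1]
    print(redirect_url)  # https://www.youtube.com/live/qVv6vCqciTM?cbrd=1
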
+
 class YoutubeTruncatedIDIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_id'
     IE_DESC = False  # Do not list