[youtube:tab] Reload with unavailable videos for all playlists

diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 6c93517d44474bf62658b04abefddaa0660d02c7..4c9da101f5a09739296e41c620a86072b49cb75d 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -2,6 +2,7 @@
 
 from __future__ import unicode_literals
 
+import calendar
 import hashlib
 import itertools
 import json
@@ -15,7 +16,6 @@
 from ..compat import (
     compat_chr,
     compat_HTTPError,
-    compat_kwargs,
     compat_parse_qs,
     compat_str,
     compat_urllib_parse_unquote_plus,
@@ -28,6 +28,8 @@
     bool_or_none,
     clean_html,
     dict_get,
+    datetime_from_str,
+    error_to_compat_str,
     ExtractorError,
     format_field,
     float_or_none,
@@ -47,7 +49,7 @@
     update_url_query,
     url_or_none,
     urlencode_postdata,
-    urljoin,
+    urljoin
 )
 
 
@@ -126,7 +128,7 @@ def req(url, f_req, note, errnote):
                 })
 
         def warn(message):
-            self._downloader.report_warning(message)
+            self.report_warning(message)
 
         lookup_req = [
             username,
@@ -261,33 +263,30 @@ def warn(message):
 
         return True
 
-    def _download_webpage_handle(self, *args, **kwargs):
-        query = kwargs.get('query', {}).copy()
-        kwargs['query'] = query
-        return super(YoutubeBaseInfoExtractor, self)._download_webpage_handle(
-            *args, **compat_kwargs(kwargs))
+    def _initialize_consent(self):
+        cookies = self._get_cookies('https://www.youtube.com/')
+        if cookies.get('__Secure-3PSID'):
+            return
+        consent_id = None
+        consent = cookies.get('CONSENT')
+        if consent:
+            if 'YES' in consent.value:
+                return
+            consent_id = self._search_regex(
+                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+        if not consent_id:
+            consent_id = random.randint(100, 999)
+        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
 
     def _real_initialize(self):
+        self._initialize_consent()
         if self._downloader is None:
             return
         if not self._login():
             return
 
-    _YT_WEB_CLIENT_VERSION = '2.20210301.08.00'
-    _DEFAULT_API_DATA = {
-        'context': {
-            'client': {
-                'clientName': 'WEB',
-                'clientVersion': _YT_WEB_CLIENT_VERSION,
-            }
-        },
-    }
-
-    _DEFAULT_BASIC_API_HEADERS = {
-        'X-YouTube-Client-Name': '1',
-        'X-YouTube-Client-Version': _YT_WEB_CLIENT_VERSION
-    }
-
+    _YT_WEB_CLIENT_VERSION = '2.20210407.08.00'
+    _YT_INNERTUBE_API_KEY = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
     _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
     _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
     _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
@@ -301,19 +300,23 @@ def _generate_sapisidhash_header(self):
         return "SAPISIDHASH %s_%s" % (time_now, sapisidhash)
 
     def _call_api(self, ep, query, video_id, fatal=True, headers=None,
-                  note='Downloading API JSON', errnote='Unable to download API page'):
-        data = self._DEFAULT_API_DATA.copy()
+                  note='Downloading API JSON', errnote='Unable to download API page',
+                  context=None, api_key=None):
+
+        data = {'context': context} if context else {'context': self._extract_context()}
         data.update(query)
-        headers = headers or {}
-        headers.update({'content-type': 'application/json'})
-        auth = self._generate_sapisidhash_header()
-        if auth is not None:
-            headers.update({'Authorization': auth, 'X-Origin': 'https://www.youtube.com'})
+        real_headers = self._generate_api_headers()
+        real_headers.update({'content-type': 'application/json'})
+        if headers:
+            real_headers.update(headers)
         return self._download_json(
             'https://www.youtube.com/youtubei/v1/%s' % ep,
             video_id=video_id, fatal=fatal, note=note, errnote=errnote,
-            data=json.dumps(data).encode('utf8'), headers=headers,
-            query={'key': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'})
+            data=json.dumps(data).encode('utf8'), headers=real_headers,
+            query={'key': api_key or self._extract_api_key()})
+
+    def _extract_api_key(self, ytcfg=None):
+        return try_get(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str) or self._YT_INNERTUBE_API_KEY
 
     def _extract_yt_initial_data(self, video_id, webpage):
         return self._parse_json(
@@ -334,20 +337,65 @@ def _extract_identity_token(self, webpage, item_id):
 
     @staticmethod
     def _extract_account_syncid(data):
-        """Extract syncId required to download private playlists of secondary channels"""
-        sync_ids = (
-            try_get(data, lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'], compat_str)
-            or '').split("||")
+        """
+        Extract syncId required to download private playlists of secondary channels
+        @param data Either response or ytcfg
+        """
+        sync_ids = (try_get(
+            data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
+                   lambda x: x['DATASYNC_ID']), compat_str) or '').split("||")
         if len(sync_ids) >= 2 and sync_ids[1]:
             # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
             # and just "user_syncid||" for primary channel. We only want the channel_syncid
             return sync_ids[0]
+        # ytcfg includes channel_syncid if on secondary channel
+        return data.get('DELEGATED_SESSION_ID')
 
     def _extract_ytcfg(self, video_id, webpage):
         return self._parse_json(
             self._search_regex(
                 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
-                default='{}'), video_id, fatal=False)
+                default='{}'), video_id, fatal=False) or {}
+
+    def __extract_client_version(self, ytcfg):
+        return try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_VERSION'], compat_str) or self._YT_WEB_CLIENT_VERSION
+
+    def _extract_context(self, ytcfg=None):
+        context = try_get(ytcfg, lambda x: x['INNERTUBE_CONTEXT'], dict)
+        if context:
+            return context
+
+        # Recreate the client context (required)
+        client_version = self.__extract_client_version(ytcfg)
+        client_name = try_get(ytcfg, lambda x: x['INNERTUBE_CLIENT_NAME'], compat_str) or 'WEB'
+        context = {
+            'client': {
+                'clientName': client_name,
+                'clientVersion': client_version,
+            }
+        }
+        visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
+        if visitor_data:
+            context['client']['visitorData'] = visitor_data
+        return context
+
+    def _generate_api_headers(self, ytcfg=None, identity_token=None, account_syncid=None, visitor_data=None):
+        headers = {
+            'X-YouTube-Client-Name': '1',
+            'X-YouTube-Client-Version': self.__extract_client_version(ytcfg),
+        }
+        if identity_token:
+            headers['x-youtube-identity-token'] = identity_token
+        if account_syncid:
+            headers['X-Goog-PageId'] = account_syncid
+            headers['X-Goog-AuthUser'] = 0
+        if visitor_data:
+            headers['x-goog-visitor-id'] = visitor_data
+        auth = self._generate_sapisidhash_header()
+        if auth is not None:
+            headers['Authorization'] = auth
+            headers['X-Origin'] = 'https://www.youtube.com'
+        return headers
 
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
@@ -370,7 +418,7 @@ def _extract_video(self, renderer):
             (lambda x: x['ownerText']['runs'][0]['text'],
              lambda x: x['shortBylineText']['runs'][0]['text']), compat_str)
         return {
-            '_type': 'url_transparent',
+            '_type': 'url',
             'ie_key': YoutubeIE.ie_key(),
             'id': video_id,
             'url': video_id,
@@ -1237,6 +1285,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
             'only_matching': True,
         },
+        {
+            # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
+            'url': 'cBvYw8_A0vQ',
+            'info_dict': {
+                'id': 'cBvYw8_A0vQ',
+                'ext': 'mp4',
+                'title': '4K Ueno Okachimachi  Street  Scenes  上野御徒町歩き',
+                'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
+                'upload_date': '20201120',
+                'uploader': 'Walk around Japan',
+                'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
+                'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
+            },
+            'params': {
+                'skip_download': True,
+            },
+        },
     ]
 
     def __init__(self, *args, **kwargs):
@@ -1490,6 +1555,16 @@ def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
             (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
              regex), webpage, name, default='{}'), video_id, fatal=False)
 
+    @staticmethod
+    def parse_time_text(time_text):
+        """
+        Parse the comment time text
+        time_text is in the format 'X units ago (edited)'
+        """
+        time_text_split = time_text.split(' ')
+        if len(time_text_split) >= 3:
+            return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
+
     @staticmethod
     def _join_text_entries(runs):
         text = None
@@ -1512,7 +1587,7 @@ def _extract_comment(self, comment_renderer, parent=None):
         text = self._join_text_entries(comment_text_runs) or ''
         comment_time_text = try_get(comment_renderer, lambda x: x['publishedTimeText']['runs']) or []
         time_text = self._join_text_entries(comment_time_text)
-
+        timestamp = calendar.timegm(self.parse_time_text(time_text).timetuple())
         author = try_get(comment_renderer, lambda x: x['authorText']['simpleText'], compat_str)
         author_id = try_get(comment_renderer,
                             lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
@@ -1523,11 +1598,10 @@ def _extract_comment(self, comment_renderer, parent=None):
 
         author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
         is_liked = try_get(comment_renderer, lambda x: x['isLiked'], bool)
-
         return {
             'id': comment_id,
             'text': text,
-            # TODO: This should be parsed to timestamp
+            'timestamp': timestamp,
             'time_text': time_text,
             'like_count': votes,
             'is_favorited': is_liked,
@@ -1539,7 +1613,7 @@ def _extract_comment(self, comment_renderer, parent=None):
         }
 
     def _comment_entries(self, root_continuation_data, identity_token, account_syncid,
-                         session_token_list, parent=None, comment_counts=None):
+                         ytcfg, session_token_list, parent=None, comment_counts=None):
 
         def extract_thread(parent_renderer):
             contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
@@ -1565,7 +1639,7 @@ def extract_thread(parent_renderer):
                 if comment_replies_renderer:
                     comment_counts[2] += 1
                     comment_entries_iter = self._comment_entries(
-                        comment_replies_renderer, identity_token, account_syncid,
+                        comment_replies_renderer, identity_token, account_syncid, ytcfg,
                         parent=comment.get('id'), session_token_list=session_token_list,
                         comment_counts=comment_counts)
 
@@ -1575,16 +1649,10 @@ def extract_thread(parent_renderer):
         if not comment_counts:
             # comment so far, est. total comments, current comment thread #
             comment_counts = [0, 0, 0]
-        headers = self._DEFAULT_BASIC_API_HEADERS.copy()
 
         # TODO: Generalize the download code with TabIE
-        if identity_token:
-            headers['x-youtube-identity-token'] = identity_token
-
-        if account_syncid:
-            headers['X-Goog-PageId'] = account_syncid
-            headers['X-Goog-AuthUser'] = 0
-
+        context = self._extract_context(ytcfg)
+        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
         continuation = YoutubeTabIE._extract_continuation(root_continuation_data)  # TODO
         first_continuation = False
         if parent is None:
@@ -1593,6 +1661,7 @@ def extract_thread(parent_renderer):
         for page_num in itertools.count(0):
             if not continuation:
                 break
+            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
             retries = self._downloader.params.get('extractor_retries', 3)
             count = -1
             last_error = None
@@ -1615,12 +1684,12 @@ def extract_thread(parent_renderer):
                     comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
                     if page_num == 0:
                         if first_continuation:
-                            note_prefix = "Downloading initial comment continuation page"
+                            note_prefix = 'Downloading initial comment continuation page'
                         else:
-                            note_prefix = "    Downloading comment reply thread %d %s" % (comment_counts[2], comment_prog_str)
+                            note_prefix = '    Downloading comment reply thread %d %s' % (comment_counts[2], comment_prog_str)
                     else:
-                        note_prefix = "%sDownloading comment%s page %d %s" % (
-                            "       " if parent else "",
+                        note_prefix = '%sDownloading comment%s page %d %s' % (
+                            '       ' if parent else '',
                             ' replies' if parent else '',
                             page_num,
                             comment_prog_str)
@@ -1635,13 +1704,13 @@ def extract_thread(parent_renderer):
                 except ExtractorError as e:
                     if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404, 413):
                         if e.cause.code == 413:
-                            self.report_warning("Assumed end of comments (received HTTP Error 413)")
+                            self.report_warning('Assumed end of comments (received HTTP Error 413)')
                             return
                         # Downloading page may result in intermittent 5xx HTTP error
                         # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
                         last_error = 'HTTP Error %s' % e.cause.code
                         if e.cause.code == 404:
-                            last_error = last_error + " (this API is probably deprecated)"
+                            last_error = last_error + ' (this API is probably deprecated)'
                         if count < retries:
                             continue
                     raise
@@ -1659,7 +1728,7 @@ def extract_thread(parent_renderer):
 
                     # YouTube sometimes gives reload: now json if something went wrong (e.g. bad auth)
                     if browse.get('reload'):
-                        raise ExtractorError("Invalid or missing params in continuation request", expected=False)
+                        raise ExtractorError('Invalid or missing params in continuation request', expected=False)
 
                     # TODO: not tested, merged from old extractor
                     err_msg = browse.get('externalErrorMessage')
@@ -1670,10 +1739,14 @@ def extract_thread(parent_renderer):
                     # See: https://github.com/ytdl-org/youtube-dl/issues/28194
                     last_error = 'Incomplete data received'
                     if count >= retries:
-                        self._downloader.report_error(last_error)
+                        raise ExtractorError(last_error)
 
             if not response:
                 break
+            visitor_data = try_get(
+                response,
+                lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
+                compat_str) or visitor_data
 
             known_continuation_renderers = {
                 'itemSectionContinuation': extract_thread,
@@ -1699,7 +1772,7 @@ def extract_thread(parent_renderer):
 
                     if expected_comment_count:
                         comment_counts[1] = str_to_int(expected_comment_count)
-                        self.to_screen("Downloading ~%d comments" % str_to_int(expected_comment_count))
+                        self.to_screen('Downloading ~%d comments' % str_to_int(expected_comment_count))
                         yield comment_counts[1]
 
                     # TODO: cli arg.
@@ -1715,7 +1788,7 @@ def extract_thread(parent_renderer):
                         continuation = YoutubeTabIE._build_continuation_query(
                             continuation=sort_continuation_renderer.get('continuation'),
                             ctp=sort_continuation_renderer.get('clickTrackingParams'))
-                        self.to_screen("Sorting comments by %s" % ('popular' if comment_sort_index == 0 else 'newest'))
+                        self.to_screen('Sorting comments by %s' % ('popular' if comment_sort_index == 0 else 'newest'))
                         break
 
                 for entry in known_continuation_renderers[key](continuation_renderer):
@@ -1740,6 +1813,7 @@ def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
                     renderer,
                     identity_token=self._extract_identity_token(webpage, item_id=video_id),
                     account_syncid=self._extract_account_syncid(ytcfg),
+                    ytcfg=ytcfg,
                     session_token_list=[xsrf_token])
 
                 for comment in comment_iter:
@@ -1748,7 +1822,7 @@ def _extract_comments(self, ytcfg, video_id, contents, webpage, xsrf_token):
                         continue
                     comments.append(comment)
                 break
-        self.to_screen("Downloaded %d/%d comments" % (len(comments), estimated_total))
+        self.to_screen('Downloaded %d/%d comments' % (len(comments), estimated_total))
         return {
             'comments': comments,
             'comment_count': len(comments),
@@ -1760,17 +1834,18 @@ def _real_extract(self, url):
         base_url = self.http_scheme() + '//www.youtube.com/'
         webpage_url = base_url + 'watch?v=' + video_id
         webpage = self._download_webpage(
-            webpage_url + '&has_verified=1&bpctr=9999999999',
-            video_id, fatal=False)
+            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
 
         player_response = None
         if webpage:
             player_response = self._extract_yt_initial_variable(
                 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
                 video_id, 'initial player response')
+
+        ytcfg = self._extract_ytcfg(video_id, webpage)
         if not player_response:
             player_response = self._call_api(
-                'player', {'videoId': video_id}, video_id)
+                'player', {'videoId': video_id}, video_id, api_key=self._extract_api_key(ytcfg))
 
         playability_status = player_response.get('playabilityStatus') or {}
         if playability_status.get('reason') == 'Sign in to confirm your age':
@@ -1798,7 +1873,13 @@ def _real_extract(self, url):
         def get_text(x):
             if not x:
                 return
-            return x.get('simpleText') or ''.join([r['text'] for r in x['runs']])
+            text = x.get('simpleText')
+            if text and isinstance(text, compat_str):
+                return text
+            runs = x.get('runs')
+            if not isinstance(runs, list):
+                return
+            return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
 
         search_meta = (
             lambda x: self._html_search_meta(x, webpage, default=None)) \
@@ -1947,7 +2028,7 @@ def feed_entry(name):
                     f['format_id'] = itag
                 formats.append(f)
 
-        if self._downloader.params.get('youtube_include_dash_manifest'):
+        if self._downloader.params.get('youtube_include_dash_manifest', True):
             dash_manifest_url = streaming_data.get('dashManifestUrl')
             if dash_manifest_url:
                 for f in self._extract_mpd_formats(
@@ -1969,7 +2050,7 @@ def feed_entry(name):
 
         if not formats:
             if not self._downloader.params.get('allow_unplayable_formats') and streaming_data.get('licenseInfos'):
-                raise ExtractorError(
+                self.raise_no_formats(
                     'This video is DRM protected.', expected=True)
             pemr = try_get(
                 playability_status,
@@ -1984,11 +2065,10 @@ def feed_entry(name):
                     if not countries:
                         regions_allowed = search_meta('regionsAllowed')
                         countries = regions_allowed.split(',') if regions_allowed else None
-                    self.raise_geo_restricted(
-                        subreason, countries)
+                    self.raise_geo_restricted(subreason, countries, metadata_available=True)
                 reason += '\n' + subreason
             if reason:
-                raise ExtractorError(reason, expected=True)
+                self.raise_no_formats(reason, expected=True)
 
         self._sort_formats(formats)
 
@@ -1999,8 +2079,10 @@ def feed_entry(name):
                 for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
         for keyword in keywords:
             if keyword.startswith('yt:stretch='):
-                w, h = keyword.split('=')[1].split(':')
-                w, h = int(w), int(h)
+                stretch_ratio = map(
+                    lambda x: int_or_none(x, default=0),
+                    keyword.split('=')[1].split(':'))
+                w, h = (list(stretch_ratio) + [0])[:2]
                 if w > 0 and h > 0:
                     ratio = w / h
                     for f in formats:
@@ -2015,6 +2097,11 @@ def feed_entry(name):
                 thumbnail_url = thumbnail.get('url')
                 if not thumbnail_url:
                     continue
+                # Sometimes youtube gives a wrong thumbnail URL. See:
+                # https://github.com/yt-dlp/yt-dlp/issues/233
+                # https://github.com/ytdl-org/youtube-dl/issues/28023
+                if 'maxresdefault' in thumbnail_url:
+                    thumbnail_url = thumbnail_url.split('?')[0]
                 thumbnails.append({
                     'height': int_or_none(thumbnail.get('height')),
                     'url': thumbnail_url,
@@ -2143,13 +2230,14 @@ def process_language(container, base_url, lang_code, query):
                 'yt initial data')
         if not initial_data:
             initial_data = self._call_api(
-                'next', {'videoId': video_id}, video_id, fatal=False)
+                'next', {'videoId': video_id}, video_id, fatal=False, api_key=self._extract_api_key(ytcfg))
 
         if not is_live:
             try:
                 # This will error if there is no livechat
                 initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
                 info['subtitles']['live_chat'] = [{
+                    'url': 'https://www.youtube.com/watch?v=%s' % video_id,  # url is needed to set cookies
                     'video_id': video_id,
                     'ext': 'json',
                     'protocol': 'youtube_live_chat_replay',
@@ -2243,7 +2331,7 @@ def chapter_time(mmlir):
                     info['channel'] = get_text(try_get(
                         vsir,
                         lambda x: x['owner']['videoOwnerRenderer']['title'],
-                        compat_str))
+                        dict))
                     rows = try_get(
                         vsir,
                         lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
@@ -2287,8 +2375,10 @@ def chapter_time(mmlir):
         is_private = bool_or_none(video_details.get('isPrivate'))
         is_unlisted = bool_or_none(microformat.get('isUnlisted'))
         is_membersonly = None
+        is_premium = None
         if initial_data and is_private is not None:
             is_membersonly = False
+            is_premium = False
             contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list)
             for content in contents or []:
                 badges = try_get(content, lambda x: x['videoPrimaryInfoRenderer']['badges'], list)
@@ -2297,13 +2387,16 @@ def chapter_time(mmlir):
                     if label.lower() == 'members only':
                         is_membersonly = True
                         break
-                if is_membersonly:
+                    elif label.lower() == 'premium':
+                        is_premium = True
+                        break
+                if is_membersonly or is_premium:
                     break
 
         # TODO: Add this for playlists
         info['availability'] = self._availability(
             is_private=is_private,
-            needs_premium=False,  # Youtube no longer have premium-only videos?
+            needs_premium=is_premium,
             needs_subscription=is_membersonly,
             needs_auth=info['age_limit'] >= 18,
             is_unlisted=None if is_private is None else is_unlisted)
@@ -2535,6 +2628,29 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
         },
         'playlist_mincount': 21,
+    }, {
+        'note': 'Playlist with "show unavailable videos" button',
+        'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
+        'info_dict': {
+            'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
+            'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
+            'uploader': 'Phim Siêu Nhân Nhật Bản',
+            'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
+        },
+        'playlist_mincount': 1400,
+        'expected_warnings': [
+            'YouTube said: INFO - Unavailable videos are hidden',
+        ]
+    }, {
+        'note': 'Playlist with unavailable videos in a later page',
+        'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
+        'info_dict': {
+            'title': 'Uploads from BlankTV',
+            'id': 'UU8l9frL61Yl5KFOl87nIm2w',
+            'uploader': 'BlankTV',
+            'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
+        },
+        'playlist_mincount': 20000,
     }, {
         # https://github.com/ytdl-org/youtube-dl/issues/21844
         'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
@@ -2667,6 +2783,13 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
     }, {
         'url': 'https://www.youtube.com/TheYoungTurks/live',
         'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/hashtag/cctv9',
+        'info_dict': {
+            'id': 'cctv9',
+            'title': '#cctv9',
+        },
+        'playlist_mincount': 350,
     }]
 
     @classmethod
@@ -2827,6 +2950,16 @@ def _post_thread_continuation_entries(self, post_thread_continuation):
             for entry in self._post_thread_entries(renderer):
                 yield entry
 
+    r''' # unused
+    def _rich_grid_entries(self, contents):
+        for content in contents:
+            video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
+            if video_renderer:
+                entry = self._video_entry(video_renderer)
+                if entry:
+                    yield entry
+    '''
+
     @staticmethod
     def _build_continuation_query(continuation, ctp=None):
         query = {
@@ -2872,7 +3005,7 @@ def _extract_continuation(cls, renderer):
             ctp = continuation_ep.get('clickTrackingParams')
             return YoutubeTabIE._build_continuation_query(continuation, ctp)
 
-    def _entries(self, tab, item_id, identity_token, account_syncid):
+    def _entries(self, tab, item_id, identity_token, account_syncid, ytcfg):
 
         def extract_entries(parent_renderer):  # this needs to called again for continuation to work with feeds
             contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
@@ -2924,61 +3057,26 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
         for entry in extract_entries(parent_renderer):
             yield entry
         continuation = continuation_list[0]
-
-        headers = {
-            'x-youtube-client-name': '1',
-            'x-youtube-client-version': '2.20201112.04.01',
-        }
-        if identity_token:
-            headers['x-youtube-identity-token'] = identity_token
-
-        if account_syncid:
-            headers['X-Goog-PageId'] = account_syncid
-            headers['X-Goog-AuthUser'] = 0
+        context = self._extract_context(ytcfg)
+        visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
 
         for page_num in itertools.count(1):
             if not continuation:
                 break
-            retries = self._downloader.params.get('extractor_retries', 3)
-            count = -1
-            last_error = None
-            while count < retries:
-                count += 1
-                if last_error:
-                    self.report_warning('%s. Retrying ...' % last_error)
-                try:
-                    response = self._call_api(
-                        ep="browse", fatal=True, headers=headers,
-                        video_id='%s page %s' % (item_id, page_num),
-                        query={
-                            'continuation': continuation['continuation'],
-                            'clickTracking': {'clickTrackingParams': continuation['itct']},
-                        },
-                        note='Downloading API JSON%s' % (' (retry #%d)' % count if count else ''))
-                except ExtractorError as e:
-                    if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
-                        # Downloading page may result in intermittent 5xx HTTP error
-                        # Sometimes a 404 is also recieved. See: https://github.com/ytdl-org/youtube-dl/issues/28289
-                        last_error = 'HTTP Error %s' % e.cause.code
-                        if count < retries:
-                            continue
-                    raise
-                else:
-                    # Youtube sometimes sends incomplete data
-                    # See: https://github.com/ytdl-org/youtube-dl/issues/28194
-                    if dict_get(response,
-                                ('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints')):
-                        break
-
-                    # Youtube may send alerts if there was an issue with the continuation page
-                    self._extract_alerts(response, expected=False)
-
-                    last_error = 'Incomplete data received'
-                    if count >= retries:
-                        self._downloader.report_error(last_error)
+            query = {
+                'continuation': continuation['continuation'],
+                'clickTracking': {'clickTrackingParams': continuation['itct']}
+            }
+            headers = self._generate_api_headers(ytcfg, identity_token, account_syncid, visitor_data)
+            response = self._extract_response(
+                item_id='%s page %s' % (item_id, page_num),
+                query=query, headers=headers, ytcfg=ytcfg,
+                check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
 
             if not response:
                 break
+            visitor_data = try_get(
+                response, lambda x: x['responseContext']['visitorData'], compat_str) or visitor_data
 
             known_continuation_renderers = {
                 'playlistVideoListContinuation': self._playlist_entries,
@@ -3009,9 +3107,9 @@ def extract_entries(parent_renderer):  # this needs to called again for continua
                 'richItemRenderer': (extract_entries, 'contents'),  # for hashtag
                 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
             }
+            on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
             continuation_items = try_get(
-                response,
-                lambda x: dict_get(x, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))[0]['appendContinuationItemsAction']['continuationItems'], list)
+                on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
             continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
             video_items_renderer = None
             for key, value in continuation_item.items():
@@ -3069,10 +3167,10 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
             channel_name = renderer.get('title')
             channel_url = renderer.get('channelUrl')
             channel_id = renderer.get('externalId')
-
-        if not renderer:
+        else:
             renderer = try_get(
                 data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+
         if renderer:
             title = renderer.get('title')
             description = renderer.get('description', '')
@@ -3098,11 +3196,12 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
                 'width': int_or_none(t.get('width')),
                 'height': int_or_none(t.get('height')),
             })
-
         if playlist_id is None:
             playlist_id = item_id
         if title is None:
-            title = playlist_id
+            title = (
+                try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
+                or playlist_id)
         title += format_field(selected_tab, 'title', ' - %s')
 
         metadata = {
@@ -3125,11 +3224,17 @@ def _extract_from_tabs(self, item_id, webpage, data, tabs):
             self._entries(
                 selected_tab, playlist_id,
                 self._extract_identity_token(webpage, item_id),
-                self._extract_account_syncid(data)),
+                self._extract_account_syncid(data),
+                self._extract_ytcfg(item_id, webpage)),
             **metadata)
 
-    def _extract_mix_playlist(self, playlist, playlist_id):
+    def _extract_mix_playlist(self, playlist, playlist_id, data, webpage):
         first_id = last_id = None
+        ytcfg = self._extract_ytcfg(playlist_id, webpage)
+        headers = self._generate_api_headers(
+            ytcfg, account_syncid=self._extract_account_syncid(data),
+            identity_token=self._extract_identity_token(webpage, item_id=playlist_id),
+            visitor_data=try_get(self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
         for page_num in itertools.count(1):
             videos = list(self._playlist_entries(playlist))
             if not videos:
@@ -3144,14 +3249,25 @@ def _extract_mix_playlist(self, playlist, playlist_id):
                 yield video
             first_id = first_id or videos[0]['id']
             last_id = videos[-1]['id']
-
-            _, data = self._extract_webpage(
-                'https://www.youtube.com/watch?list=%s&v=%s' % (playlist_id, last_id),
-                '%s page %d' % (playlist_id, page_num))
+            watch_endpoint = try_get(
+                playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
+            query = {
+                'playlistId': playlist_id,
+                'videoId': watch_endpoint.get('videoId') or last_id,
+                'index': watch_endpoint.get('index') or len(videos),
+                'params': watch_endpoint.get('params') or 'OAE%3D'
+            }
+            response = self._extract_response(
+                item_id='%s page %d' % (playlist_id, page_num),
+                query=query,
+                ep='next',
+                headers=headers,
+                check_get_keys='contents'
+            )
             playlist = try_get(
-                data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+                response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
 
-    def _extract_from_playlist(self, item_id, url, data, playlist):
+    def _extract_from_playlist(self, item_id, url, data, playlist, webpage):
         title = playlist.get('title') or try_get(
             data, lambda x: x['titleText']['simpleText'], compat_str)
         playlist_id = playlist.get('playlistId') or item_id
@@ -3166,7 +3282,7 @@ def _extract_from_playlist(self, item_id, url, data, playlist):
                 video_title=title)
 
         return self.playlist_result(
-            self._extract_mix_playlist(playlist, playlist_id),
+            self._extract_mix_playlist(playlist, playlist_id, data, webpage),
             playlist_id=playlist_id, playlist_title=title)
 
     def _extract_alerts(self, data, expected=False):
@@ -3179,25 +3295,118 @@ def _real_extract_alerts():
                     alert_type = alert.get('type')
                     if not alert_type:
                         continue
-                    message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
+                    message = try_get(alert, lambda x: x['text']['simpleText'], compat_str) or ''
                     if message:
                         yield alert_type, message
                     for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
-                        message = try_get(run, lambda x: x['text'], compat_str)
-                        if message:
-                            yield alert_type, message
+                        message += try_get(run, lambda x: x['text'], compat_str)
+                    if message:
+                        yield alert_type, message
 
-        err_msg = None
+        errors = []
+        warnings = []
         for alert_type, alert_message in _real_extract_alerts():
             if alert_type.lower() == 'error':
-                if err_msg:
-                    self._downloader.report_warning('YouTube said: %s - %s' % ('ERROR', err_msg))
-                err_msg = alert_message
+                errors.append([alert_type, alert_message])
             else:
-                self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+                warnings.append([alert_type, alert_message])
 
-        if err_msg:
-            raise ExtractorError('YouTube said: %s' % err_msg, expected=expected)
+        for alert_type, alert_message in (warnings + errors[:-1]):
+            self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
+        if errors:
+            raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
+
+    def _reload_with_unavailable_videos(self, item_id, data, webpage):
+        """
+        Get playlist with unavailable videos if the 'show unavailable videos' button exists.
+        """
+        sidebar_renderer = try_get(
+            data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
+        if not sidebar_renderer:
+            return
+        browse_id = params = None
+        for item in sidebar_renderer:
+            if not isinstance(item, dict):
+                continue
+            renderer = item.get('playlistSidebarPrimaryInfoRenderer')
+            menu_renderer = try_get(
+                renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
+            for menu_item in menu_renderer:
+                if not isinstance(menu_item, dict):
+                    continue
+                nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
+                text = try_get(
+                    nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
+                if not text or text.lower() != 'show unavailable videos':
+                    continue
+                browse_endpoint = try_get(
+                    nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
+                browse_id = browse_endpoint.get('browseId')
+                params = browse_endpoint.get('params')
+                break
+
+            ytcfg = self._extract_ytcfg(item_id, webpage)
+            headers = self._generate_api_headers(
+                ytcfg, account_syncid=self._extract_account_syncid(ytcfg),
+                identity_token=self._extract_identity_token(webpage, item_id=item_id),
+                visitor_data=try_get(
+                    self._extract_context(ytcfg), lambda x: x['client']['visitorData'], compat_str))
+            query = {
+                'params': params or 'wgYCCAA=',
+                'browseId': browse_id or 'VL%s' % item_id
+            }
+            return self._extract_response(
+                item_id=item_id, headers=headers, query=query,
+                check_get_keys='contents', fatal=False,
+                note='Downloading API JSON with unavailable videos')
+
+    def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
+                          ytcfg=None, check_get_keys=None, ep='browse', fatal=True):
+        response = None
+        last_error = None
+        count = -1
+        retries = self._downloader.params.get('extractor_retries', 3)
+        if check_get_keys is None:
+            check_get_keys = []
+        while count < retries:
+            count += 1
+            if last_error:
+                self.report_warning('%s. Retrying ...' % last_error)
+            try:
+                response = self._call_api(
+                    ep=ep, fatal=True, headers=headers,
+                    video_id=item_id, query=query,
+                    context=self._extract_context(ytcfg),
+                    api_key=self._extract_api_key(ytcfg),
+                    note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
+            except ExtractorError as e:
+                if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503, 404):
+                    # Downloading page may result in intermittent 5xx HTTP error
+                    # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
+                    last_error = 'HTTP Error %s' % e.cause.code
+                    if count < retries:
+                        continue
+                if fatal:
+                    raise
+                else:
+                    self.report_warning(error_to_compat_str(e))
+                    return
+
+            else:
+                # Youtube may send alerts if there was an issue with the continuation page
+                self._extract_alerts(response, expected=False)
+                if not check_get_keys or dict_get(response, check_get_keys):
+                    break
+                # Youtube sometimes sends incomplete data
+                # See: https://github.com/ytdl-org/youtube-dl/issues/28194
+                last_error = 'Incomplete data received'
+                if count >= retries:
+                    if fatal:
+                        raise ExtractorError(last_error)
+                    else:
+                        self.report_warning(last_error)
+                        return
+        return response
 
     def _extract_webpage(self, url, item_id):
         retries = self._downloader.params.get('extractor_retries', 3)
@@ -3217,7 +3426,7 @@ def _extract_webpage(self, url, item_id):
             if data.get('contents') or data.get('currentVideoEndpoint'):
                 break
             if count >= retries:
-                self._downloader.report_error(last_error)
+                raise ExtractorError(last_error)
         return webpage, data
 
     def _real_extract(self, url):
@@ -3229,7 +3438,7 @@ def _real_extract(self, url):
         mobj = re.match(r'(?P<pre>%s)(?P<post>/?(?![^#?]).*$)' % self._VALID_URL, url)
         mobj = mobj.groupdict() if mobj else {}
         if mobj and not mobj.get('not_channel'):
-            self._downloader.report_warning(
+            self.report_warning(
                 'A channel/user page was given. All the channel\'s videos will be downloaded. '
                 'To download only the videos in the home page, add a "/featured" to the URL')
             url = '%s/videos%s' % (mobj.get('pre'), mobj.get('post') or '')
@@ -3244,7 +3453,7 @@ def _real_extract(self, url):
                 # If there is neither video or playlist ids,
                 # youtube redirects to home page, which is undesirable
                 raise ExtractorError('Unable to recognize tab page')
-            self._downloader.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
+            self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
             url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
 
         if video_id and playlist_id:
@@ -3255,6 +3464,9 @@ def _real_extract(self, url):
 
         webpage, data = self._extract_webpage(url, item_id)
 
+        # YouTube sometimes provides a button to reload playlist with unavailable videos.
+        data = self._reload_with_unavailable_videos(item_id, data, webpage) or data
+
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
@@ -3263,13 +3475,13 @@ def _real_extract(self, url):
         playlist = try_get(
             data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
         if playlist:
-            return self._extract_from_playlist(item_id, url, data, playlist)
+            return self._extract_from_playlist(item_id, url, data, playlist, webpage)
 
         video_id = try_get(
             data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
             compat_str) or video_id
         if video_id:
-            self._downloader.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
+            self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
             return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
 
         raise ExtractorError('Unable to recognize tab page')
@@ -3425,7 +3637,7 @@ def _real_extract(self, url):
             ie=YoutubeTabIE.ie_key())
 
 
-class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
+class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
     IE_DESC = 'YouTube.com searches, "ytsearch" keyword'
     # there doesn't appear to be a real limit, for example if you search for
     # 'python' you get more than 8.000.000 results
@@ -3441,9 +3653,10 @@ def _entries(self, query, n):
             data['params'] = self._SEARCH_PARAMS
         total = 0
         for page_num in itertools.count(1):
-            search = self._call_api(
-                ep='search', video_id='query "%s"' % query, fatal=False,
-                note='Downloading page %s' % page_num, query=data)
+            search = self._extract_response(
+                item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
+                check_get_keys=('contents', 'onResponseReceivedCommands')
+            )
             if not search:
                 break
             slr_contents = try_get(