diff --git a/youtube_dlc/extractor/youtube.py b/youtube_dlc/extractor/youtube.py
index fd15d3865b18e3f27ec4db2dc0d526e163ba589a..e0f211b741f712b1e1d9b394a54233385e67dc50 100644
--- a/youtube_dlc/extractor/youtube.py
+++ b/youtube_dlc/extractor/youtube.py
@@ -64,7 +64,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     _TFA_URL = 'https://accounts.google.com/_/signin/challenge?hl=en&TL={0}'
 
     _RESERVED_NAMES = (
-        r'course|embed|channel|c|user|playlist|watch|w|results|storefront|'
+        r'course|embed|channel|c|user|playlist|watch|w|results|storefront|oops|'
         r'shared|index|account|reporthistory|t/terms|about|upload|signin|logout|'
         r'feed/(watch_later|history|subscriptions|library|trending|recommended)')
 
@@ -506,7 +506,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         '396': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
         '397': {'acodec': 'none', 'vcodec': 'av01.0.05M.08'},
     }
-    _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
+    _SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')  # TODO: 'json3' raises issues with automatic captions
 
     _GEO_BYPASS = False
 
@@ -1335,44 +1335,6 @@ def _get_ytplayer_config(self, video_id, webpage):
             return self._parse_json(
                 uppercase_escape(config), video_id, fatal=False)
 
-    def _get_music_metadata_from_yt_initial(self, yt_initial):
-        music_metadata = []
-        key_map = {
-            'Album': 'album',
-            'Artist': 'artist',
-            'Song': 'track'
-        }
-        contents = try_get(yt_initial, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'])
-        if type(contents) is list:
-            for content in contents:
-                music_track = {}
-                if type(content) is not dict:
-                    continue
-                videoSecondaryInfoRenderer = try_get(content, lambda x: x['videoSecondaryInfoRenderer'])
-                if type(videoSecondaryInfoRenderer) is not dict:
-                    continue
-                rows = try_get(videoSecondaryInfoRenderer, lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'])
-                if type(rows) is not list:
-                    continue
-                for row in rows:
-                    metadataRowRenderer = try_get(row, lambda x: x['metadataRowRenderer'])
-                    if type(metadataRowRenderer) is not dict:
-                        continue
-                    key = try_get(metadataRowRenderer, lambda x: x['title']['simpleText'])
-                    value = try_get(metadataRowRenderer, lambda x: x['contents'][0]['simpleText']) or \
-                        try_get(metadataRowRenderer, lambda x: x['contents'][0]['runs'][0]['text'])
-                    if type(key) is not str or type(value) is not str:
-                        continue
-                    if key in key_map:
-                        if key_map[key] in music_track:
-                            # we've started on a new track
-                            music_metadata.append(music_track)
-                            music_track = {}
-                        music_track[key_map[key]] = value
-                if len(music_track.keys()):
-                    music_metadata.append(music_track)
-        return music_metadata
-
     def _get_automatic_captions(self, video_id, webpage):
         """We need the webpage for getting the captions url, pass it as an
            argument to speed up the process."""
@@ -2295,7 +2257,7 @@ def extract_meta(field):
         # Youtube Music Auto-generated description
         release_date = release_year = None
         if video_description:
-            mobj = re.search(r'(?s)Provided to YouTube by [^\n]+\n+(?P<track>[^·]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?', video_description)
+            mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
             if mobj:
                 if not track:
                     track = mobj.group('track').strip()
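
The rewritten description regex above no longer keys on the "Provided to YouTube by" prefix and instead anchors on the trailing "Auto-generated by YouTube." line. A rough standalone sketch of what it captures, run against a hand-written sample description (not a real one):

import re

# Hand-written sample of a YouTube Music auto-generated description; real
# descriptions vary, this only illustrates the named groups.
description = (
    'Provided to YouTube by Example Records\n\n'
    'Some Song · Some Artist\n\n'
    'Some Album\n\n'
    '℗ 2020 Example Records\n\n'
    'Released on: 2020-01-01\n\n'
    'Artist: Some Artist\n\n'
    'Auto-generated by YouTube.\n'
)

pattern = (
    r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)'
    r'(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?'
    r'(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?'
    r'(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?'
    r'.+\nAuto-generated by YouTube\.\s*$'
)

mobj = re.search(pattern, description)
if mobj:
    print(mobj.group('track').strip())    # Some Song
    print(mobj.group('clean_artist'))     # Some Artist
    print(mobj.group('album').strip())    # Some Album
    print(mobj.group('release_year'))     # 2020
    print(mobj.group('release_date'))     # 2020-01-01
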
@@ -2312,13 +2274,33 @@ def extract_meta(field):
                 if release_year:
                     release_year = int(release_year)
 
-        yt_initial = self._get_yt_initial_data(video_id, video_webpage)
-        if yt_initial:
-            music_metadata = self._get_music_metadata_from_yt_initial(yt_initial)
-            if len(music_metadata):
-                album = music_metadata[0].get('album')
-                artist = music_metadata[0].get('artist')
-                track = music_metadata[0].get('track')
+        yt_initial_data = self._extract_yt_initial_data(video_id, video_webpage)
+        contents = try_get(yt_initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
+        for content in contents:
+            rows = try_get(content, lambda x: x['videoSecondaryInfoRenderer']['metadataRowContainer']['metadataRowContainerRenderer']['rows'], list) or []
+            multiple_songs = False
+            for row in rows:
+                if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
+                    multiple_songs = True
+                    break
+            for row in rows:
+                mrr = row.get('metadataRowRenderer') or {}
+                mrr_title = try_get(
+                    mrr, lambda x: x['title']['simpleText'], compat_str)
+                mrr_contents = try_get(
+                    mrr, lambda x: x['contents'][0], dict) or {}
+                mrr_contents_text = try_get(mrr_contents, [lambda x: x['simpleText'], lambda x: x['runs'][0]['text']], compat_str)
+                if not (mrr_title and mrr_contents_text):
+                    continue
+                if mrr_title == 'License':
+                    video_license = mrr_contents_text
+                elif not multiple_songs:
+                    if mrr_title == 'Album':
+                        album = mrr_contents_text
+                    elif mrr_title == 'Artist':
+                        artist = mrr_contents_text
+                    elif mrr_title == 'Song':
+                        track = mrr_contents_text
 
         m_episode = re.search(
             r'<div[^>]+id="watch7-headline"[^>]*>\s*<span[^>]*>.*?>(?P<series>[^<]+)</a></b>\s*S(?P<season>\d+)\s*•\s*E(?P<episode>\d+)</span>',
@@ -3244,6 +3226,21 @@ def _extract_from_playlist(self, item_id, data, playlist):
             self._playlist_entries(playlist), playlist_id=playlist_id,
             playlist_title=title)
 
+    def _extract_alerts(self, data):
+        for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
+            for renderer in alert_dict:
+                alert = alert_dict[renderer]
+                alert_type = alert.get('type')
+                if not alert_type:
+                    continue
+                message = try_get(alert, lambda x: x['text']['simpleText'], compat_str)
+                if message:
+                    yield alert_type, message
+                for run in try_get(alert, lambda x: x['text']['runs'], list) or []:
+                    message = try_get(run, lambda x: x['text'], compat_str)
+                    if message:
+                        yield alert_type, message
+
     def _real_extract(self, url):
         item_id = self._match_id(url)
         url = compat_urlparse.urlunparse(
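
_extract_alerts iterates whatever renderer each entry of data['alerts'] carries and yields (type, message) pairs from both simpleText and runs. A self-contained sketch of the same logic against a hand-written alerts payload (renderer names and messages are illustrative):

# Hand-written alerts payload, not captured from YouTube.
data = {
    'alerts': [
        {'alertRenderer': {
            'type': 'ERROR',
            'text': {'simpleText': 'This playlist does not exist.'}}},
        {'alertWithButtonRenderer': {
            'type': 'INFO',
            'text': {'runs': [{'text': 'Unavailable videos are hidden.'}]}}},
    ]
}

def extract_alerts(data):
    for alert_dict in data.get('alerts') or []:
        for renderer, alert in alert_dict.items():
            alert_type = alert.get('type')
            if not alert_type:
                continue
            text = alert.get('text') or {}
            if text.get('simpleText'):
                yield alert_type, text['simpleText']
            for run in text.get('runs') or []:
                if run.get('text'):
                    yield alert_type, run['text']

for alert_type, message in extract_alerts(data):
    print('YouTube said: %s - %s' % (alert_type, message))
# YouTube said: ERROR - This playlist does not exist.
# YouTube said: INFO - Unavailable videos are hidden.
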
@@ -3259,16 +3256,27 @@ def _real_extract(self, url):
         qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
         video_id = qs.get('v', [None])[0]
         playlist_id = qs.get('list', [None])[0]
+
+        if is_home.group('not_channel') is not None and is_home.group('not_channel').startswith('watch') and not video_id:
+            if playlist_id:
+                self._downloader.report_warning('%s is not a valid Youtube URL. Trying to download playlist %s' % (url, playlist_id))
+                url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
+                # return self.url_result(playlist_id, ie=YoutubePlaylistIE.ie_key())
+            else:
+                raise ExtractorError('Unable to recognize tab page')
         if video_id and playlist_id:
             if self._downloader.params.get('noplaylist'):
                 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                 return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
             self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+
         webpage = self._download_webpage(url, item_id)
         identity_token = self._search_regex(
             r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
         data = self._extract_yt_initial_data(item_id, webpage)
+        for alert_type, alert_message in self._extract_alerts(data):
+            self._downloader.report_warning('YouTube said: %s - %s' % (alert_type, alert_message))
         tabs = try_get(
             data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
         if tabs:
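
The new watch-URL fallback above rewrites a /watch URL that carries a list= parameter but no v= parameter into the corresponding playlist URL before the page is fetched. A rough sketch of that decision using the standard-library urllib.parse in place of the compat_urlparse wrappers (the playlist id is a placeholder):

from urllib.parse import parse_qs, urlparse

# Placeholder playlist id; any /watch URL with list= but no v= takes this path.
url = 'https://www.youtube.com/watch?list=PLxxxxxxxxxxxxxxxx'
qs = parse_qs(urlparse(url).query)
video_id = qs.get('v', [None])[0]
playlist_id = qs.get('list', [None])[0]
if not video_id and playlist_id:
    # fall back to the playlist page instead of failing on the watch page
    url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
print(url)  # https://www.youtube.com/playlist?list=PLxxxxxxxxxxxxxxxx
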
@@ -3461,10 +3469,33 @@ def _entries(self, query, n):
                 list)
             if not slr_contents:
                 break
-            isr_contents = try_get(
-                slr_contents,
-                lambda x: x[0]['itemSectionRenderer']['contents'],
-                list)
+
+            isr_contents = []
+            continuation_token = None
+            # Youtube sometimes adds promoted content to searches,
+            # changing the index location of videos and token.
+            # So we search through all entries until we find both.
+            for index, isr in enumerate(slr_contents):
+                if not isr_contents:
+                    isr_contents = try_get(
+                        slr_contents,
+                        (lambda x: x[index]['itemSectionRenderer']['contents']),
+                        list) or []
+                    for content in isr_contents:
+                        if content.get('videoRenderer') is not None:
+                            break
+                    else:
+                        isr_contents = []
+
+                if continuation_token is None:
+                    continuation_token = try_get(
+                        slr_contents,
+                        lambda x: x[index]['continuationItemRenderer']['continuationEndpoint']['continuationCommand'][
+                            'token'],
+                        compat_str)
+                if continuation_token is not None and isr_contents:
+                    break
+
             if not isr_contents:
                 break
             for content in isr_contents:
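
The replacement loop above no longer assumes the video results sit at index 0 and the continuation token at index 1 of the sectionListRenderer contents; it scans every section because promoted content can shift both. A standalone sketch of that scan over a hand-written slr_contents list:

# Hand-written sectionListRenderer contents: a promoted section first, then the
# real video results, then the continuation item.
slr_contents = [
    {'itemSectionRenderer': {'contents': [{'promotedSparklesWebRenderer': {}}]}},
    {'itemSectionRenderer': {'contents': [{'videoRenderer': {'videoId': 'xxxxxxxxxxx'}}]}},
    {'continuationItemRenderer': {'continuationEndpoint': {
        'continuationCommand': {'token': 'SEARCH_CONTINUATION'}}}},
]

isr_contents = []
continuation_token = None
for section in slr_contents:
    if not isr_contents:
        candidates = (section.get('itemSectionRenderer') or {}).get('contents') or []
        # keep only a section that actually carries videoRenderer entries
        if any('videoRenderer' in c for c in candidates):
            isr_contents = candidates
    if continuation_token is None:
        continuation_token = (
            (section.get('continuationItemRenderer') or {})
            .get('continuationEndpoint', {})
            .get('continuationCommand', {})
            .get('token'))
    if isr_contents and continuation_token:
        break

print(len(isr_contents), continuation_token)  # 1 SEARCH_CONTINUATION
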
@@ -3498,13 +3529,9 @@ def _entries(self, query, n):
                 }
                 if total == n:
                     return
-            token = try_get(
-                slr_contents,
-                lambda x: x[1]['continuationItemRenderer']['continuationEndpoint']['continuationCommand']['token'],
-                compat_str)
-            if not token:
+            if not continuation_token:
                 break
-            data['continuation'] = token
+            data['continuation'] = continuation_token
 
     def _get_n_results(self, query, n):
         """Get a specified number of results for a query"""