[ie/niconico] Remove legacy danmaku extraction (#9209)
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index 2103037596db0a90207c19503e5a7c5bdeab252d..b889c752ccfed533b69d720f524e93c161423f6b 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -5,10 +5,11 @@
 import re
 import time
 
+from urllib.parse import urlparse
+
 from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
-    compat_HTTPError,
-)
+from ..networking import Request
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -171,9 +172,6 @@ class NiconicoIE(InfoExtractor):
 
     _VALID_URL = r'https?://(?:(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch|nico\.ms)/(?P<id>(?:[a-z]{2})?[0-9]+)'
     _NETRC_MACHINE = 'niconico'
-    _COMMENT_API_ENDPOINTS = (
-        'https://nvcomment.nicovideo.jp/legacy/api.json',
-        'https://nmsg.nicovideo.jp/api.json',)
     _API_HEADERS = {
         'X-Frontend-ID': '6',
         'X-Frontend-Version': '0',
@@ -392,7 +390,7 @@ def _real_extract(self, url):
             webpage, handle = self._download_webpage_handle(
                 'https://www.nicovideo.jp/watch/' + video_id, video_id)
             if video_id.startswith('so'):
-                video_id = self._match_id(handle.geturl())
+                video_id = self._match_id(handle.url)
 
             api_data = self._parse_json(self._html_search_regex(
                 'data-api-data="([^"]+)"', webpage,
@@ -403,9 +401,9 @@ def _real_extract(self, url):
                     'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
                     note='Downloading API JSON', errnote='Unable to fetch data')['data']
             except ExtractorError as e:
-                if not isinstance(e.cause, compat_HTTPError):
+                if not isinstance(e.cause, HTTPError):
                     raise
-                webpage = e.cause.read().decode('utf-8', 'replace')
+                webpage = e.cause.response.read().decode('utf-8', 'replace')
                 error_msg = self._html_search_regex(
                     r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
                     webpage, 'error reason', default=None)
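
yt-dlp's networking layer wraps HTTP failures in networking.exceptions.HTTPError, which exposes the status code as e.cause.status and the server's error page via e.cause.response.read(); that is the pattern the except block above relies on. A minimal standalone sketch of the same pattern using the public module paths (the helper name fetch_api_data and its arguments are placeholders, not part of this file):

    from yt_dlp.networking.exceptions import HTTPError
    from yt_dlp.utils import ExtractorError

    def fetch_api_data(ie, api_url, video_id):
        # ie is any InfoExtractor instance; _download_json raises
        # ExtractorError with the transport error attached as .cause
        try:
            return ie._download_json(api_url, video_id)['data']
        except ExtractorError as e:
            if not isinstance(e.cause, HTTPError):
                raise
            # the wrapped Response object carries the server's error page
            body = e.cause.response.read().decode('utf-8', 'replace')
            raise ExtractorError(f'HTTP {e.cause.status}: {body[:200]}', expected=True)
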
@@ -469,81 +467,39 @@ def get_video_info(*items, get_first=True, **kwargs):
                 parse_duration(self._html_search_meta('video:duration', webpage, 'video duration', default=None))
                 or get_video_info('duration')),
             'webpage_url': url_or_none(url) or f'https://www.nicovideo.jp/watch/{video_id}',
-            'subtitles': self.extract_subtitles(video_id, api_data, session_api_data),
+            'subtitles': self.extract_subtitles(video_id, api_data),
         }
 
-    def _get_subtitles(self, video_id, api_data, session_api_data):
-        comment_user_key = traverse_obj(api_data, ('comment', 'keys', 'userKey'))
-        user_id_str = session_api_data.get('serviceUserId')
-
-        thread_ids = traverse_obj(api_data, ('comment', 'threads', lambda _, v: v['isActive']))
-        raw_danmaku = self._extract_all_comments(video_id, thread_ids, user_id_str, comment_user_key)
-        if not raw_danmaku:
+    def _get_subtitles(self, video_id, api_data):
+        comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
+        danmaku = traverse_obj(self._download_json(
+            f'{comments_info.get("server")}/v1/threads', video_id, data=json.dumps({
+                'additionals': {},
+                'params': comments_info.get('params'),
+                'threadKey': comments_info.get('threadKey'),
+            }).encode(), fatal=False,
+            headers={
+                'Referer': 'https://www.nicovideo.jp/',
+                'Origin': 'https://www.nicovideo.jp',
+                'Content-Type': 'text/plain;charset=UTF-8',
+                'x-client-os-type': 'others',
+                'x-frontend-id': '6',
+                'x-frontend-version': '0',
+            },
+            note='Downloading comments', errnote='Failed to download comments'),
+            ('data', 'threads', ..., 'comments', ...))
+
+        if not danmaku:
             self.report_warning(f'Failed to get comments. {bug_reports_message()}')
             return
+
         return {
             'comments': [{
                 'ext': 'json',
-                'data': json.dumps(raw_danmaku),
+                'data': json.dumps(danmaku),
             }],
         }
 
-    def _extract_all_comments(self, video_id, threads, user_id, user_key):
-        auth_data = {
-            'user_id': user_id,
-            'userkey': user_key,
-        } if user_id and user_key else {'user_id': ''}
-
-        # Request Start
-        post_data = [{'ping': {'content': 'rs:0'}}]
-        for i, thread in enumerate(threads):
-            thread_id = thread['id']
-            thread_fork = thread['fork']
-            # Post Start (2N)
-            post_data.append({'ping': {'content': f'ps:{i * 2}'}})
-            post_data.append({'thread': {
-                'fork': thread_fork,
-                'language': 0,
-                'nicoru': 3,
-                'scores': 1,
-                'thread': thread_id,
-                'version': '20090904',
-                'with_global': 1,
-                **auth_data,
-            }})
-            # Post Final (2N)
-            post_data.append({'ping': {'content': f'pf:{i * 2}'}})
-
-            # Post Start (2N+1)
-            post_data.append({'ping': {'content': f'ps:{i * 2 + 1}'}})
-            post_data.append({'thread_leaves': {
-                # format is '<bottom of minute range>-<top of minute range>:<comments per minute>,<total last comments>'
-                # unfortunately NND limits (deletes?) comment returns this way, so you're only able to grab the last 1000 per language
-                'content': '0-999999:999999,999999,nicoru:999999',
-                'fork': thread_fork,
-                'language': 0,
-                'nicoru': 3,
-                'scores': 1,
-                'thread': thread_id,
-                **auth_data,
-            }})
-            # Post Final (2N+1)
-            post_data.append({'ping': {'content': f'pf:{i * 2 + 1}'}})
-        # Request Final
-        post_data.append({'ping': {'content': 'rf:0'}})
-
-        for api_url in self._COMMENT_API_ENDPOINTS:
-            comments = self._download_json(
-                api_url, video_id, data=json.dumps(post_data).encode(), fatal=False,
-                headers={
-                    'Referer': 'https://www.nicovideo.jp/watch/%s' % video_id,
-                    'Origin': 'https://www.nicovideo.jp',
-                    'Content-Type': 'text/plain;charset=UTF-8',
-                },
-                note='Downloading comments', errnote=f'Failed to access endpoint {api_url}')
-            if comments:
-                return comments
-
 
 class NiconicoPlaylistBaseIE(InfoExtractor):
     _PAGE_SIZE = 100
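
For reference, the nvComment flow in the new _get_subtitles is a single POST: the watch page's api_data carries a per-video server, params object and threadKey under ('comment', 'nvComment'), and posting those to {server}/v1/threads returns all active threads with their comments. A standalone sketch with only the standard library; the function name and the flattening at the end are illustrative, while the payload shape and headers match the extractor above:

    import json
    from urllib.request import Request, urlopen

    def fetch_nv_comments(server, params, thread_key):
        payload = json.dumps({
            'additionals': {},
            'params': params,      # opaque object taken from api_data
            'threadKey': thread_key,
        }).encode()
        req = Request(f'{server}/v1/threads', data=payload, headers={
            'Referer': 'https://www.nicovideo.jp/',
            'Origin': 'https://www.nicovideo.jp',
            'Content-Type': 'text/plain;charset=UTF-8',
            'x-client-os-type': 'others',
            'x-frontend-id': '6',
            'x-frontend-version': '0',
        })
        with urlopen(req) as resp:
            data = json.load(resp)
        # flatten every thread's comments, like the traverse_obj call above
        return [c for t in data['data']['threads'] for c in t['comments']]
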
@@ -636,10 +592,10 @@ def _real_extract(self, url):
 
 class NiconicoSeriesIE(InfoExtractor):
     IE_NAME = 'niconico:series'
-    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/series/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)'
 
     _TESTS = [{
-        'url': 'https://www.nicovideo.jp/series/110226',
+        'url': 'https://www.nicovideo.jp/user/44113208/series/110226',
         'info_dict': {
             'id': '110226',
             'title': 'ご立派ァ!のシリーズ',
@@ -659,7 +615,7 @@ class NiconicoSeriesIE(InfoExtractor):
 
     def _real_extract(self, url):
         list_id = self._match_id(url)
-        webpage = self._download_webpage(f'https://www.nicovideo.jp/series/{list_id}', list_id)
+        webpage = self._download_webpage(url, list_id)
 
         title = self._search_regex(
             (r'<title>「(.+)(全',
@@ -667,16 +623,15 @@ def _real_extract(self, url):
             webpage, 'title', fatal=False)
         if title:
             title = unescapeHTML(title)
-        playlist = [
-            self.url_result(f'https://www.nicovideo.jp/watch/{v_id}', video_id=v_id)
-            for v_id in re.findall(r'data-href=[\'"](?:https://www\.nicovideo\.jp)?/watch/([a-z0-9]+)', webpage)]
-        return self.playlist_result(playlist, list_id, title)
+        json_data = next(self._yield_json_ld(webpage, None, fatal=False))
+        return self.playlist_from_matches(
+            traverse_obj(json_data, ('itemListElement', ..., 'url')), list_id, title, ie=NiconicoIE)
 
 
 class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
     IE_NAME = 'niconico:history'
-    IE_DESC = 'NicoNico user history. Requires cookies.'
-    _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
+    IE_DESC = 'NicoNico user history or likes. Requires cookies.'
+    _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'
 
     _TESTS = [{
         'note': 'PC page, with /video',
@@ -694,23 +649,29 @@ class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
         'note': 'mobile page, without /video',
         'url': 'https://sp.nicovideo.jp/my/history',
         'only_matching': True,
+    }, {
+        'note': 'PC page',
+        'url': 'https://www.nicovideo.jp/my/history/like',
+        'only_matching': True,
+    }, {
+        'note': 'Mobile page',
+        'url': 'https://sp.nicovideo.jp/my/history/like',
+        'only_matching': True,
     }]
 
     def _call_api(self, list_id, resource, query):
+        path = 'likes' if list_id == 'history/like' else 'watch/history'
         return self._download_json(
-            'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
-            f'Downloading {resource}', query=query,
-            headers=self._API_HEADERS)['data']
+            f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
+            f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']
 
     def _real_extract(self, url):
-        list_id = 'history'
+        list_id = self._match_id(url)
         try:
-            mylist = self._call_api(list_id, 'list', {
-                'pageSize': 1,
-            })
+            mylist = self._call_api(list_id, 'list', {'pageSize': 1})
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
-                self.raise_login_required('You have to be logged in to get your watch history')
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                self.raise_login_required('You have to be logged in to get your history')
             raise
         return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
 
@@ -866,3 +827,155 @@ def _entries(self, list_id):
     def _real_extract(self, url):
         list_id = self._match_id(url)
         return self.playlist_result(self._entries(list_id), list_id, ie=NiconicoIE.ie_key())
+
+
+class NiconicoLiveIE(InfoExtractor):
+    IE_NAME = 'niconico:live'
+    IE_DESC = 'ニコニコ生放送'
+    _VALID_URL = r'https?://(?:sp\.)?live2?\.nicovideo\.jp/(?:watch|gate)/(?P<id>lv\d+)'
+    _TESTS = [{
+        'note': 'this test case includes invisible characters for title, pasting them as-is',
+        'url': 'https://live.nicovideo.jp/watch/lv339533123',
+        'info_dict': {
+            'id': 'lv339533123',
+            'title': '激辛ペヤング食べます‪( ;ᯅ; )‬(歌枠オーディション参加中)',
+            'view_count': 1526,
+            'comment_count': 1772,
+            'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
+            'uploader': 'もか',
+            'channel': 'ゲストさんのコミュニティ',
+            'channel_id': 'co5776900',
+            'channel_url': 'https://com.nicovideo.jp/community/co5776900',
+            'timestamp': 1670677328,
+            'is_live': True,
+        },
+        'skip': 'livestream',
+    }, {
+        'url': 'https://live2.nicovideo.jp/watch/lv339533123',
+        'only_matching': True,
+    }, {
+        'url': 'https://sp.live.nicovideo.jp/watch/lv339533123',
+        'only_matching': True,
+    }, {
+        'url': 'https://sp.live2.nicovideo.jp/watch/lv339533123',
+        'only_matching': True,
+    }]
+
+    _KNOWN_LATENCY = ('high', 'low')
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage, urlh = self._download_webpage_handle(f'https://live.nicovideo.jp/watch/{video_id}', video_id)
+
+        embedded_data = self._parse_json(unescapeHTML(self._search_regex(
+            r'<script\s+id="embedded-data"\s*data-props="(.+?)"', webpage, 'embedded data')), video_id)
+
+        ws_url = traverse_obj(embedded_data, ('site', 'relive', 'webSocketUrl'))
+        if not ws_url:
+            raise ExtractorError('The live stream hasn\'t started yet or has already ended.', expected=True)
+        ws_url = update_url_query(ws_url, {
+            'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
+        })
+
+        hostname = remove_start(urlparse(urlh.url).hostname, 'sp.')
+        latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
+        if latency not in self._KNOWN_LATENCY:
+            latency = 'high'
+
+        ws = self._request_webpage(
+            Request(ws_url, headers={'Origin': f'https://{hostname}'}),
+            video_id=video_id, note='Connecting to WebSocket server')
+
+        self.write_debug('Sending HLS server request')
+        ws.send(json.dumps({
+            'type': 'startWatching',
+            'data': {
+                'stream': {
+                    'quality': 'abr',
+                    'protocol': 'hls+fmp4',
+                    'latency': latency,
+                    'chasePlay': False
+                },
+                'room': {
+                    'protocol': 'webSocket',
+                    'commentable': True
+                },
+                'reconnect': False,
+            }
+        }))
+
+        while True:
+            recv = ws.recv()
+            if not recv:
+                continue
+            data = json.loads(recv)
+            if not isinstance(data, dict):
+                continue
+            if data.get('type') == 'stream':
+                m3u8_url = data['data']['uri']
+                qualities = data['data']['availableQualities']
+                break
+            elif data.get('type') == 'disconnect':
+                self.write_debug(recv)
+                raise ExtractorError('Disconnected in the middle of extraction')
+            elif data.get('type') == 'error':
+                self.write_debug(recv)
+                message = traverse_obj(data, ('body', 'code')) or recv
+                raise ExtractorError(message)
+            elif self.get_param('verbose', False):
+                if len(recv) > 100:
+                    recv = recv[:100] + '...'
+                self.write_debug('Server said: %s' % recv)
+
+        title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
+            ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)
+
+        raw_thumbs = traverse_obj(embedded_data, ('program', 'thumbnail')) or {}
+        thumbnails = []
+        for name, value in raw_thumbs.items():
+            if not isinstance(value, dict):
+                thumbnails.append({
+                    'id': name,
+                    'url': value,
+                    **parse_resolution(value, lenient=True),
+                })
+                continue
+
+            for k, img_url in value.items():
+                res = parse_resolution(k, lenient=True) or parse_resolution(img_url, lenient=True)
+                width, height = res.get('width'), res.get('height')
+
+                thumbnails.append({
+                    'id': f'{name}_{width}x{height}',
+                    'url': img_url,
+                    **res,
+                })
+
+        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
+        for fmt, q in zip(formats, reversed(qualities[1:])):
+            fmt.update({
+                'format_id': q,
+                'protocol': 'niconico_live',
+                'ws': ws,
+                'video_id': video_id,
+                'live_latency': latency,
+                'origin': hostname,
+            })
+
+        return {
+            'id': video_id,
+            'title': title,
+            **traverse_obj(embedded_data, {
+                'view_count': ('program', 'statistics', 'watchCount'),
+                'comment_count': ('program', 'statistics', 'commentCount'),
+                'uploader': ('program', 'supplier', 'name'),
+                'channel': ('socialGroup', 'name'),
+                'channel_id': ('socialGroup', 'id'),
+                'channel_url': ('socialGroup', 'socialGroupPageUrl'),
+            }),
+            'description': clean_html(traverse_obj(embedded_data, ('program', 'description'))),
+            'timestamp': int_or_none(traverse_obj(embedded_data, ('program', 'openTime'))),
+            'is_live': True,
+            'thumbnails': thumbnails,
+            'formats': formats,
+        }
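
For reference, the startWatching handshake above is plain JSON over a WebSocket: the client announces the desired quality, protocol and latency, then reads messages until one of type 'stream' delivers the HLS playlist URI. A minimal sketch using the third-party websocket-client package (an assumption; yt-dlp ships its own WebSocket transport), where ws_url is the frontend_id-extended webSocketUrl from the embedded data:

    import json
    import websocket  # assumption: pip install websocket-client

    def get_hls_uri(ws_url, latency='high'):
        ws = websocket.create_connection(
            ws_url, header={'Origin': 'https://live.nicovideo.jp'})
        ws.send(json.dumps({
            'type': 'startWatching',
            'data': {
                'stream': {'quality': 'abr', 'protocol': 'hls+fmp4',
                           'latency': latency, 'chasePlay': False},
                'room': {'protocol': 'webSocket', 'commentable': True},
                'reconnect': False,
            },
        }))
        while True:
            msg = json.loads(ws.recv())
            if not isinstance(msg, dict):
                continue
            if msg.get('type') == 'stream':
                return msg['data']['uri'], msg['data']['availableQualities']
            if msg.get('type') in ('disconnect', 'error'):
                raise RuntimeError(f'server refused the stream: {msg}')

Unlike this sketch, the extractor keeps the socket open and attaches it to each format ('ws': ws) so the niconico_live downloader protocol can keep the session alive for the duration of the download.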