-# coding: utf-8
-from __future__ import unicode_literals
-
-import datetime
+import datetime as dt
import functools
import itertools
import json
import re
import time
+import urllib.parse
from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
- compat_parse_qs,
- compat_urllib_parse_urlparse,
- compat_HTTPError,
-)
+from ..networking import Request
+from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
OnDemandPagedList,
- bug_reports_message,
clean_html,
float_or_none,
int_or_none,
join_nonempty,
parse_duration,
- parse_filesize,
parse_iso8601,
+ parse_resolution,
+ qualities,
remove_start,
+ str_or_none,
traverse_obj,
try_get,
unescapeHTML,
update_url_query,
url_or_none,
urlencode_postdata,
+ urljoin,
)
class NiconicoIE(InfoExtractor):
IE_NAME = 'niconico'
IE_DESC = 'ニコニコ動画'
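+ # Some videos are region-locked to Japan (see raise_geo_restricted below); the header-based
+ # geo bypass is disabled since it presumably does not work for this site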
+ _GEO_COUNTRIES = ['JP']
+ _GEO_BYPASS = False
_TESTS = [{
'url': 'http://www.nicovideo.jp/watch/sm22312215',
'duration': 33,
'view_count': int,
'comment_count': int,
+ 'genres': ['未設定'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# Files downloaded with and without credentials differ, so omit
# the md5 field
'url': 'http://www.nicovideo.jp/watch/nm14296458',
'info_dict': {
'id': 'nm14296458',
- 'ext': 'swf',
- 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
- 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
+ 'ext': 'mp4',
+ 'title': '【Kagamine Rin】Dance on media【Original】take2!',
+ 'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5',
'thumbnail': r're:https?://.*',
'uploader': 'りょうた',
'uploader_id': '18822557',
'upload_date': '20110429',
'timestamp': 1304065916,
- 'duration': 209,
+ 'duration': 208.0,
+ 'comment_count': int,
+ 'view_count': int,
+ 'genres': ['音楽・サウンド'],
+ 'tags': ['Translation_Request', 'Kagamine_Rin', 'Rin_Original'],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# video exists but is marked as "deleted"
# md5 is unstable
}, {
# video not available via `getflv`; "old" HTML5 video
'url': 'http://www.nicovideo.jp/watch/sm1151009',
- 'md5': '8fa81c364eb619d4085354eab075598a',
+ 'md5': 'f95a3d259172667b293530cc2e41ebda',
'info_dict': {
'id': 'sm1151009',
'ext': 'mp4',
'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
- 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
+ 'description': 'md5:f95a3d259172667b293530cc2e41ebda',
'thumbnail': r're:https?://.*',
'duration': 184,
- 'timestamp': 1190868283,
- 'upload_date': '20070927',
+ 'timestamp': 1190835883,
+ 'upload_date': '20070926',
'uploader': 'denden2',
'uploader_id': '1392194',
'view_count': int,
'comment_count': int,
+ 'genres': ['ゲーム'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# "New" HTML5 video
# md5 is unstable
'ext': 'mp4',
'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
- 'timestamp': 1498514060,
+ 'timestamp': 1498481660,
'upload_date': '20170626',
- 'uploader': 'ゲスト',
+ 'uploader': 'no-namamae',
'uploader_id': '40826363',
'thumbnail': r're:https?://.*',
'duration': 198,
'view_count': int,
'comment_count': int,
+ 'genres': ['アニメ'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# Video without owner
'url': 'http://www.nicovideo.jp/watch/sm18238488',
'ext': 'mp4',
'title': '【実写版】ミュータントタートルズ',
'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
- 'timestamp': 1341160408,
+ 'timestamp': 1341128008,
'upload_date': '20120701',
- 'uploader': None,
- 'uploader_id': None,
'thumbnail': r're:https?://.*',
'duration': 5271,
'view_count': int,
'comment_count': int,
+ 'genres': ['エンターテイメント'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
'only_matching': True,
_VALID_URL = r'https?://(?:(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch|nico\.ms)/(?P<id>(?:[a-z]{2})?[0-9]+)'
_NETRC_MACHINE = 'niconico'
- _COMMENT_API_ENDPOINTS = (
- 'https://nvcomment.nicovideo.jp/legacy/api.json',
- 'https://nmsg.nicovideo.jp/api.json',)
_API_HEADERS = {
'X-Frontend-ID': '6',
'X-Frontend-Version': '0',
'Origin': 'https://www.nicovideo.jp',
}
- def _real_initialize(self):
- self._login()
-
- def _login(self):
- username, password = self._get_login_info()
- # No authentication to be performed
- if not username:
- return True
-
- # Log in
+ def _perform_login(self, username, password):
login_ok = True
login_form_strs = {
'mail_tel': username,
self._request_webpage(
'https://account.nicovideo.jp/login', None,
note='Acquiring Login session')
- urlh = self._request_webpage(
+ page = self._download_webpage(
'https://account.nicovideo.jp/login/redirector?show_button_twitter=1&site=niconico&show_button_facebook=1', None,
note='Logging in', errnote='Unable to log in',
data=urlencode_postdata(login_form_strs),
'Referer': 'https://account.nicovideo.jp/login',
'Content-Type': 'application/x-www-form-urlencoded',
})
- if urlh is False:
- login_ok = False
- else:
- parts = compat_urllib_parse_urlparse(urlh.geturl())
- if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
- login_ok = False
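+ # The redirector serves an OTP form (marked by 'oneTimePw') when two-factor auth is enabled,
+ # so answer the challenge before treating the login as failed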
+ if 'oneTimePw' in page:
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', group='url')
+ page = self._download_webpage(
+ urljoin('https://account.nicovideo.jp', post_url), None,
+ note='Performing MFA', errnote='Unable to complete MFA',
+ data=urlencode_postdata({
+ 'otp': self._get_tfa_info('6 digits code')
+ }), headers={
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ })
+ if 'oneTimePw' in page or 'formError' in page:
+ err_msg = self._html_search_regex(
+ r'formError["\']+>(.*?)</div>', page, 'form_error',
+ default='There\'s an error but the message can\'t be parsed.',
+ flags=re.DOTALL)
+ self.report_warning(f'Unable to log in: MFA challenge failed, "{err_msg}"')
+ return False
+ login_ok = 'class="notice error"' not in page
if not login_ok:
- self.report_warning('unable to log in: bad username or password')
+ self.report_warning('Unable to log in: bad username or password')
return login_ok
def _get_heartbeat_info(self, info_dict):
video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')
- dmc_protocol = info_dict['_expected_protocol']
+ dmc_protocol = info_dict['expected_protocol']
api_data = (
info_dict.get('_api_data')
or self._parse_json(
self._html_search_regex(
'data-api-data="([^"]+)"',
- self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id),
+ self._download_webpage('https://www.nicovideo.jp/watch/' + video_id, video_id),
'API data', default='{}'),
video_id))
if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
return None
- def extract_video_quality(video_quality):
- return parse_filesize('%sB' % self._search_regex(
- r'\| ([0-9]*\.?[0-9]*[MK])', video_quality, 'vbr', default=''))
-
format_id = '-'.join(
[remove_start(s['id'], 'archive_') for s in (video_quality, audio_quality)] + [dmc_protocol])
vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))
- vid_quality = traverse_obj(video_quality, ('metadata', 'bitrate'))
return {
'url': 'niconico_dmc:%s/%s/%s' % (video_id, video_quality['id'], audio_quality['id']),
'ext': 'mp4', # Session API are used in HTML5, which always serves mp4
'acodec': 'aac',
'vcodec': 'h264',
- 'abr': float_or_none(traverse_obj(audio_quality, ('metadata', 'bitrate')), 1000),
- 'vbr': float_or_none(vid_quality if vid_quality > 0 else extract_video_quality(vid_qual_label), 1000),
- 'height': traverse_obj(video_quality, ('metadata', 'resolution', 'height')),
- 'width': traverse_obj(video_quality, ('metadata', 'resolution', 'width')),
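+ # Take bitrate, sample rate and resolution straight from the quality metadata;
+ # keys whose paths are missing are simply dropped by traverse_obj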
+ **traverse_obj(audio_quality, ('metadata', {
+ 'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+ 'asr': ('samplingRate', {int_or_none}),
+ })),
+ **traverse_obj(video_quality, ('metadata', {
+ 'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+ 'height': ('resolution', 'height', {int_or_none}),
+ 'width': ('resolution', 'width', {int_or_none}),
+ })),
'quality': -2 if 'low' in video_quality['id'] else None,
'protocol': 'niconico_dmc',
- '_expected_protocol': dmc_protocol,
+ 'expected_protocol': dmc_protocol, # XXX: This is not a documented field
'http_headers': {
'Origin': 'https://www.nicovideo.jp',
'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
}
}
+ def _yield_dmc_formats(self, api_data, video_id):
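+ # DMC is the older delivery system, served through the session ('heartbeat') API;
+ # one format is produced per available audio/video/protocol combination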
+ dmc_data = traverse_obj(api_data, ('media', 'delivery', 'movie'))
+ audios = traverse_obj(dmc_data, ('audios', ..., {dict}))
+ videos = traverse_obj(dmc_data, ('videos', ..., {dict}))
+ protocols = traverse_obj(dmc_data, ('session', 'protocols', ..., {str}))
+ if not all((audios, videos, protocols)):
+ return
+
+ for audio_quality, video_quality, protocol in itertools.product(audios, videos, protocols):
+ if fmt := self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol):
+ yield fmt
+
+ def _yield_dms_formats(self, api_data, video_id):
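+ # DMS ('domand') is the newer HLS-based delivery system; its formats can only be
+ # fetched after acquiring access rights for the requested renditions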
+ fmt_filter = lambda _, v: v['isAvailable'] and v['id']
+ videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter))
+ audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter))
+ access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str}))
+ track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str}))
+ if not all((videos, audios, access_key, track_id)):
+ return
+
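+ # Exchange the access right key for a single master playlist covering every requested video/audio pairing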
+ dms_m3u8_url = self._download_json(
+ f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
+ data=json.dumps({
+ 'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios)))
+ }).encode(), query={'actionTrackId': track_id}, headers={
+ 'x-access-right-key': access_key,
+ 'x-frontend-id': 6,
+ 'x-frontend-version': 0,
+ 'x-request-with': 'https://www.nicovideo.jp',
+ })['data']['contentUrl']
+ # Getting all audio formats results in duplicate video formats, which we filter out later
+ dms_fmts = self._extract_m3u8_formats(dms_m3u8_url, video_id)
+
+ # m3u8 extraction does not provide audio bitrates, so extract from the API data and fix
+ for audio_fmt in traverse_obj(dms_fmts, lambda _, v: v['vcodec'] == 'none'):
+ yield {
+ **audio_fmt,
+ **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
+ 'format_id': ('id', {str}),
+ 'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
+ 'asr': ('samplingRate', {int_or_none}),
+ }), get_all=False),
+ 'acodec': 'aac',
+ 'ext': 'm4a',
+ }
+
+ # Sort before removing dupes to keep the format dicts with the lowest tbr
+ video_fmts = sorted((fmt for fmt in dms_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr'])
+ self._remove_duplicate_formats(video_fmts)
+ # Calculate the true vbr/tbr by subtracting the lowest abr
+ min_abr = min(traverse_obj(audios, (..., 'bitRate', {float_or_none})), default=0) / 1000
+ for video_fmt in video_fmts:
+ video_fmt['tbr'] -= min_abr
+ video_fmt['format_id'] = f'video-{video_fmt["tbr"]:.0f}'
+ yield video_fmt
+
def _real_extract(self, url):
video_id = self._match_id(url)
try:
webpage, handle = self._download_webpage_handle(
- 'http://www.nicovideo.jp/watch/' + video_id, video_id)
+ 'https://www.nicovideo.jp/watch/' + video_id, video_id)
if video_id.startswith('so'):
- video_id = self._match_id(handle.geturl())
+ video_id = self._match_id(handle.url)
api_data = self._parse_json(self._html_search_regex(
'data-api-data="([^"]+)"', webpage,
'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
note='Downloading API JSON', errnote='Unable to fetch data')['data']
except ExtractorError as e:
- if not isinstance(e.cause, compat_HTTPError):
+ if not isinstance(e.cause, HTTPError):
raise
- webpage = e.cause.read().decode('utf-8', 'replace')
+ webpage = e.cause.response.read().decode('utf-8', 'replace')
error_msg = self._html_search_regex(
r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
webpage, 'error reason', default=None)
if not error_msg:
raise
- raise ExtractorError(re.sub(r'\s+', ' ', error_msg), expected=True)
-
- formats = []
-
- def get_video_info(*items, get_first=True, **kwargs):
- return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
-
- quality_info = api_data['media']['delivery']['movie']
- session_api_data = quality_info['session']
- for (audio_quality, video_quality, protocol) in itertools.product(quality_info['audios'], quality_info['videos'], session_api_data['protocols']):
- fmt = self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol)
- if fmt:
- formats.append(fmt)
-
- self._sort_formats(formats)
+ raise ExtractorError(clean_html(error_msg), expected=True)
+
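+ # Derive availability from the payment flags; if payment data is absent entirely, assume login is required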
+ availability = self._availability(**(traverse_obj(api_data, ('payment', 'video', {
+ 'needs_premium': ('isPremium', {bool}),
+ 'needs_subscription': ('isAdmission', {bool}),
+ })) or {'needs_auth': True}))
+ formats = [*self._yield_dmc_formats(api_data, video_id),
+ *self._yield_dms_formats(api_data, video_id)]
+ if not formats:
+ fail_msg = clean_html(self._html_search_regex(
+ r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
+ webpage, 'fail message', default=None, group='msg'))
+ if fail_msg:
+ self.to_screen(f'Niconico said: {fail_msg}')
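+ # The substring below translates to '...can only be viewed from the same region (as where it was uploaded)'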
+ if fail_msg and 'された地域と同じ地域からのみ視聴できます。' in fail_msg:
+ availability = None
+ self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
+ elif availability == 'premium_only':
+ self.raise_login_required('This video requires premium', metadata_available=True)
+ elif availability == 'subscriber_only':
+ self.raise_login_required('This video is for members only', metadata_available=True)
+ elif availability == 'needs_auth':
+ self.raise_login_required(metadata_available=False)
# Start extracting information
tags = None
# find in json (logged in)
tags = traverse_obj(api_data, ('tag', 'items', ..., 'name'))
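+ # qualities() assigns ascending preference in list order, i.e. 'ogp' thumbnails outrank the plain 'url' ones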
+ thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])
+
+ def get_video_info(*items, get_first=True, **kwargs):
+ return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
+
return {
'id': video_id,
'_api_data': api_data,
'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
'formats': formats,
- 'thumbnail': get_video_info('thumbnail', 'url') or self._html_search_meta(
- ('image', 'og:image'), webpage, 'thumbnail', default=None),
+ 'availability': availability,
+ 'thumbnails': [{
+ 'id': key,
+ 'url': url,
+ 'ext': 'jpg',
+ 'preference': thumb_prefs(key),
+ **parse_resolution(url, lenient=True),
+ } for key, url in (get_video_info('thumbnail') or {}).items() if url],
'description': clean_html(get_video_info('description')),
- 'uploader': traverse_obj(api_data, ('owner', 'nickname')),
+ 'uploader': traverse_obj(api_data, ('owner', 'nickname'), ('channel', 'name'), ('community', 'name')),
+ 'uploader_id': str_or_none(traverse_obj(api_data, ('owner', 'id'), ('channel', 'id'), ('community', 'id'))),
'timestamp': parse_iso8601(get_video_info('registeredAt')) or parse_iso8601(
self._html_search_meta('video:release_date', webpage, 'date published', default=None)),
- 'uploader_id': traverse_obj(api_data, ('owner', 'id')),
'channel': traverse_obj(api_data, ('channel', 'name'), ('community', 'name')),
'channel_id': traverse_obj(api_data, ('channel', 'id'), ('community', 'id')),
'view_count': int_or_none(get_video_info('count', 'view')),
parse_duration(self._html_search_meta('video:duration', webpage, 'video duration', default=None))
or get_video_info('duration')),
'webpage_url': url_or_none(url) or f'https://www.nicovideo.jp/watch/{video_id}',
- 'subtitles': self.extract_subtitles(video_id, api_data, session_api_data),
+ 'subtitles': self.extract_subtitles(video_id, api_data),
}
- def _get_subtitles(self, video_id, api_data, session_api_data):
- comment_user_key = traverse_obj(api_data, ('comment', 'keys', 'userKey'))
- user_id_str = session_api_data.get('serviceUserId')
-
- thread_ids = [x for x in traverse_obj(api_data, ('comment', 'threads')) or [] if x['isActive']]
- raw_danmaku = self._extract_all_comments(video_id, thread_ids, user_id_str, comment_user_key)
- if not raw_danmaku:
- self.report_warning(f'Failed to get comments. {bug_reports_message()}')
+ def _get_subtitles(self, video_id, api_data):
+ comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
+ if not comments_info.get('server'):
return
+
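+ # Fetch the raw danmaku (timed comments) from the nvComment threads API,
+ # using the server/params/threadKey provided by the watch page data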
+ danmaku = traverse_obj(self._download_json(
+ f'{comments_info["server"]}/v1/threads', video_id, data=json.dumps({
+ 'additionals': {},
+ 'params': comments_info.get('params'),
+ 'threadKey': comments_info.get('threadKey'),
+ }).encode(), fatal=False,
+ headers={
+ 'Referer': 'https://www.nicovideo.jp/',
+ 'Origin': 'https://www.nicovideo.jp',
+ 'Content-Type': 'text/plain;charset=UTF-8',
+ 'x-client-os-type': 'others',
+ 'x-frontend-id': '6',
+ 'x-frontend-version': '0',
+ },
+ note='Downloading comments', errnote='Failed to download comments'),
+ ('data', 'threads', ..., 'comments', ...))
+
return {
'comments': [{
'ext': 'json',
- 'data': json.dumps(raw_danmaku),
+ 'data': json.dumps(danmaku),
}],
}
- def _extract_all_comments(self, video_id, threads, user_id, user_key):
- auth_data = {
- 'user_id': user_id,
- 'userkey': user_key,
- } if user_id and user_key else {'user_id': ''}
-
- # Request Start
- post_data = [{'ping': {'content': 'rs:0'}}]
- for i, thread in enumerate(threads):
- thread_id = thread['id']
- thread_fork = thread['fork']
- # Post Start (2N)
- post_data.append({'ping': {'content': f'ps:{i * 2}'}})
- post_data.append({'thread': {
- 'fork': thread_fork,
- 'language': 0,
- 'nicoru': 3,
- 'scores': 1,
- 'thread': thread_id,
- 'version': '20090904',
- 'with_global': 1,
- **auth_data,
- }})
- # Post Final (2N)
- post_data.append({'ping': {'content': f'pf:{i * 2}'}})
-
- # Post Start (2N+1)
- post_data.append({'ping': {'content': f'ps:{i * 2 + 1}'}})
- post_data.append({'thread_leaves': {
- # format is '<bottom of minute range>-<top of minute range>:<comments per minute>,<total last comments'
- # unfortunately NND limits (deletes?) comment returns this way, so you're only able to grab the last 1000 per language
- 'content': '0-999999:999999,999999,nicoru:999999',
- 'fork': thread_fork,
- 'language': 0,
- 'nicoru': 3,
- 'scores': 1,
- 'thread': thread_id,
- **auth_data,
- }})
- # Post Final (2N+1)
- post_data.append({'ping': {'content': f'pf:{i * 2 + 1}'}})
- # Request Final
- post_data.append({'ping': {'content': 'rf:0'}})
-
- for api_url in self._COMMENT_API_ENDPOINTS:
- comments = self._download_json(
- api_url, video_id, data=json.dumps(post_data).encode(), fatal=False,
- headers={
- 'Referer': 'https://www.nicovideo.jp/watch/%s' % video_id,
- 'Origin': 'https://www.nicovideo.jp',
- 'Content-Type': 'text/plain;charset=UTF-8',
- },
- note='Downloading comments', errnote=f'Failed to access endpoint {api_url}')
- if comments:
- return comments
-
class NiconicoPlaylistBaseIE(InfoExtractor):
_PAGE_SIZE = 100
}
def _call_api(self, list_id, resource, query):
- "Implement this in child class"
- pass
+ raise NotImplementedError('Must be implemented in subclasses')
@staticmethod
def _parse_owner(item):
class NiconicoSeriesIE(InfoExtractor):
IE_NAME = 'niconico:series'
- _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/series/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)'
_TESTS = [{
- 'url': 'https://www.nicovideo.jp/series/110226',
+ 'url': 'https://www.nicovideo.jp/user/44113208/series/110226',
'info_dict': {
'id': '110226',
'title': 'ご立派ァ!のシリーズ',
},
- 'playlist_mincount': 10, # as of 2021/03/17
+ 'playlist_mincount': 10,
}, {
'url': 'https://www.nicovideo.jp/series/12312/',
'info_dict': {
'id': '12312',
'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
},
- 'playlist_mincount': 97, # as of 2021/03/17
+ 'playlist_mincount': 103,
}, {
'url': 'https://nico.ms/series/203559',
'only_matching': True,
def _real_extract(self, url):
list_id = self._match_id(url)
- webpage = self._download_webpage(f'https://www.nicovideo.jp/series/{list_id}', list_id)
+ webpage = self._download_webpage(url, list_id)
title = self._search_regex(
(r'<title>「(.+)(全',
webpage, 'title', fatal=False)
if title:
title = unescapeHTML(title)
- playlist = [
- self.url_result(f'https://www.nicovideo.jp/watch/{v_id}', video_id=v_id)
- for v_id in re.findall(r'href="/watch/([a-z0-9]+)" data-href="/watch/\1', webpage)]
- return self.playlist_result(playlist, list_id, title)
+ json_data = next(self._yield_json_ld(webpage, None, fatal=False))
+ return self.playlist_from_matches(
+ traverse_obj(json_data, ('itemListElement', ..., 'url')), list_id, title, ie=NiconicoIE)
class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
IE_NAME = 'niconico:history'
- IE_DESC = 'NicoNico user history. Requires cookies.'
- _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
+ IE_DESC = 'NicoNico user history or likes. Requires cookies.'
+ _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'
_TESTS = [{
'note': 'PC page, with /video',
'note': 'mobile page, without /video',
'url': 'https://sp.nicovideo.jp/my/history',
'only_matching': True,
+ }, {
+ 'note': 'PC page',
+ 'url': 'https://www.nicovideo.jp/my/history/like',
+ 'only_matching': True,
+ }, {
+ 'note': 'Mobile page',
+ 'url': 'https://sp.nicovideo.jp/my/history/like',
+ 'only_matching': True,
}]
def _call_api(self, list_id, resource, query):
+ path = 'likes' if list_id == 'history/like' else 'watch/history'
return self._download_json(
- 'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
- f'Downloading {resource}', query=query,
- headers=self._API_HEADERS)['data']
+ f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
+ f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']
def _real_extract(self, url):
- list_id = 'history'
+ list_id = self._match_id(url)
try:
- mylist = self._call_api(list_id, 'list', {
- 'pageSize': 1,
- })
+ mylist = self._call_api(list_id, 'list', {'pageSize': 1})
except ExtractorError as e:
- if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
- self.raise_login_required('You have to be logged in to get your watch history')
+ if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+ self.raise_login_required('You have to be logged in to get your history')
raise
return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage)
for item in results:
- yield self.url_result(f'http://www.nicovideo.jp/watch/{item}', 'Niconico', item)
+ yield self.url_result(f'https://www.nicovideo.jp/watch/{item}', 'Niconico', item)
if not results:
break
'playlist_mincount': 1610,
}]
- _START_DATE = datetime.date(2007, 1, 1)
+ _START_DATE = dt.date(2007, 1, 1)
_RESULTS_PER_PAGE = 32
_MAX_PAGES = 50
def _entries(self, url, item_id, start_date=None, end_date=None):
- start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+ start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
# If the last page has a full page of videos, we need to break down the query interval further
last_page_len = len(list(self._get_entries_for_date(
def _real_extract(self, url):
list_id = self._match_id(url)
return self.playlist_result(self._entries(list_id), list_id, ie=NiconicoIE.ie_key())
+
+
+class NiconicoLiveIE(InfoExtractor):
+ IE_NAME = 'niconico:live'
+ IE_DESC = 'ニコニコ生放送'
+ _VALID_URL = r'https?://(?:sp\.)?live2?\.nicovideo\.jp/(?:watch|gate)/(?P<id>lv\d+)'
+ _TESTS = [{
+ 'note': 'this test case includes invisible characters in the title, pasted as-is',
+ 'url': 'https://live.nicovideo.jp/watch/lv339533123',
+ 'info_dict': {
+ 'id': 'lv339533123',
+ 'title': '激辛ペヤング食べます( ;ᯅ; )(歌枠オーディション参加中)',
+ 'view_count': 1526,
+ 'comment_count': 1772,
+ 'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
+ 'uploader': 'もか',
+ 'channel': 'ゲストさんのコミュニティ',
+ 'channel_id': 'co5776900',
+ 'channel_url': 'https://com.nicovideo.jp/community/co5776900',
+ 'timestamp': 1670677328,
+ 'is_live': True,
+ },
+ 'skip': 'livestream',
+ }, {
+ 'url': 'https://live2.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://sp.live.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://sp.live2.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }]
+
+ _KNOWN_LATENCY = ('high', 'low')
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage, urlh = self._download_webpage_handle(f'https://live.nicovideo.jp/watch/{video_id}', video_id)
+
+ embedded_data = self._parse_json(unescapeHTML(self._search_regex(
+ r'<script\s+id="embedded-data"\s*data-props="(.+?)"', webpage, 'embedded data')), video_id)
+
+ ws_url = traverse_obj(embedded_data, ('site', 'relive', 'webSocketUrl'))
+ if not ws_url:
+ raise ExtractorError('The live stream hasn\'t started yet or has already ended.', expected=True)
+ ws_url = update_url_query(ws_url, {
+ 'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
+ })
+
+ hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
+ latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
+ if latency not in self._KNOWN_LATENCY:
+ latency = 'high'
+
+ ws = self._request_webpage(
+ Request(ws_url, headers={'Origin': f'https://{hostname}'}),
+ video_id=video_id, note='Connecting to WebSocket server')
+
+ self.write_debug('Sending HLS server request')
+ ws.send(json.dumps({
+ 'type': 'startWatching',
+ 'data': {
+ 'stream': {
+ 'quality': 'abr',
+ 'protocol': 'hls+fmp4',
+ 'latency': latency,
+ 'chasePlay': False
+ },
+ 'room': {
+ 'protocol': 'webSocket',
+ 'commentable': True
+ },
+ 'reconnect': False,
+ }
+ }))
+
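+ # Poll the WebSocket until the server announces the HLS playlist ('stream'); abort on 'disconnect' or 'error'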
+ while True:
+ recv = ws.recv()
+ if not recv:
+ continue
+ data = json.loads(recv)
+ if not isinstance(data, dict):
+ continue
+ if data.get('type') == 'stream':
+ m3u8_url = data['data']['uri']
+ qualities = data['data']['availableQualities']
+ break
+ elif data.get('type') == 'disconnect':
+ self.write_debug(recv)
+ raise ExtractorError('Disconnected in the middle of extraction')
+ elif data.get('type') == 'error':
+ self.write_debug(recv)
+ message = traverse_obj(data, ('body', 'code')) or recv
+ raise ExtractorError(message)
+ elif self.get_param('verbose', False):
+ if len(recv) > 100:
+ recv = recv[:100] + '...'
+ self.write_debug('Server said: %s' % recv)
+
+ title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
+ ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)
+
+ raw_thumbs = traverse_obj(embedded_data, ('program', 'thumbnail')) or {}
+ thumbnails = []
+ for name, value in raw_thumbs.items():
+ if not isinstance(value, dict):
+ thumbnails.append({
+ 'id': name,
+ 'url': value,
+ **parse_resolution(value, lenient=True),
+ })
+ continue
+
+ for k, img_url in value.items():
+ res = parse_resolution(k, lenient=True) or parse_resolution(img_url, lenient=True)
+ width, height = res.get('width'), res.get('height')
+
+ thumbnails.append({
+ 'id': f'{name}_{width}x{height}',
+ 'url': img_url,
+ **res,
+ })
+
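+ # Pair each extracted rendition with a quality name; the leading 'abr' (adaptive) entry is skipped,
+ # and the remaining names are assumed to be ordered worst-first, hence the reversed slice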
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
+ for fmt, q in zip(formats, reversed(qualities[1:])):
+ fmt.update({
+ 'format_id': q,
+ 'protocol': 'niconico_live',
+ 'ws': ws,
+ 'video_id': video_id,
+ 'live_latency': latency,
+ 'origin': hostname,
+ })
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ **traverse_obj(embedded_data, {
+ 'view_count': ('program', 'statistics', 'watchCount'),
+ 'comment_count': ('program', 'statistics', 'commentCount'),
+ 'uploader': ('program', 'supplier', 'name'),
+ 'channel': ('socialGroup', 'name'),
+ 'channel_id': ('socialGroup', 'id'),
+ 'channel_url': ('socialGroup', 'socialGroupPageUrl'),
+ }),
+ 'description': clean_html(traverse_obj(embedded_data, ('program', 'description'))),
+ 'timestamp': int_or_none(traverse_obj(embedded_data, ('program', 'openTime'))),
+ 'is_live': True,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }