-# coding: utf-8
-from __future__ import unicode_literals
-
-import datetime
+import datetime as dt
+import functools
import itertools
import json
import re
+import time
+import urllib.parse
from .common import InfoExtractor, SearchInfoExtractor
-from ..postprocessor.ffmpeg import FFmpegPostProcessor
-from ..compat import (
- compat_str,
- compat_parse_qs,
- compat_urllib_parse_urlparse,
-)
+from ..networking import Request
+from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
- dict_get,
+ OnDemandPagedList,
+ clean_html,
float_or_none,
int_or_none,
- OnDemandPagedList,
+ join_nonempty,
parse_duration,
parse_iso8601,
- PostProcessingError,
+ parse_resolution,
+ qualities,
remove_start,
str_or_none,
+ traverse_obj,
try_get,
- unified_timestamp,
+ unescapeHTML,
+ update_url_query,
+ url_or_none,
urlencode_postdata,
- xpath_text,
+ urljoin,
)
class NiconicoIE(InfoExtractor):
IE_NAME = 'niconico'
IE_DESC = 'ニコニコ動画'
+ _GEO_COUNTRIES = ['JP']
+ _GEO_BYPASS = False
_TESTS = [{
'url': 'http://www.nicovideo.jp/watch/sm22312215',
- 'md5': 'a5bad06f1347452102953f323c69da34',
+ 'md5': 'd1a75c0823e2f629128c43e1212760f9',
'info_dict': {
'id': 'sm22312215',
'ext': 'mp4',
'duration': 33,
'view_count': int,
'comment_count': int,
+ 'genres': ['未設定'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# Files downloaded with and without credentials differ, so omit
# the md5 field
'url': 'http://www.nicovideo.jp/watch/nm14296458',
'info_dict': {
'id': 'nm14296458',
- 'ext': 'swf',
- 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
- 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
+ 'ext': 'mp4',
+ 'title': '【Kagamine Rin】Dance on media【Original】take2!',
+ 'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5',
'thumbnail': r're:https?://.*',
'uploader': 'りょうた',
'uploader_id': '18822557',
'upload_date': '20110429',
'timestamp': 1304065916,
- 'duration': 209,
+ 'duration': 208.0,
+ 'comment_count': int,
+ 'view_count': int,
+ 'genres': ['音楽・サウンド'],
+ 'tags': ['Translation_Request', 'Kagamine_Rin', 'Rin_Original'],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# video exists but is marked as "deleted"
# md5 is unstable
}, {
# video not available via `getflv`; "old" HTML5 video
'url': 'http://www.nicovideo.jp/watch/sm1151009',
- 'md5': '8fa81c364eb619d4085354eab075598a',
+ 'md5': 'f95a3d259172667b293530cc2e41ebda',
'info_dict': {
'id': 'sm1151009',
'ext': 'mp4',
'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
- 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
+ 'description': 'md5:f95a3d259172667b293530cc2e41ebda',
'thumbnail': r're:https?://.*',
'duration': 184,
- 'timestamp': 1190868283,
- 'upload_date': '20070927',
+ 'timestamp': 1190835883,
+ 'upload_date': '20070926',
'uploader': 'denden2',
'uploader_id': '1392194',
'view_count': int,
'comment_count': int,
+ 'genres': ['ゲーム'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# "New" HTML5 video
# md5 is unstable
'ext': 'mp4',
'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
- 'timestamp': 1498514060,
+ 'timestamp': 1498481660,
'upload_date': '20170626',
- 'uploader': 'ゲスト',
+ 'uploader': 'no-namamae',
'uploader_id': '40826363',
'thumbnail': r're:https?://.*',
'duration': 198,
'view_count': int,
'comment_count': int,
+ 'genres': ['アニメ'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
# Video without owner
'url': 'http://www.nicovideo.jp/watch/sm18238488',
'ext': 'mp4',
'title': '【実写版】ミュータントタートルズ',
'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
- 'timestamp': 1341160408,
+ 'timestamp': 1341128008,
'upload_date': '20120701',
- 'uploader': None,
- 'uploader_id': None,
'thumbnail': r're:https?://.*',
'duration': 5271,
'view_count': int,
'comment_count': int,
+ 'genres': ['エンターテイメント'],
+ 'tags': [],
+ 'expected_protocol': str,
},
- 'skip': 'Requires an account',
}, {
'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
'only_matching': True,
+ }, {
+ 'note': 'a video that is only served as an encrypted HLS stream.',
+ 'url': 'https://www.nicovideo.jp/watch/so38016254',
+ 'only_matching': True,
}]
- _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
+ _VALID_URL = r'https?://(?:(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch|nico\.ms)/(?P<id>(?:[a-z]{2})?[0-9]+)'
_NETRC_MACHINE = 'niconico'
-
_API_HEADERS = {
'X-Frontend-ID': '6',
- 'X-Frontend-Version': '0'
+ 'X-Frontend-Version': '0',
+ 'X-Niconico-Language': 'en-us',
+ 'Referer': 'https://www.nicovideo.jp/',
+ 'Origin': 'https://www.nicovideo.jp',
}
- def _real_initialize(self):
- self._login()
-
- def _login(self):
- username, password = self._get_login_info()
- # No authentication to be performed
- if not username:
- return True
-
- # Log in
+ def _perform_login(self, username, password):
login_ok = True
login_form_strs = {
'mail_tel': username,
'password': password,
}
- urlh = self._request_webpage(
- 'https://account.nicovideo.jp/api/v1/login', None,
+ self._request_webpage(
+ 'https://account.nicovideo.jp/login', None,
+ note='Acquiring login session')
+ page = self._download_webpage(
+ 'https://account.nicovideo.jp/login/redirector?show_button_twitter=1&site=niconico&show_button_facebook=1', None,
note='Logging in', errnote='Unable to log in',
- data=urlencode_postdata(login_form_strs))
- if urlh is False:
- login_ok = False
- else:
- parts = compat_urllib_parse_urlparse(urlh.geturl())
- if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
- login_ok = False
+ data=urlencode_postdata(login_form_strs),
+ headers={
+ 'Referer': 'https://account.nicovideo.jp/login',
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ })
+ if 'oneTimePw' in page:
+ post_url = self._search_regex(
+ r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', group='url')
+ page = self._download_webpage(
+ urljoin('https://account.nicovideo.jp', post_url), None,
+ note='Performing MFA', errnote='Unable to complete MFA',
+ data=urlencode_postdata({
+ 'otp': self._get_tfa_info('6 digits code')
+ }), headers={
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ })
+ if 'oneTimePw' in page or 'formError' in page:
+ err_msg = self._html_search_regex(
+ r'formError["\']+>(.*?)</div>', page, 'form_error',
+ default='An error occurred, but the message could not be parsed.',
+ flags=re.DOTALL)
+ self.report_warning(f'Unable to log in: MFA challenge failed, "{err_msg}"')
+ return False
+ login_ok = 'class="notice error"' not in page
if not login_ok:
- self.report_warning('unable to log in: bad username or password')
+ self.report_warning('Unable to log in: bad username or password')
return login_ok
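+ # The DMC stream URL expires unless the client periodically re-POSTs the
+ # session object; this builds the data the niconico_dmc downloader needs
+ # to send those keep-alive heartbeats.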
def _get_heartbeat_info(self, info_dict):
-
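+ # format URLs are synthesized below as
+ # 'niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>'; unpack the IDs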
video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')
+ dmc_protocol = info_dict['expected_protocol']
api_data = (
info_dict.get('_api_data')
or self._parse_json(
self._html_search_regex(
'data-api-data="([^"]+)"',
- self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id),
+ self._download_webpage('https://www.nicovideo.jp/watch/' + video_id, video_id),
'API data', default='{}'),
video_id))
session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])
def ping():
- status = try_get(
- self._download_json(
- 'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id,
- query={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])},
- note='Acquiring permission for downloading video',
- headers=self._API_HEADERS),
- lambda x: x['meta']['status'])
- if status != 200:
- self.report_warning('Failed to acquire permission for playing video. The video may not download.')
+ tracking_id = traverse_obj(api_data, ('media', 'delivery', 'trackingId'))
+ if tracking_id:
+ tracking_url = update_url_query('https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', {'t': tracking_id})
+ watch_request_response = self._download_json(
+ tracking_url, video_id,
+ note='Acquiring permission for downloading video', fatal=False,
+ headers=self._API_HEADERS)
+ if traverse_obj(watch_request_response, ('meta', 'status')) != 200:
+ self.report_warning('Failed to acquire permission for playing video. Video download may fail.')
yesno = lambda x: 'yes' if x else 'no'
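+ # the DMC session API expects literal 'yes'/'no' strings instead of booleans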
- # m3u8 (encryption)
- if try_get(api_data, lambda x: x['media']['delivery']['encryption']) is not None:
+ if dmc_protocol == 'http':
+ protocol = 'http'
+ protocol_parameters = {
+ 'http_output_download_parameters': {
+ 'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
+ 'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
+ }
+ }
+ elif dmc_protocol == 'hls':
protocol = 'm3u8'
- encryption = self._parse_json(session_api_data['token'], video_id)['hls_encryption']
- session_api_http_parameters = {
- 'parameters': {
- 'hls_parameters': {
- 'encryption': {
- encryption: {
- 'encrypted_key': try_get(api_data, lambda x: x['media']['delivery']['encryption']['encryptedKey']),
- 'key_uri': try_get(api_data, lambda x: x['media']['delivery']['encryption']['keyUri'])
- }
- },
- 'transfer_preset': '',
- 'use_ssl': yesno(session_api_endpoint['isSsl']),
- 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
- 'segment_duration': 6000,
- }
+ segment_duration = try_get(self._configuration_arg('segment_duration'), lambda x: int(x[0])) or 6000
+ parsed_token = self._parse_json(session_api_data['token'], video_id)
+ encryption = traverse_obj(api_data, ('media', 'delivery', 'encryption'))
+ protocol_parameters = {
+ 'hls_parameters': {
+ 'segment_duration': segment_duration,
+ 'transfer_preset': '',
+ 'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
+ 'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
}
}
- # http
- else:
- protocol = 'http'
- session_api_http_parameters = {
- 'parameters': {
- 'http_output_download_parameters': {
- 'use_ssl': yesno(session_api_endpoint['isSsl']),
- 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
+ if 'hls_encryption' in parsed_token and encryption:
+ protocol_parameters['hls_parameters']['encryption'] = {
+ parsed_token['hls_encryption']: {
+ 'encrypted_key': encryption['encryptedKey'],
+ 'key_uri': encryption['keyUri'],
}
}
- }
+ else:
+ protocol = 'm3u8_native'
+ else:
+ raise ExtractorError(f'Unsupported DMC protocol: {dmc_protocol}')
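+ # POST the assembled session blob to the DMC session endpoint; the response
+ # carries the content URL and the heartbeat lifetime used below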
session_response = self._download_json(
session_api_endpoint['url'], video_id,
'lifetime': session_api_data.get('heartbeatLifetime')
}
},
- 'priority': session_api_data.get('priority'),
+ 'priority': session_api_data['priority'],
'protocol': {
'name': 'http',
'parameters': {
- 'http_parameters': session_api_http_parameters
+ 'http_parameters': {
+ 'parameters': protocol_parameters
+ }
}
},
'recipe_id': session_api_data.get('recipeId'),
return info_dict, heartbeat_info_dict
- def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
- def parse_format_id(id_code):
- mobj = re.match(r'''(?x)
- (?:archive_)?
- (?:(?P<codec>[^_]+)_)?
- (?:(?P<br>[\d]+)kbps_)?
- (?:(?P<res>[\d+]+)p_)?
- ''', '%s_' % id_code)
- return mobj.groupdict() if mobj else {}
-
- protocol = 'niconico_dmc'
- format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))
- vdict = parse_format_id(video_quality['id'])
- adict = parse_format_id(audio_quality['id'])
- resolution = try_get(video_quality, lambda x: x['metadata']['resolution'], dict) or {'height': vdict.get('res')}
- vbr = try_get(video_quality, lambda x: x['metadata']['bitrate'], float)
+ def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dmc_protocol):
+
+ if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
+ return None
+
+ format_id = '-'.join(
+ [remove_start(s['id'], 'archive_') for s in (video_quality, audio_quality)] + [dmc_protocol])
+
+ vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))
return {
- 'url': '%s:%s/%s/%s' % (protocol, video_id, video_quality['id'], audio_quality['id']),
+ 'url': 'niconico_dmc:%s/%s/%s' % (video_id, video_quality['id'], audio_quality['id']),
'format_id': format_id,
- 'format_note': 'DMC %s' % try_get(video_quality, lambda x: x['metadata']['label'], compat_str),
+ 'format_note': join_nonempty('DMC', vid_qual_label, dmc_protocol.upper(), delim=' '),
'ext': 'mp4', # the session API is used in HTML5, which always serves mp4
- 'vcodec': vdict.get('codec'),
- 'acodec': adict.get('codec'),
- 'vbr': float_or_none(vbr, 1000) or float_or_none(vdict.get('br')),
- 'abr': float_or_none(audio_quality.get('bitrate'), 1000) or float_or_none(adict.get('br')),
- 'height': int_or_none(resolution.get('height', vdict.get('res'))),
- 'width': int_or_none(resolution.get('width')),
- 'quality': -2 if 'low' in format_id else -1, # Default quality value is -1
- 'protocol': protocol,
+ 'acodec': 'aac',
+ 'vcodec': 'h264',
+ **traverse_obj(audio_quality, ('metadata', {
+ 'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+ 'asr': ('samplingRate', {int_or_none}),
+ })),
+ **traverse_obj(video_quality, ('metadata', {
+ 'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
+ 'height': ('resolution', 'height', {int_or_none}),
+ 'width': ('resolution', 'width', {int_or_none}),
+ })),
+ 'quality': -2 if 'low' in video_quality['id'] else None,
+ 'protocol': 'niconico_dmc',
+ 'expected_protocol': dmc_protocol, # XXX: This is not a documented field
'http_headers': {
'Origin': 'https://www.nicovideo.jp',
'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
}
}
+ def _yield_dmc_formats(self, api_data, video_id):
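+ # DMC is the legacy delivery system: yield one candidate format per
+ # available audio/video quality pair and advertised session protocol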
+ dmc_data = traverse_obj(api_data, ('media', 'delivery', 'movie'))
+ audios = traverse_obj(dmc_data, ('audios', ..., {dict}))
+ videos = traverse_obj(dmc_data, ('videos', ..., {dict}))
+ protocols = traverse_obj(dmc_data, ('session', 'protocols', ..., {str}))
+ if not all((audios, videos, protocols)):
+ return
+
+ for audio_quality, video_quality, protocol in itertools.product(audios, videos, protocols):
+ if fmt := self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol):
+ yield fmt
+
+ def _yield_dms_formats(self, api_data, video_id):
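+ # DMS ("domand") is the newer delivery system: one signed access-rights
+ # request returns a single HLS master playlist covering every video/audio ID pair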
+ fmt_filter = lambda _, v: v['isAvailable'] and v['id']
+ videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter))
+ audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter))
+ access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str}))
+ track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str}))
+ if not all((videos, audios, access_key, track_id)):
+ return
+
+ dms_m3u8_url = self._download_json(
+ f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
+ data=json.dumps({
+ 'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios)))
+ }).encode(), query={'actionTrackId': track_id}, headers={
+ 'x-access-right-key': access_key,
+ 'x-frontend-id': 6,
+ 'x-frontend-version': 0,
+ 'x-request-with': 'https://www.nicovideo.jp',
+ })['data']['contentUrl']
+ # Getting all audio formats results in duplicate video formats which we filter out later
+ dms_fmts = self._extract_m3u8_formats(dms_m3u8_url, video_id)
+
+ # m3u8 extraction does not provide audio bitrates, so fill them in from the API data
+ for audio_fmt in traverse_obj(dms_fmts, lambda _, v: v['vcodec'] == 'none'):
+ yield {
+ **audio_fmt,
+ **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
+ 'format_id': ('id', {str}),
+ 'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
+ 'asr': ('samplingRate', {int_or_none}),
+ }), get_all=False),
+ 'acodec': 'aac',
+ 'ext': 'm4a',
+ }
+
+ # Sort before removing dupes to keep the format dicts with the lowest tbr
+ video_fmts = sorted((fmt for fmt in dms_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr'])
+ self._remove_duplicate_formats(video_fmts)
+ # Calculate the true vbr/tbr by subtracting the lowest abr
+ min_abr = min(traverse_obj(audios, (..., 'bitRate', {float_or_none})), default=0) / 1000
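+ # e.g. a variant reported at 2128 kbps that muxes a 128 kbps audio track
+ # ends up as a ~2000 kbps video format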
+ for video_fmt in video_fmts:
+ video_fmt['tbr'] -= min_abr
+ video_fmt['format_id'] = f'video-{video_fmt["tbr"]:.0f}'
+ yield video_fmt
+
def _real_extract(self, url):
video_id = self._match_id(url)
- # Get video webpage for API data.
- webpage, handle = self._download_webpage_handle(
- 'http://www.nicovideo.jp/watch/' + video_id, video_id)
- if video_id.startswith('so'):
- video_id = self._match_id(handle.geturl())
-
- api_data = self._parse_json(self._html_search_regex(
- 'data-api-data="([^"]+)"', webpage,
- 'API data', default='{}'), video_id)
-
- def get_video_info_web(items):
- return dict_get(api_data['video'], items)
-
- # Get video info
- video_info_xml = self._download_xml(
- 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
- video_id, note='Downloading video info page')
-
- def get_video_info_xml(items):
- if not isinstance(items, list):
- items = [items]
- for item in items:
- ret = xpath_text(video_info_xml, './/' + item)
- if ret:
- return ret
-
- if get_video_info_xml('error'):
- error_code = get_video_info_xml('code')
-
- if error_code == 'DELETED':
- raise ExtractorError('The video has been deleted.',
- expected=True)
- elif error_code == 'NOT_FOUND':
- raise ExtractorError('The video is not found.',
- expected=True)
- elif error_code == 'COMMUNITY':
- self.to_screen('%s: The video is community members only.' % video_id)
- else:
- raise ExtractorError('%s reports error: %s' % (self.IE_NAME, error_code))
-
- # Start extracting video formats
- formats = []
+ try:
+ webpage, handle = self._download_webpage_handle(
+ 'https://www.nicovideo.jp/watch/' + video_id, video_id)
+ if video_id.startswith('so'):
+ video_id = self._match_id(handle.url)
- # Get HTML5 videos info
- quality_info = try_get(api_data, lambda x: x['media']['delivery']['movie'])
- if not quality_info:
- raise ExtractorError('The video can\'t be downloaded', expected=True)
+ api_data = self._parse_json(self._html_search_regex(
+ 'data-api-data="([^"]+)"', webpage,
+ 'API data', default='{}'), video_id)
+ except ExtractorError as e:
+ try:
+ api_data = self._download_json(
+ 'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
+ note='Downloading API JSON', errnote='Unable to fetch data')['data']
+ except ExtractorError:
+ if not isinstance(e.cause, HTTPError):
+ raise
+ webpage = e.cause.response.read().decode('utf-8', 'replace')
+ error_msg = self._html_search_regex(
+ r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
+ webpage, 'error reason', default=None)
+ if not error_msg:
+ raise
+ raise ExtractorError(clean_html(error_msg), expected=True)
+
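+ # isPremium/isAdmission map to premium/subscription gating; when the
+ # payment block is absent entirely, assume the video requires login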
+ availability = self._availability(**(traverse_obj(api_data, ('payment', 'video', {
+ 'needs_premium': ('isPremium', {bool}),
+ 'needs_subscription': ('isAdmission', {bool}),
+ })) or {'needs_auth': True}))
+ formats = [*self._yield_dmc_formats(api_data, video_id),
+ *self._yield_dms_formats(api_data, video_id)]
+ if not formats:
+ fail_msg = clean_html(self._html_search_regex(
+ r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
+ webpage, 'fail message', default=None, group='msg'))
+ if fail_msg:
+ self.to_screen(f'Niconico said: {fail_msg}')
+ if fail_msg and 'された地域と同じ地域からのみ視聴できます。' in fail_msg:
+ availability = None
+ self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
+ elif availability == 'premium_only':
+ self.raise_login_required('This video requires premium', metadata_available=True)
+ elif availability == 'subscriber_only':
+ self.raise_login_required('This video is for members only', metadata_available=True)
+ elif availability == 'needs_auth':
+ self.raise_login_required(metadata_available=False)
- for audio_quality in quality_info.get('audios') or {}:
- for video_quality in quality_info.get('videos') or {}:
- if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
- continue
- formats.append(self._extract_format_for_quality(
- api_data, video_id, audio_quality, video_quality))
+ # Start extracting information
+ tags = None
+ if webpage:
+ # use og:video:tag (not logged in)
+ og_video_tags = re.finditer(r'<meta\s+property="og:video:tag"\s*content="(.*?)">', webpage)
+ tags = list(filter(None, (clean_html(x.group(1)) for x in og_video_tags)))
+ if not tags:
+ # fall back to the keywords meta tag, split on commas (not logged in)
+ kwds = self._html_search_meta('keywords', webpage, default=None)
+ if kwds:
+ tags = [x for x in kwds.split(',') if x]
+ if not tags:
+ # fall back to the API JSON (logged in)
+ tags = traverse_obj(api_data, ('tag', 'items', ..., 'name'))
+
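+ # qualities() ranks later keys higher, so 'ogp' is the preferred thumbnail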
+ thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])
+
+ def get_video_info(*items, get_first=True, **kwargs):
+ return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)
- # Get flv/swf info
- timestamp = None
- video_real_url = try_get(api_data, lambda x: x['video']['smileInfo']['url'])
- if video_real_url:
- is_economy = video_real_url.endswith('low')
+ return {
+ 'id': video_id,
+ '_api_data': api_data,
+ 'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
+ 'formats': formats,
+ 'availability': availability,
+ 'thumbnails': [{
+ 'id': key,
+ 'url': url,
+ 'ext': 'jpg',
+ 'preference': thumb_prefs(key),
+ **parse_resolution(url, lenient=True),
+ } for key, url in (get_video_info('thumbnail') or {}).items() if url],
+ 'description': clean_html(get_video_info('description')),
+ 'uploader': traverse_obj(api_data, ('owner', 'nickname'), ('channel', 'name'), ('community', 'name')),
+ 'uploader_id': str_or_none(traverse_obj(api_data, ('owner', 'id'), ('channel', 'id'), ('community', 'id'))),
+ 'timestamp': parse_iso8601(get_video_info('registeredAt')) or parse_iso8601(
+ self._html_search_meta('video:release_date', webpage, 'date published', default=None)),
+ 'channel': traverse_obj(api_data, ('channel', 'name'), ('community', 'name')),
+ 'channel_id': traverse_obj(api_data, ('channel', 'id'), ('community', 'id')),
+ 'view_count': int_or_none(get_video_info('count', 'view')),
+ 'tags': tags,
+ 'genre': traverse_obj(api_data, ('genre', 'label'), ('genre', 'key')),
+ 'comment_count': get_video_info('count', 'comment', expected_type=int),
+ 'duration': (
+ parse_duration(self._html_search_meta('video:duration', webpage, 'video duration', default=None))
+ or get_video_info('duration')),
+ 'webpage_url': url_or_none(url) or f'https://www.nicovideo.jp/watch/{video_id}',
+ 'subtitles': self.extract_subtitles(video_id, api_data),
+ }
- if is_economy:
- self.report_warning('Site is currently in economy mode! You will only have access to lower quality streams')
+ def _get_subtitles(self, video_id, api_data):
+ comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
+ if not comments_info.get('server'):
+ return
+
+ danmaku = traverse_obj(self._download_json(
+ f'{comments_info["server"]}/v1/threads', video_id, data=json.dumps({
+ 'additionals': {},
+ 'params': comments_info.get('params'),
+ 'threadKey': comments_info.get('threadKey'),
+ }).encode(), fatal=False,
+ headers={
+ 'Referer': 'https://www.nicovideo.jp/',
+ 'Origin': 'https://www.nicovideo.jp',
+ 'Content-Type': 'text/plain;charset=UTF-8',
+ 'x-client-os-type': 'others',
+ 'x-frontend-id': '6',
+ 'x-frontend-version': '0',
+ },
+ note='Downloading comments', errnote='Failed to download comments'),
+ ('data', 'threads', ..., 'comments', ...))
- # Invoking ffprobe to determine resolution
- pp = FFmpegPostProcessor(self._downloader)
- cookies = self._get_cookies('https://nicovideo.jp').output(header='', sep='; path=/; domain=nicovideo.jp;\n')
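+ # the comment stream ("danmaku") is exposed as a JSON subtitle track named
+ # 'comments' rather than as timed-text subtitles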
+ return {
+ 'comments': [{
+ 'ext': 'json',
+ 'data': json.dumps(danmaku),
+ }],
+ }
- self.to_screen('%s: %s' % (video_id, 'Checking smile format with ffprobe'))
- try:
- metadata = pp.get_metadata_object(video_real_url, ['-cookies', cookies])
- except PostProcessingError as err:
- raise ExtractorError(err.msg, expected=True)
-
- v_stream = a_stream = {}
-
- # Some complex swf files doesn't have video stream (e.g. nm4809023)
- for stream in metadata['streams']:
- if stream['codec_type'] == 'video':
- v_stream = stream
- elif stream['codec_type'] == 'audio':
- a_stream = stream
-
- # Community restricted videos seem to have issues with the thumb API not returning anything at all
- filesize = int(
- (get_video_info_xml('size_high') if not is_economy else get_video_info_xml('size_low'))
- or metadata['format']['size']
- )
- extension = (
- get_video_info_xml('movie_type')
- or 'mp4' if 'mp4' in metadata['format']['format_name'] else metadata['format']['format_name']
- )
-
- # 'creation_time' tag on video stream of re-encoded SMILEVIDEO mp4 files are '1970-01-01T00:00:00.000000Z'.
- timestamp = (
- parse_iso8601(get_video_info_web('first_retrieve'))
- or unified_timestamp(get_video_info_web('postedDateTime'))
- )
- metadata_timestamp = (
- parse_iso8601(try_get(v_stream, lambda x: x['tags']['creation_time']))
- or timestamp if extension != 'mp4' else 0
- )
-
- # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts
- smile_threshold_timestamp = parse_iso8601('2016-12-08T00:00:00+09:00')
-
- is_source = timestamp < smile_threshold_timestamp or metadata_timestamp > 0
-
- # If movie file size is unstable, old server movie is not source movie.
- if filesize > 1:
- formats.append({
- 'url': video_real_url,
- 'format_id': 'smile' if not is_economy else 'smile_low',
- 'format_note': 'SMILEVIDEO source' if not is_economy else 'SMILEVIDEO low quality',
- 'ext': extension,
- 'container': extension,
- 'vcodec': v_stream.get('codec_name'),
- 'acodec': a_stream.get('codec_name'),
- # Some complex swf files doesn't have total bit rate metadata (e.g. nm6049209)
- 'tbr': int_or_none(metadata['format'].get('bit_rate'), scale=1000),
- 'vbr': int_or_none(v_stream.get('bit_rate'), scale=1000),
- 'abr': int_or_none(a_stream.get('bit_rate'), scale=1000),
- 'height': int_or_none(v_stream.get('height')),
- 'width': int_or_none(v_stream.get('width')),
- 'source_preference': 5 if not is_economy else -2,
- 'quality': 5 if is_source and not is_economy else None,
- 'filesize': filesize
- })
+class NiconicoPlaylistBaseIE(InfoExtractor):
+ _PAGE_SIZE = 100
- self._sort_formats(formats)
+ _API_HEADERS = {
+ 'X-Frontend-ID': '6',
+ 'X-Frontend-Version': '0',
+ 'X-Niconico-Language': 'en-us'
+ }
- # Start extracting information
- title = (
- get_video_info_xml('title') # prefer to get the untranslated original title
- or get_video_info_web(['originalTitle', 'title'])
- or self._og_search_title(webpage, default=None)
- or self._html_search_regex(
- r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
- webpage, 'video title'))
-
- watch_api_data_string = self._html_search_regex(
- r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
- webpage, 'watch api data', default=None)
- watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
- video_detail = watch_api_data.get('videoDetail', {})
-
- thumbnail = (
- self._html_search_regex(r'<meta property="og:image" content="([^"]+)">', webpage, 'thumbnail data', default=None)
- or dict_get( # choose highest from 720p to 240p
- get_video_info_web('thumbnail'),
- ['ogp', 'player', 'largeUrl', 'middleUrl', 'url'])
- or self._html_search_meta('image', webpage, 'thumbnail', default=None)
- or video_detail.get('thumbnail'))
-
- description = get_video_info_web('description')
-
- if not timestamp:
- match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
- if match:
- timestamp = parse_iso8601(match.replace('+', ':00+'))
- if not timestamp and video_detail.get('postedAt'):
- timestamp = parse_iso8601(
- video_detail['postedAt'].replace('/', '-'),
- delimiter=' ', timezone=datetime.timedelta(hours=9))
- timestamp = timestamp or try_get(api_data, lambda x: parse_iso8601(x['video']['registeredAt']))
-
- view_count = int_or_none(get_video_info_web(['view_counter', 'viewCount']))
- if not view_count:
- match = self._html_search_regex(
- r'>Views: <strong[^>]*>([^<]+)</strong>',
- webpage, 'view count', default=None)
- if match:
- view_count = int_or_none(match.replace(',', ''))
- view_count = (
- view_count
- or video_detail.get('viewCount')
- or try_get(api_data, lambda x: x['video']['count']['view']))
-
- comment_count = (
- int_or_none(get_video_info_web('comment_num'))
- or video_detail.get('commentCount')
- or try_get(api_data, lambda x: x['video']['count']['comment']))
-
- if not comment_count:
- match = self._html_search_regex(
- r'>Comments: <strong[^>]*>([^<]+)</strong>',
- webpage, 'comment count', default=None)
- if match:
- comment_count = int_or_none(match.replace(',', ''))
-
- duration = (parse_duration(
- get_video_info_web('length')
- or self._html_search_meta(
- 'video:duration', webpage, 'video duration', default=None))
- or video_detail.get('length')
- or get_video_info_web('duration'))
-
- webpage_url = get_video_info_web('watch_url') or url
-
- # for channel movie and community movie
- channel_id = try_get(
- api_data,
- (lambda x: x['channel']['globalId'],
- lambda x: x['community']['globalId']))
- channel = try_get(
- api_data,
- (lambda x: x['channel']['name'],
- lambda x: x['community']['name']))
-
- # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
- # in the JSON, which will cause None to be returned instead of {}.
- owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
- uploader_id = str_or_none(
- get_video_info_web(['ch_id', 'user_id'])
- or owner.get('id')
- or channel_id
- )
- uploader = (
- get_video_info_web(['ch_name', 'user_nickname'])
- or owner.get('nickname')
- or channel
- )
+ def _call_api(self, list_id, resource, query):
+ raise NotImplementedError('Must be implemented in subclasses')
+ @staticmethod
+ def _parse_owner(item):
return {
- 'id': video_id,
- '_api_data': api_data,
- 'title': title,
- 'formats': formats,
- 'thumbnail': thumbnail,
- 'description': description,
- 'uploader': uploader,
- 'timestamp': timestamp,
- 'uploader_id': uploader_id,
- 'channel': channel,
- 'channel_id': channel_id,
- 'view_count': view_count,
- 'comment_count': comment_count,
- 'duration': duration,
- 'webpage_url': webpage_url,
+ 'uploader': traverse_obj(item, ('owner', 'name')),
+ 'uploader_id': traverse_obj(item, ('owner', 'id')),
}
+ def _fetch_page(self, list_id, page):
+ page += 1
+ resp = self._call_api(list_id, 'page %d' % page, {
+ 'page': page,
+ 'pageSize': self._PAGE_SIZE,
+ })
+ # branch over ('video', None) to support both mylist items ({'video': {...}}) and user-page items ({...})
+ for video in traverse_obj(resp, ('items', ..., ('video', None))) or []:
+ video_id = video.get('id')
+ if not video_id:
+ # skip {"video": {"id": "blablabla", ...}}
+ continue
+ count = video.get('count') or {}
+ get_count = lambda x: int_or_none(count.get(x))
+ yield {
+ '_type': 'url',
+ 'id': video_id,
+ 'title': video.get('title'),
+ 'url': f'https://www.nicovideo.jp/watch/{video_id}',
+ 'description': video.get('shortDescription'),
+ 'duration': int_or_none(video.get('duration')),
+ 'view_count': get_count('view'),
+ 'comment_count': get_count('comment'),
+ 'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
+ 'ie_key': NiconicoIE.ie_key(),
+ **self._parse_owner(video),
+ }
+
+ def _entries(self, list_id):
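+ # OnDemandPagedList lazily fetches pages of _PAGE_SIZE entries as the
+ # playlist is consumed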
+ return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE)
+
-class NiconicoPlaylistIE(InfoExtractor):
- _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/|my/)?mylist/(?P<id>\d+)'
+class NiconicoPlaylistIE(NiconicoPlaylistBaseIE):
+ IE_NAME = 'niconico:playlist'
+ _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.nicovideo.jp/mylist/27411728',
'uploader': 'のっく',
'uploader_id': '805442',
},
- 'playlist_mincount': 225,
+ 'playlist_mincount': 291,
}, {
'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
'only_matching': True,
+ }, {
+ 'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
+ 'only_matching': True,
}]
- _API_HEADERS = {
- 'X-Frontend-ID': '6',
- 'X-Frontend-Version': '0'
- }
+ def _call_api(self, list_id, resource, query):
+ return self._download_json(
+ f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
+ f'Downloading {resource}', query=query,
+ headers=self._API_HEADERS)['data']['mylist']
def _real_extract(self, url):
list_id = self._match_id(url)
+ mylist = self._call_api(list_id, 'list', {
+ 'pageSize': 1,
+ })
+ return self.playlist_result(
+ self._entries(list_id), list_id,
+ mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist))
- def get_page_data(pagenum, pagesize):
- return self._download_json(
- 'http://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id,
- query={'page': 1 + pagenum, 'pageSize': pagesize},
- headers=self._API_HEADERS).get('data').get('mylist')
-
- data = get_page_data(0, 1)
- title = data.get('name')
- description = data.get('description')
- uploader = data.get('owner').get('name')
- uploader_id = data.get('owner').get('id')
-
- def pagefunc(pagenum):
- data = get_page_data(pagenum, 25)
- return ({
- '_type': 'url',
- 'url': 'http://www.nicovideo.jp/watch/' + item.get('watchId'),
- } for item in data.get('items'))
- return {
- '_type': 'playlist',
- 'id': list_id,
- 'title': title,
- 'description': description,
- 'uploader': uploader,
- 'uploader_id': uploader_id,
- 'entries': OnDemandPagedList(pagefunc, 25),
- }
+class NiconicoSeriesIE(InfoExtractor):
+ IE_NAME = 'niconico:series'
+ _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)'
+
+ _TESTS = [{
+ 'url': 'https://www.nicovideo.jp/user/44113208/series/110226',
+ 'info_dict': {
+ 'id': '110226',
+ 'title': 'ご立派ァ!のシリーズ',
+ },
+ 'playlist_mincount': 10,
+ }, {
+ 'url': 'https://www.nicovideo.jp/series/12312/',
+ 'info_dict': {
+ 'id': '12312',
+ 'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
+ },
+ 'playlist_mincount': 103,
+ }, {
+ 'url': 'https://nico.ms/series/203559',
+ 'only_matching': True,
+ }]
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+ webpage = self._download_webpage(url, list_id)
+
+ title = self._search_regex(
+ (r'<title>「(.+)(全',
+ r'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
+ webpage, 'title', fatal=False)
+ if title:
+ title = unescapeHTML(title)
+ json_data = next(self._yield_json_ld(webpage, None, fatal=False))
+ return self.playlist_from_matches(
+ traverse_obj(json_data, ('itemListElement', ..., 'url')), list_id, title, ie=NiconicoIE)
+
+
+class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
+ IE_NAME = 'niconico:history'
+ IE_DESC = 'NicoNico user history or likes. Requires cookies.'
+ _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'
+
+ _TESTS = [{
+ 'note': 'PC page, with /video',
+ 'url': 'https://www.nicovideo.jp/my/history/video',
+ 'only_matching': True,
+ }, {
+ 'note': 'PC page, without /video',
+ 'url': 'https://www.nicovideo.jp/my/history',
+ 'only_matching': True,
+ }, {
+ 'note': 'mobile page, with /video',
+ 'url': 'https://sp.nicovideo.jp/my/history/video',
+ 'only_matching': True,
+ }, {
+ 'note': 'mobile page, without /video',
+ 'url': 'https://sp.nicovideo.jp/my/history',
+ 'only_matching': True,
+ }, {
+ 'note': 'PC page',
+ 'url': 'https://www.nicovideo.jp/my/history/like',
+ 'only_matching': True,
+ }, {
+ 'note': 'Mobile page',
+ 'url': 'https://sp.nicovideo.jp/my/history/like',
+ 'only_matching': True,
+ }]
+
+ def _call_api(self, list_id, resource, query):
+ path = 'likes' if list_id == 'history/like' else 'watch/history'
+ return self._download_json(
+ f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
+ f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']
+
+ def _real_extract(self, url):
+ list_id = self._match_id(url)
+ try:
+ mylist = self._call_api(list_id, 'list', {'pageSize': 1})
+ except ExtractorError as e:
+ if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+ self.raise_login_required('You have to be logged in to get your history')
+ raise
+ return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
class NicovideoSearchBaseIE(InfoExtractor):
+ _SEARCH_TYPE = 'search'
+
def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
query = query or {}
pages = [query['page']] if 'page' in query else itertools.count(1)
webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage)
for item in results:
- yield self.url_result(f'http://www.nicovideo.jp/watch/{item}', 'Niconico', item)
+ yield self.url_result(f'https://www.nicovideo.jp/watch/{item}', 'Niconico', item)
if not results:
break
def _search_results(self, query):
return self._entries(
- self._proto_relative_url(f'//www.nicovideo.jp/search/{query}'), query)
+ self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}'), query)
class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
'playlist_mincount': 1610,
}]
- _START_DATE = datetime.date(2007, 1, 1)
+ _START_DATE = dt.date(2007, 1, 1)
_RESULTS_PER_PAGE = 32
_MAX_PAGES = 50
def _entries(self, url, item_id, start_date=None, end_date=None):
- start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
+ start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()
# If the last page has a full page of videos, we need to break down the query interval further
last_page_len = len(list(self._get_entries_for_date(
yield from super()._entries(url, item_id, query=query, note=note)
+class NicovideoTagURLIE(NicovideoSearchBaseIE):
+ IE_NAME = 'niconico:tag'
+ IE_DESC = 'NicoNico video tag URLs'
+ _SEARCH_TYPE = 'tag'
+ _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/tag/(?P<id>[^?#&]+)?'
+ _TESTS = [{
+ 'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
+ 'info_dict': {
+ 'id': 'ドキュメンタリー淫夢',
+ 'title': 'ドキュメンタリー淫夢'
+ },
+ 'playlist_mincount': 400,
+ }]
+
+ def _real_extract(self, url):
+ query = self._match_id(url)
+ return self.playlist_result(self._entries(url, query), query, query)
+
+
class NiconicoUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
_TEST = {
def _real_extract(self, url):
list_id = self._match_id(url)
return self.playlist_result(self._entries(list_id), list_id, ie=NiconicoIE.ie_key())
+
+
+class NiconicoLiveIE(InfoExtractor):
+ IE_NAME = 'niconico:live'
+ IE_DESC = 'ニコニコ生放送'
+ _VALID_URL = r'https?://(?:sp\.)?live2?\.nicovideo\.jp/(?:watch|gate)/(?P<id>lv\d+)'
+ _TESTS = [{
+ 'note': 'this test case includes invisible characters in the title, pasted as-is',
+ 'url': 'https://live.nicovideo.jp/watch/lv339533123',
+ 'info_dict': {
+ 'id': 'lv339533123',
+ 'title': '激辛ペヤング食べます( ;ᯅ; )(歌枠オーディション参加中)',
+ 'view_count': 1526,
+ 'comment_count': 1772,
+ 'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
+ 'uploader': 'もか',
+ 'channel': 'ゲストさんのコミュニティ',
+ 'channel_id': 'co5776900',
+ 'channel_url': 'https://com.nicovideo.jp/community/co5776900',
+ 'timestamp': 1670677328,
+ 'is_live': True,
+ },
+ 'skip': 'livestream',
+ }, {
+ 'url': 'https://live2.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://sp.live.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }, {
+ 'url': 'https://sp.live2.nicovideo.jp/watch/lv339533123',
+ 'only_matching': True,
+ }]
+
+ _KNOWN_LATENCY = ('high', 'low')
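+ # latency values accepted by the startWatching request; anything else
+ # falls back to 'high' below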
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage, urlh = self._download_webpage_handle(f'https://live.nicovideo.jp/watch/{video_id}', video_id)
+
+ embedded_data = self._parse_json(unescapeHTML(self._search_regex(
+ r'<script\s+id="embedded-data"\s*data-props="(.+?)"', webpage, 'embedded data')), video_id)
+
+ ws_url = traverse_obj(embedded_data, ('site', 'relive', 'webSocketUrl'))
+ if not ws_url:
+ raise ExtractorError('The live stream hasn\'t started yet or has already ended.', expected=True)
+ ws_url = update_url_query(ws_url, {
+ 'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
+ })
+
+ hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
+ latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
+ if latency not in self._KNOWN_LATENCY:
+ latency = 'high'
+
+ ws = self._request_webpage(
+ Request(ws_url, headers={'Origin': f'https://{hostname}'}),
+ video_id=video_id, note='Connecting to WebSocket server')
+
+ self.write_debug('Sending HLS server request')
+ ws.send(json.dumps({
+ 'type': 'startWatching',
+ 'data': {
+ 'stream': {
+ 'quality': 'abr',
+ 'protocol': 'hls+fmp4',
+ 'latency': latency,
+ 'chasePlay': False
+ },
+ 'room': {
+ 'protocol': 'webSocket',
+ 'commentable': True
+ },
+ 'reconnect': False,
+ }
+ }))
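+ # poll the WebSocket until the server announces the stream or reports an
+ # error; any other message type is only echoed in verbose mode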
+
+ while True:
+ recv = ws.recv()
+ if not recv:
+ continue
+ data = json.loads(recv)
+ if not isinstance(data, dict):
+ continue
+ if data.get('type') == 'stream':
+ m3u8_url = data['data']['uri']
+ qualities = data['data']['availableQualities']
+ break
+ elif data.get('type') == 'disconnect':
+ self.write_debug(recv)
+ raise ExtractorError('Disconnected in the middle of extraction')
+ elif data.get('type') == 'error':
+ self.write_debug(recv)
+ message = traverse_obj(data, ('body', 'code')) or recv
+ raise ExtractorError(message)
+ elif self.get_param('verbose', False):
+ if len(recv) > 100:
+ recv = recv[:100] + '...'
+ self.write_debug('Server said: %s' % recv)
+
+ title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
+ ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)
+
+ raw_thumbs = traverse_obj(embedded_data, ('program', 'thumbnail')) or {}
+ thumbnails = []
+ for name, value in raw_thumbs.items():
+ if not isinstance(value, dict):
+ thumbnails.append({
+ 'id': name,
+ 'url': value,
+ **parse_resolution(value, lenient=True),
+ })
+ continue
+
+ for k, img_url in value.items():
+ res = parse_resolution(k, lenient=True) or parse_resolution(img_url, lenient=True)
+ width, height = res.get('width'), res.get('height')
+
+ thumbnails.append({
+ 'id': f'{name}_{width}x{height}',
+ 'url': img_url,
+ **res,
+ })
+
+ formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
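+ # pair each HLS variant with a quality label; this assumes 'abr' comes
+ # first in availableQualities and that the remaining labels run in the
+ # opposite order to the master playlist's variants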
+ for fmt, q in zip(formats, reversed(qualities[1:])):
+ fmt.update({
+ 'format_id': q,
+ 'protocol': 'niconico_live',
+ 'ws': ws,
+ 'video_id': video_id,
+ 'live_latency': latency,
+ 'origin': hostname,
+ })
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ **traverse_obj(embedded_data, {
+ 'view_count': ('program', 'statistics', 'watchCount'),
+ 'comment_count': ('program', 'statistics', 'commentCount'),
+ 'uploader': ('program', 'supplier', 'name'),
+ 'channel': ('socialGroup', 'name'),
+ 'channel_id': ('socialGroup', 'id'),
+ 'channel_url': ('socialGroup', 'socialGroupPageUrl'),
+ }),
+ 'description': clean_html(traverse_obj(embedded_data, ('program', 'description'))),
+ 'timestamp': int_or_none(traverse_obj(embedded_data, ('program', 'openTime'))),
+ 'is_live': True,
+ 'thumbnails': thumbnails,
+ 'formats': formats,
+ }