import datetime as dt
import functools
import itertools
import json
import re
import time
import urllib.parse

from .common import InfoExtractor, SearchInfoExtractor
from ..networking import Request
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    OnDemandPagedList,
    clean_html,
    float_or_none,
    int_or_none,
    join_nonempty,
    parse_duration,
    parse_iso8601,
    parse_resolution,
    qualities,
    remove_start,
    str_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
    urlencode_postdata,
    urljoin,
)


class NiconicoIE(InfoExtractor):
    IE_NAME = 'niconico'
    IE_DESC = 'ニコニコ動画'
    _GEO_COUNTRIES = ['JP']
    _GEO_BYPASS = False

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/watch/sm22312215',
        'md5': 'd1a75c0823e2f629128c43e1212760f9',
        'info_dict': {
            'id': 'sm22312215',
            'ext': 'mp4',
            'title': 'Big Buck Bunny',
            'thumbnail': r're:https?://.*',
            'uploader': 'takuya0301',
            'uploader_id': '2698420',
            'upload_date': '20131123',
            'timestamp': int,  # timestamp is unstable
            'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
            'duration': 33,
            'view_count': int,
            'comment_count': int,
            'genres': ['未設定'],
            'tags': [],
            'expected_protocol': str,
        },
    }, {
        # Files downloaded with and without credentials are different, so omit
        # the md5 field
        'url': 'http://www.nicovideo.jp/watch/nm14296458',
        'info_dict': {
            'id': 'nm14296458',
            'ext': 'mp4',
            'title': '【Kagamine Rin】Dance on media【Original】take2!',
            'description': 'md5:9368f2b1f4178de64f2602c2f3d6cbf5',
            'thumbnail': r're:https?://.*',
            'uploader': 'りょうた',
            'uploader_id': '18822557',
            'upload_date': '20110429',
            'timestamp': 1304065916,
            'duration': 208.0,
            'comment_count': int,
            'view_count': int,
            'genres': ['音楽・サウンド'],
            'tags': ['Translation_Request', 'Kagamine_Rin', 'Rin_Original'],
            'expected_protocol': str,
        },
    }, {
        # Video exists but is marked as "deleted"
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm10000',
        'info_dict': {
            'id': 'sm10000',
            'ext': 'unknown_video',
            'description': 'deleted',
            'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
            'thumbnail': r're:https?://.*',
            'upload_date': '20071224',
            'timestamp': int,  # timestamp field has different value if logged in
            'duration': 304,
            'view_count': int,
        },
        'skip': 'Requires an account',
    }, {
        'url': 'http://www.nicovideo.jp/watch/so22543406',
        'info_dict': {
            'id': '1388129933',
            'ext': 'mp4',
            'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
            'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
            'thumbnail': r're:https?://.*',
            'timestamp': 1388851200,
            'upload_date': '20140104',
            'uploader': 'アニメロチャンネル',
            'uploader_id': '312',
        },
        'skip': 'The viewing period of the video you were searching for has expired.',
    }, {
        # video not available via `getflv`; "old" HTML5 video
        'url': 'http://www.nicovideo.jp/watch/sm1151009',
        'md5': 'f95a3d259172667b293530cc2e41ebda',
        'info_dict': {
            'id': 'sm1151009',
            'ext': 'mp4',
            'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
            'description': 'md5:f95a3d259172667b293530cc2e41ebda',
            'thumbnail': r're:https?://.*',
            'duration': 184,
            'timestamp': 1190835883,
            'upload_date': '20070926',
            'uploader': 'denden2',
            'uploader_id': '1392194',
            'view_count': int,
            'comment_count': int,
            'genres': ['ゲーム'],
            'tags': [],
            'expected_protocol': str,
        },
    }, {
        # "New" HTML5 video
        # md5 is unstable
        'url': 'http://www.nicovideo.jp/watch/sm31464864',
        'info_dict': {
            'id': 'sm31464864',
            'ext': 'mp4',
            'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
            'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
            'timestamp': 1498481660,
            'upload_date': '20170626',
            'uploader': 'no-namamae',
            'uploader_id': '40826363',
            'thumbnail': r're:https?://.*',
            'duration': 198,
            'view_count': int,
            'comment_count': int,
            'genres': ['アニメ'],
            'tags': [],
            'expected_protocol': str,
        },
    }, {
        # Video without owner
        'url': 'http://www.nicovideo.jp/watch/sm18238488',
        'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
        'info_dict': {
            'id': 'sm18238488',
            'ext': 'mp4',
            'title': '【実写版】ミュータントタートルズ',
            'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
            'timestamp': 1341128008,
            'upload_date': '20120701',
            'thumbnail': r're:https?://.*',
            'duration': 5271,
            'view_count': int,
            'comment_count': int,
            'genres': ['エンターテイメント'],
            'tags': [],
            'expected_protocol': str,
        },
    }, {
        'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
        'only_matching': True,
    }, {
        'note': 'a video that is only served as an ENCRYPTED HLS.',
        'url': 'https://www.nicovideo.jp/watch/so38016254',
        'only_matching': True,
    }]

    _VALID_URL = r'https?://(?:(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch|nico\.ms)/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0',
        'X-Niconico-Language': 'en-us',
        'Referer': 'https://www.nicovideo.jp/',
        'Origin': 'https://www.nicovideo.jp',
    }

    def _perform_login(self, username, password):
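        # Login flow: prime the session on the account page, POST the credentials
        # to the redirector endpoint, then complete the one-time-password (MFA)
        # form if the response asks for it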
        login_ok = True
        login_form_strs = {
            'mail_tel': username,
            'password': password,
        }
        self._request_webpage(
            'https://account.nicovideo.jp/login', None,
            note='Acquiring Login session')
        page = self._download_webpage(
            'https://account.nicovideo.jp/login/redirector?show_button_twitter=1&site=niconico&show_button_facebook=1', None,
            note='Logging in', errnote='Unable to log in',
            data=urlencode_postdata(login_form_strs),
            headers={
                'Referer': 'https://account.nicovideo.jp/login',
                'Content-Type': 'application/x-www-form-urlencoded',
            })
        if 'oneTimePw' in page:
            post_url = self._search_regex(
                r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page, 'post url', group='url')
            page = self._download_webpage(
                urljoin('https://account.nicovideo.jp', post_url), None,
                note='Performing MFA', errnote='Unable to complete MFA',
                data=urlencode_postdata({
                    'otp': self._get_tfa_info('6 digits code')
                }), headers={
                    'Content-Type': 'application/x-www-form-urlencoded',
                })
            if 'oneTimePw' in page or 'formError' in page:
                err_msg = self._html_search_regex(
                    r'formError["\']+>(.*?)</div>', page, 'form_error',
                    default='There\'s an error but the message can\'t be parsed.',
                    flags=re.DOTALL)
                self.report_warning(f'Unable to log in: MFA challenge failed, "{err_msg}"')
                return False
        login_ok = 'class="notice error"' not in page
        if not login_ok:
            self.report_warning('Unable to log in: bad username or password')
        return login_ok

    def _get_heartbeat_info(self, info_dict):
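        # Formats produced by _extract_format_for_quality() carry a pseudo URL of
        # the form 'niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>'; unpack
        # it here and open a DMC session for that exact audio/video pairing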
        video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')
        dmc_protocol = info_dict['expected_protocol']

        api_data = (
            info_dict.get('_api_data')
            or self._parse_json(
                self._html_search_regex(
                    'data-api-data="([^"]+)"',
                    self._download_webpage('https://www.nicovideo.jp/watch/' + video_id, video_id),
                    'API data', default='{}'),
                video_id))

        session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session'])
        session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])

        def ping():
            tracking_id = traverse_obj(api_data, ('media', 'delivery', 'trackingId'))
            if tracking_id:
                tracking_url = update_url_query('https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', {'t': tracking_id})
                watch_request_response = self._download_json(
                    tracking_url, video_id,
                    note='Acquiring permission for downloading video', fatal=False,
                    headers=self._API_HEADERS)
                if traverse_obj(watch_request_response, ('meta', 'status')) != 200:
                    self.report_warning('Failed to acquire permission for playing video. Video download may fail.')

        yesno = lambda x: 'yes' if x else 'no'

        if dmc_protocol == 'http':
            protocol = 'http'
            protocol_parameters = {
                'http_output_download_parameters': {
                    'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                    'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
                }
            }
        elif dmc_protocol == 'hls':
            protocol = 'm3u8'
            segment_duration = try_get(self._configuration_arg('segment_duration'), lambda x: int(x[0])) or 6000
            parsed_token = self._parse_json(session_api_data['token'], video_id)
            encryption = traverse_obj(api_data, ('media', 'delivery', 'encryption'))
            protocol_parameters = {
                'hls_parameters': {
                    'segment_duration': segment_duration,
                    'transfer_preset': '',
                    'use_ssl': yesno(session_api_data['urls'][0]['isSsl']),
                    'use_well_known_port': yesno(session_api_data['urls'][0]['isWellKnownPort']),
                }
            }
            if 'hls_encryption' in parsed_token and encryption:
                protocol_parameters['hls_parameters']['encryption'] = {
                    parsed_token['hls_encryption']: {
                        'encrypted_key': encryption['encryptedKey'],
                        'key_uri': encryption['keyUri'],
                    }
                }
            else:
                protocol = 'm3u8_native'
        else:
            raise ExtractorError(f'Unsupported DMC protocol: {dmc_protocol}')

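        # POST the desired protocol parameters to the DMC session API; the response
        # contains the actual content URI plus a session object that has to be kept
        # alive with periodic PUT requests (see heartbeat_info_dict below)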
        session_response = self._download_json(
            session_api_endpoint['url'], video_id,
            query={'_format': 'json'},
            headers={'Content-Type': 'application/json'},
            note='Downloading JSON metadata for %s' % info_dict['format_id'],
            data=json.dumps({
                'session': {
                    'client_info': {
                        'player_id': session_api_data.get('playerId'),
                    },
                    'content_auth': {
                        'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
                        'content_key_timeout': session_api_data.get('contentKeyTimeout'),
                        'service_id': 'nicovideo',
                        'service_user_id': session_api_data.get('serviceUserId')
                    },
                    'content_id': session_api_data.get('contentId'),
                    'content_src_id_sets': [{
                        'content_src_ids': [{
                            'src_id_to_mux': {
                                'audio_src_ids': [audio_src_id],
                                'video_src_ids': [video_src_id],
                            }
                        }]
                    }],
                    'content_type': 'movie',
                    'content_uri': '',
                    'keep_method': {
                        'heartbeat': {
                            'lifetime': session_api_data.get('heartbeatLifetime')
                        }
                    },
                    'priority': session_api_data['priority'],
                    'protocol': {
                        'name': 'http',
                        'parameters': {
                            'http_parameters': {
                                'parameters': protocol_parameters
                            }
                        }
                    },
                    'recipe_id': session_api_data.get('recipeId'),
                    'session_operation_auth': {
                        'session_operation_auth_by_signature': {
                            'signature': session_api_data.get('signature'),
                            'token': session_api_data.get('token'),
                        }
                    },
                    'timing_constraint': 'unlimited'
                }
            }).encode())

        info_dict['url'] = session_response['data']['session']['content_uri']
        info_dict['protocol'] = protocol

        # get heartbeat info
        heartbeat_info_dict = {
            'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT',
            'data': json.dumps(session_response['data']),
            # interval: heartbeatLifetime is in milliseconds; scale=3000 converts it
            # to seconds and keeps only a third of the lifetime as a safety margin
            'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),
            'ping': ping
        }

        return info_dict, heartbeat_info_dict

    def _extract_format_for_quality(self, video_id, audio_quality, video_quality, dmc_protocol):

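        # Merge one DMC audio/video quality pair into a single pseudo-format whose
        # 'niconico_dmc:' URL is resolved later by _get_heartbeat_info()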
        if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
            return None

        format_id = '-'.join(
            [remove_start(s['id'], 'archive_') for s in (video_quality, audio_quality)] + [dmc_protocol])

        vid_qual_label = traverse_obj(video_quality, ('metadata', 'label'))

        return {
            'url': 'niconico_dmc:%s/%s/%s' % (video_id, video_quality['id'], audio_quality['id']),
            'format_id': format_id,
            'format_note': join_nonempty('DMC', vid_qual_label, dmc_protocol.upper(), delim=' '),
            'ext': 'mp4',  # Session API is used in HTML5, which always serves mp4
            'acodec': 'aac',
            'vcodec': 'h264',
            **traverse_obj(audio_quality, ('metadata', {
                'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
                'asr': ('samplingRate', {int_or_none}),
            })),
            **traverse_obj(video_quality, ('metadata', {
                'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
                'height': ('resolution', 'height', {int_or_none}),
                'width': ('resolution', 'width', {int_or_none}),
            })),
            'quality': -2 if 'low' in video_quality['id'] else None,
            'protocol': 'niconico_dmc',
            'expected_protocol': dmc_protocol,  # XXX: This is not a documented field
            'http_headers': {
                'Origin': 'https://www.nicovideo.jp',
                'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
            }
        }

    def _yield_dmc_formats(self, api_data, video_id):
        dmc_data = traverse_obj(api_data, ('media', 'delivery', 'movie'))
        audios = traverse_obj(dmc_data, ('audios', ..., {dict}))
        videos = traverse_obj(dmc_data, ('videos', ..., {dict}))
        protocols = traverse_obj(dmc_data, ('session', 'protocols', ..., {str}))
        if not all((audios, videos, protocols)):
            return

        for audio_quality, video_quality, protocol in itertools.product(audios, videos, protocols):
            if fmt := self._extract_format_for_quality(video_id, audio_quality, video_quality, protocol):
                yield fmt

    def _yield_dms_formats(self, api_data, video_id):
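        # DMS ("domand") delivery: request an HLS master playlist from the
        # access-rights API for every available audio/video quality combination;
        # only qualities that are marked as available and have an id are kept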
        fmt_filter = lambda _, v: v['isAvailable'] and v['id']
        videos = traverse_obj(api_data, ('media', 'domand', 'videos', fmt_filter))
        audios = traverse_obj(api_data, ('media', 'domand', 'audios', fmt_filter))
        access_key = traverse_obj(api_data, ('media', 'domand', 'accessRightKey', {str}))
        track_id = traverse_obj(api_data, ('client', 'watchTrackId', {str}))
        if not all((videos, audios, access_key, track_id)):
            return

        dms_m3u8_url = self._download_json(
            f'https://nvapi.nicovideo.jp/v1/watch/{video_id}/access-rights/hls', video_id,
            data=json.dumps({
                'outputs': list(itertools.product((v['id'] for v in videos), (a['id'] for a in audios)))
            }).encode(), query={'actionTrackId': track_id}, headers={
                'x-access-right-key': access_key,
                'x-frontend-id': 6,
                'x-frontend-version': 0,
                'x-request-with': 'https://www.nicovideo.jp',
            })['data']['contentUrl']
        # Getting all audio formats results in duplicate video formats which we filter out later
        dms_fmts = self._extract_m3u8_formats(dms_m3u8_url, video_id)

        # m3u8 extraction does not provide audio bitrates, so extract from the API data and fix
        for audio_fmt in traverse_obj(dms_fmts, lambda _, v: v['vcodec'] == 'none'):
            yield {
                **audio_fmt,
                **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
                    'format_id': ('id', {str}),
                    'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
                    'asr': ('samplingRate', {int_or_none}),
                }), get_all=False),
                'acodec': 'aac',
                'ext': 'm4a',
            }

        # Sort before removing dupes to keep the format dicts with the lowest tbr
        video_fmts = sorted((fmt for fmt in dms_fmts if fmt['vcodec'] != 'none'), key=lambda f: f['tbr'])
        self._remove_duplicate_formats(video_fmts)
        # Calculate the true vbr/tbr by subtracting the lowest abr
        min_abr = min(traverse_obj(audios, (..., 'bitRate', {float_or_none})), default=0) / 1000
        for video_fmt in video_fmts:
            video_fmt['tbr'] -= min_abr
            video_fmt['format_id'] = f'video-{video_fmt["tbr"]:.0f}'
            yield video_fmt

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            webpage, handle = self._download_webpage_handle(
                'https://www.nicovideo.jp/watch/' + video_id, video_id)
            if video_id.startswith('so'):
                video_id = self._match_id(handle.url)

            api_data = self._parse_json(self._html_search_regex(
                'data-api-data="([^"]+)"', webpage,
                'API data', default='{}'), video_id)
        except ExtractorError as e:
            try:
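                # Fall back to the watch v3 API when the watch page itself cannot
                # be fetched; if that also fails, surface the original page error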
                api_data = self._download_json(
                    'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
                    note='Downloading API JSON', errnote='Unable to fetch data')['data']
            except ExtractorError:
                if not isinstance(e.cause, HTTPError):
                    raise
                webpage = e.cause.response.read().decode('utf-8', 'replace')
                error_msg = self._html_search_regex(
                    r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
                    webpage, 'error reason', default=None)
                if not error_msg:
                    raise
                raise ExtractorError(clean_html(error_msg), expected=True)

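        # Map niconico's payment flags onto yt-dlp availability values; when the
        # payment info is missing entirely, assume the video at least needs a login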
        availability = self._availability(**(traverse_obj(api_data, ('payment', 'video', {
            'needs_premium': ('isPremium', {bool}),
            'needs_subscription': ('isAdmission', {bool}),
        })) or {'needs_auth': True}))
        formats = [*self._yield_dmc_formats(api_data, video_id),
                   *self._yield_dms_formats(api_data, video_id)]
        if not formats:
            fail_msg = clean_html(self._html_search_regex(
                r'<p[^>]+\bclass="fail-message"[^>]*>(?P<msg>.+?)</p>',
                webpage, 'fail message', default=None, group='msg'))
            if fail_msg:
                self.to_screen(f'Niconico said: {fail_msg}')
            if fail_msg and 'された地域と同じ地域からのみ視聴できます。' in fail_msg:
                availability = None
                self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
            elif availability == 'premium_only':
                self.raise_login_required('This video requires premium', metadata_available=True)
            elif availability == 'subscriber_only':
                self.raise_login_required('This video is for members only', metadata_available=True)
            elif availability == 'needs_auth':
                self.raise_login_required(metadata_available=False)

        # Start extracting information
        tags = None
        if webpage:
            # use og:video:tag (not logged in)
            og_video_tags = re.finditer(r'<meta\s+property="og:video:tag"\s*content="(.*?)">', webpage)
            tags = list(filter(None, (clean_html(x.group(1)) for x in og_video_tags)))
            if not tags:
                # use keywords and split with comma (not logged in)
                kwds = self._html_search_meta('keywords', webpage, default=None)
                if kwds:
                    tags = [x for x in kwds.split(',') if x]
        if not tags:
            # find in json (logged in)
            tags = traverse_obj(api_data, ('tag', 'items', ..., 'name'))

        thumb_prefs = qualities(['url', 'middleUrl', 'largeUrl', 'player', 'ogp'])

        def get_video_info(*items, get_first=True, **kwargs):
            return traverse_obj(api_data, ('video', *items), get_all=not get_first, **kwargs)

        return {
            'id': video_id,
            '_api_data': api_data,
            'title': get_video_info(('originalTitle', 'title')) or self._og_search_title(webpage, default=None),
            'formats': formats,
            'availability': availability,
            'thumbnails': [{
                'id': key,
                'url': url,
                'ext': 'jpg',
                'preference': thumb_prefs(key),
                **parse_resolution(url, lenient=True),
            } for key, url in (get_video_info('thumbnail') or {}).items() if url],
            'description': clean_html(get_video_info('description')),
            'uploader': traverse_obj(api_data, ('owner', 'nickname'), ('channel', 'name'), ('community', 'name')),
            'uploader_id': str_or_none(traverse_obj(api_data, ('owner', 'id'), ('channel', 'id'), ('community', 'id'))),
            'timestamp': parse_iso8601(get_video_info('registeredAt')) or parse_iso8601(
                self._html_search_meta('video:release_date', webpage, 'date published', default=None)),
            'channel': traverse_obj(api_data, ('channel', 'name'), ('community', 'name')),
            'channel_id': traverse_obj(api_data, ('channel', 'id'), ('community', 'id')),
            'view_count': int_or_none(get_video_info('count', 'view')),
            'tags': tags,
            'genre': traverse_obj(api_data, ('genre', 'label'), ('genre', 'key')),
            'comment_count': get_video_info('count', 'comment', expected_type=int),
            'duration': (
                parse_duration(self._html_search_meta('video:duration', webpage, 'video duration', default=None))
                or get_video_info('duration')),
            'webpage_url': url_or_none(url) or f'https://www.nicovideo.jp/watch/{video_id}',
            'subtitles': self.extract_subtitles(video_id, api_data),
        }

    def _get_subtitles(self, video_id, api_data):
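        # Comments ("danmaku") are fetched from the nvComment server referenced in
        # the API data and exposed as a JSON subtitle track under the 'comments' key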
        comments_info = traverse_obj(api_data, ('comment', 'nvComment', {dict})) or {}
        if not comments_info.get('server'):
            return

        danmaku = traverse_obj(self._download_json(
            f'{comments_info["server"]}/v1/threads', video_id, data=json.dumps({
                'additionals': {},
                'params': comments_info.get('params'),
                'threadKey': comments_info.get('threadKey'),
            }).encode(), fatal=False,
            headers={
                'Referer': 'https://www.nicovideo.jp/',
                'Origin': 'https://www.nicovideo.jp',
                'Content-Type': 'text/plain;charset=UTF-8',
                'x-client-os-type': 'others',
                'x-frontend-id': '6',
                'x-frontend-version': '0',
            },
            note='Downloading comments', errnote='Failed to download comments'),
            ('data', 'threads', ..., 'comments', ...))

        return {
            'comments': [{
                'ext': 'json',
                'data': json.dumps(danmaku),
            }],
        }


class NiconicoPlaylistBaseIE(InfoExtractor):
    _PAGE_SIZE = 100

    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0',
        'X-Niconico-Language': 'en-us'
    }

    def _call_api(self, list_id, resource, query):
        raise NotImplementedError('Must be implemented in subclasses')

    @staticmethod
    def _parse_owner(item):
        return {
            'uploader': traverse_obj(item, ('owner', 'name')),
            'uploader_id': traverse_obj(item, ('owner', 'id')),
        }

    def _fetch_page(self, list_id, page):
        page += 1
        resp = self._call_api(list_id, 'page %d' % page, {
            'page': page,
            'pageSize': self._PAGE_SIZE,
        })
        # this is needed to support both mylist and user
        for video in traverse_obj(resp, ('items', ..., ('video', None))) or []:
            video_id = video.get('id')
            if not video_id:
                # skip {"video": {"id": "blablabla", ...}}
                continue
            count = video.get('count') or {}
            get_count = lambda x: int_or_none(count.get(x))
            yield {
                '_type': 'url',
                'id': video_id,
                'title': video.get('title'),
                'url': f'https://www.nicovideo.jp/watch/{video_id}',
                'description': video.get('shortDescription'),
                'duration': int_or_none(video.get('duration')),
                'view_count': get_count('view'),
                'comment_count': get_count('comment'),
                'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
                'ie_key': NiconicoIE.ie_key(),
                **self._parse_owner(video),
            }

    def _entries(self, list_id):
        return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE)


class NiconicoPlaylistIE(NiconicoPlaylistBaseIE):
    IE_NAME = 'niconico:playlist'
    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://www.nicovideo.jp/mylist/27411728',
        'info_dict': {
            'id': '27411728',
            'title': 'AKB48のオールナイトニッポン',
            'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
            'uploader': 'のっく',
            'uploader_id': '805442',
        },
        'playlist_mincount': 291,
    }, {
        'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
        'only_matching': True,
    }, {
        'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
        'only_matching': True,
    }]

    def _call_api(self, list_id, resource, query):
        return self._download_json(
            f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
            f'Downloading {resource}', query=query,
            headers=self._API_HEADERS)['data']['mylist']

    def _real_extract(self, url):
        list_id = self._match_id(url)
        mylist = self._call_api(list_id, 'list', {
            'pageSize': 1,
        })
        return self.playlist_result(
            self._entries(list_id), list_id,
            mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist))


class NiconicoSeriesIE(InfoExtractor):
    IE_NAME = 'niconico:series'
    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp(?:/user/\d+)?|nico\.ms)/series/(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://www.nicovideo.jp/user/44113208/series/110226',
        'info_dict': {
            'id': '110226',
            'title': 'ご立派ァ!のシリーズ',
        },
        'playlist_mincount': 10,
    }, {
        'url': 'https://www.nicovideo.jp/series/12312/',
        'info_dict': {
            'id': '12312',
            'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
        },
        'playlist_mincount': 103,
    }, {
        'url': 'https://nico.ms/series/203559',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        list_id = self._match_id(url)
        webpage = self._download_webpage(url, list_id)

        title = self._search_regex(
            (r'<title>「(.+)(全',
             r'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
            webpage, 'title', fatal=False)
        if title:
            title = unescapeHTML(title)
        json_data = next(self._yield_json_ld(webpage, None, fatal=False))
        return self.playlist_from_matches(
            traverse_obj(json_data, ('itemListElement', ..., 'url')), list_id, title, ie=NiconicoIE)


class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
    IE_NAME = 'niconico:history'
    IE_DESC = 'NicoNico user history or likes. Requires cookies.'
    _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'

    _TESTS = [{
        'note': 'PC page, with /video',
        'url': 'https://www.nicovideo.jp/my/history/video',
        'only_matching': True,
    }, {
        'note': 'PC page, without /video',
        'url': 'https://www.nicovideo.jp/my/history',
        'only_matching': True,
    }, {
        'note': 'mobile page, with /video',
        'url': 'https://sp.nicovideo.jp/my/history/video',
        'only_matching': True,
    }, {
        'note': 'mobile page, without /video',
        'url': 'https://sp.nicovideo.jp/my/history',
        'only_matching': True,
    }, {
        'note': 'PC page',
        'url': 'https://www.nicovideo.jp/my/history/like',
        'only_matching': True,
    }, {
        'note': 'Mobile page',
        'url': 'https://sp.nicovideo.jp/my/history/like',
        'only_matching': True,
    }]

    def _call_api(self, list_id, resource, query):
        path = 'likes' if list_id == 'history/like' else 'watch/history'
        return self._download_json(
            f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
            f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']

    def _real_extract(self, url):
        list_id = self._match_id(url)
        try:
            mylist = self._call_api(list_id, 'list', {'pageSize': 1})
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                self.raise_login_required('You have to be logged in to get your history')
            raise
        return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))


class NicovideoSearchBaseIE(InfoExtractor):
    _SEARCH_TYPE = 'search'

    def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
        query = query or {}
        pages = [query['page']] if 'page' in query else itertools.count(1)
        for page_num in pages:
            query['page'] = str(page_num)
            webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
            results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage)
            for item in results:
                yield self.url_result(f'https://www.nicovideo.jp/watch/{item}', 'Niconico', item)
            if not results:
                break

    def _search_results(self, query):
        return self._entries(
            self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}'), query)


class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search'
    IE_NAME = 'nicovideo:search'
    _SEARCH_KEY = 'nicosearch'


class NicovideoSearchURLIE(NicovideoSearchBaseIE):
    IE_NAME = f'{NicovideoSearchIE.IE_NAME}_url'
    IE_DESC = 'Nico video search URLs'
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'
    _TESTS = [{
        'url': 'http://www.nicovideo.jp/search/sm9',
        'info_dict': {
            'id': 'sm9',
            'title': 'sm9'
        },
        'playlist_mincount': 40,
    }, {
        'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
        'info_dict': {
            'id': 'sm9',
            'title': 'sm9'
        },
        'playlist_count': 31,
    }]

    def _real_extract(self, url):
        query = self._match_id(url)
        return self.playlist_result(self._entries(url, query), query, query)


class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    IE_DESC = 'Nico video search, newest first'
    IE_NAME = f'{NicovideoSearchIE.IE_NAME}:date'
    _SEARCH_KEY = 'nicosearchdate'
    _TESTS = [{
        'url': 'nicosearchdateall:a',
        'info_dict': {
            'id': 'a',
            'title': 'a'
        },
        'playlist_mincount': 1610,
    }]

    _START_DATE = dt.date(2007, 1, 1)
    _RESULTS_PER_PAGE = 32
    _MAX_PAGES = 50

    def _entries(self, url, item_id, start_date=None, end_date=None):
        start_date, end_date = start_date or self._START_DATE, end_date or dt.datetime.now().date()

        # If the last page has a full page of videos, we need to break down the query interval further
        last_page_len = len(list(self._get_entries_for_date(
            url, item_id, start_date, end_date, self._MAX_PAGES,
            note=f'Checking number of videos from {start_date} to {end_date}')))
        if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
            midpoint = start_date + ((end_date - start_date) // 2)
            yield from self._entries(url, item_id, midpoint, end_date)
            yield from self._entries(url, item_id, start_date, midpoint)
        else:
            self.to_screen(f'{item_id}: Downloading results from {start_date} to {end_date}')
            yield from self._get_entries_for_date(
                url, item_id, start_date, end_date, note=' Downloading page %(page)s')

    def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
        query = {
            'start': str(start_date),
            'end': str(end_date or start_date),
            'sort': 'f',
            'order': 'd',
        }
        if page_num:
            query['page'] = str(page_num)

        yield from super()._entries(url, item_id, query=query, note=note)


class NicovideoTagURLIE(NicovideoSearchBaseIE):
    IE_NAME = 'niconico:tag'
    IE_DESC = 'NicoNico video tag URLs'
    _SEARCH_TYPE = 'tag'
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/tag/(?P<id>[^?#&]+)?'
    _TESTS = [{
        'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
        'info_dict': {
            'id': 'ドキュメンタリー淫夢',
            'title': 'ドキュメンタリー淫夢'
        },
        'playlist_mincount': 400,
    }]

    def _real_extract(self, url):
        query = self._match_id(url)
        return self.playlist_result(self._entries(url, query), query, query)


class NiconicoUserIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
    _TEST = {
        'url': 'https://www.nicovideo.jp/user/419948',
        'info_dict': {
            'id': '419948',
        },
        'playlist_mincount': 101,
    }
    _API_URL = 'https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s'
    _PAGE_SIZE = 100

    _API_HEADERS = {
        'X-Frontend-ID': '6',
        'X-Frontend-Version': '0'
    }

    def _entries(self, list_id):
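        # Page through the user's uploads, _PAGE_SIZE entries at a time, until
        # totalCount videos have been yielded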
        total_count = 1
        count = page_num = 0
        while count < total_count:
            json_parsed = self._download_json(
                self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
                headers=self._API_HEADERS,
                note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
            if not page_num:
                total_count = int_or_none(json_parsed['data'].get('totalCount'))
            for entry in json_parsed['data']['items']:
                count += 1
                yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
            page_num += 1

    def _real_extract(self, url):
        list_id = self._match_id(url)
        return self.playlist_result(self._entries(list_id), list_id, ie=NiconicoIE.ie_key())


class NiconicoLiveIE(InfoExtractor):
    IE_NAME = 'niconico:live'
    IE_DESC = 'ニコニコ生放送'
    _VALID_URL = r'https?://(?:sp\.)?live2?\.nicovideo\.jp/(?:watch|gate)/(?P<id>lv\d+)'
    _TESTS = [{
        'note': 'this test case includes invisible characters for title, pasting them as-is',
        'url': 'https://live.nicovideo.jp/watch/lv339533123',
        'info_dict': {
            'id': 'lv339533123',
            'title': '激辛ペヤング食べます‪( ;ᯅ; )‬(歌枠オーディション参加中)',
            'view_count': 1526,
            'comment_count': 1772,
            'description': '初めましてもかって言います❕\nのんびり自由に適当に暮らしてます',
            'uploader': 'もか',
            'channel': 'ゲストさんのコミュニティ',
            'channel_id': 'co5776900',
            'channel_url': 'https://com.nicovideo.jp/community/co5776900',
            'timestamp': 1670677328,
            'is_live': True,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://live2.nicovideo.jp/watch/lv339533123',
        'only_matching': True,
    }, {
        'url': 'https://sp.live.nicovideo.jp/watch/lv339533123',
        'only_matching': True,
    }, {
        'url': 'https://sp.live2.nicovideo.jp/watch/lv339533123',
        'only_matching': True,
    }]

    _KNOWN_LATENCY = ('high', 'low')

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(f'https://live.nicovideo.jp/watch/{video_id}', video_id)

        embedded_data = self._parse_json(unescapeHTML(self._search_regex(
            r'<script\s+id="embedded-data"\s*data-props="(.+?)"', webpage, 'embedded data')), video_id)

        ws_url = traverse_obj(embedded_data, ('site', 'relive', 'webSocketUrl'))
        if not ws_url:
            raise ExtractorError('The live hasn\'t started yet or already ended.', expected=True)
        ws_url = update_url_query(ws_url, {
            'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
        })

        hostname = remove_start(urllib.parse.urlparse(urlh.url).hostname, 'sp.')
        latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
        if latency not in self._KNOWN_LATENCY:
            latency = 'high'

        ws = self._request_webpage(
            Request(ws_url, headers={'Origin': f'https://{hostname}'}),
            video_id=video_id, note='Connecting to WebSocket server')

        self.write_debug('Sending HLS server request')
        ws.send(json.dumps({
            'type': 'startWatching',
            'data': {
                'stream': {
                    'quality': 'abr',
                    'protocol': 'hls+fmp4',
                    'latency': latency,
                    'chasePlay': False
                },
                'room': {
                    'protocol': 'webSocket',
                    'commentable': True
                },
                'reconnect': False,
            }
        }))

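        # Read WebSocket messages until the server announces the HLS stream
        # (a 'stream' message carrying the playlist URI and the available
        # qualities); 'disconnect' and 'error' messages abort the extraction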
        while True:
            recv = ws.recv()
            if not recv:
                continue
            data = json.loads(recv)
            if not isinstance(data, dict):
                continue
            if data.get('type') == 'stream':
                m3u8_url = data['data']['uri']
                qualities = data['data']['availableQualities']
                break
            elif data.get('type') == 'disconnect':
                self.write_debug(recv)
                raise ExtractorError('Disconnected in the middle of extraction')
            elif data.get('type') == 'error':
                self.write_debug(recv)
                message = traverse_obj(data, ('body', 'code')) or recv
                raise ExtractorError(message)
            elif self.get_param('verbose', False):
                if len(recv) > 100:
                    recv = recv[:100] + '...'
                self.write_debug('Server said: %s' % recv)

        title = traverse_obj(embedded_data, ('program', 'title')) or self._html_search_meta(
            ('og:title', 'twitter:title'), webpage, 'live title', fatal=False)

        raw_thumbs = traverse_obj(embedded_data, ('program', 'thumbnail')) or {}
        thumbnails = []
        for name, value in raw_thumbs.items():
            if not isinstance(value, dict):
                thumbnails.append({
                    'id': name,
                    'url': value,
                    **parse_resolution(value, lenient=True),
                })
                continue

            for k, img_url in value.items():
                res = parse_resolution(k, lenient=True) or parse_resolution(img_url, lenient=True)
                width, height = res.get('width'), res.get('height')

                thumbnails.append({
                    'id': f'{name}_{width}x{height}',
                    'url': img_url,
                    **res,
                })

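        # 'abr' (the first entry) is the adaptive master playlist itself; align the
        # remaining quality names with the extracted variant formats in reverse
        # order and attach what the niconico_live downloader needs (the WebSocket
        # connection, latency and origin host)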
        formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True)
        for fmt, q in zip(formats, reversed(qualities[1:])):
            fmt.update({
                'format_id': q,
                'protocol': 'niconico_live',
                'ws': ws,
                'video_id': video_id,
                'live_latency': latency,
                'origin': hostname,
            })

        return {
            'id': video_id,
            'title': title,
            **traverse_obj(embedded_data, {
                'view_count': ('program', 'statistics', 'watchCount'),
                'comment_count': ('program', 'statistics', 'commentCount'),
                'uploader': ('program', 'supplier', 'name'),
                'channel': ('socialGroup', 'name'),
                'channel_id': ('socialGroup', 'id'),
                'channel_url': ('socialGroup', 'socialGroupPageUrl'),
            }),
            'description': clean_html(traverse_obj(embedded_data, ('program', 'description'))),
            'timestamp': int_or_none(traverse_obj(embedded_data, ('program', 'openTime'))),
            'is_live': True,
            'thumbnails': thumbnails,
            'formats': formats,
        }