1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import datetime
5 import itertools
6 import functools
7 import json
8 import re
9
10 from .common import InfoExtractor, SearchInfoExtractor
11 from ..postprocessor.ffmpeg import FFmpegPostProcessor
12 from ..compat import (
13 compat_str,
14 compat_parse_qs,
15 compat_urllib_parse_urlparse,
16 compat_HTTPError,
17 )
18 from ..utils import (
19 ExtractorError,
20 dict_get,
21 float_or_none,
22 int_or_none,
23 OnDemandPagedList,
24 parse_duration,
25 parse_iso8601,
26 PostProcessingError,
27 remove_start,
28 str_or_none,
29 traverse_obj,
30 try_get,
31 unescapeHTML,
32 unified_timestamp,
33 urlencode_postdata,
34 xpath_text,
35 )
36
37
38 class NiconicoIE(InfoExtractor):
39 IE_NAME = 'niconico'
40 IE_DESC = 'ニコニコ動画'
41
42 _TESTS = [{
43 'url': 'http://www.nicovideo.jp/watch/sm22312215',
44 'md5': 'a5bad06f1347452102953f323c69da34s',
45 'info_dict': {
46 'id': 'sm22312215',
47 'ext': 'mp4',
48 'title': 'Big Buck Bunny',
49 'thumbnail': r're:https?://.*',
50 'uploader': 'takuya0301',
51 'uploader_id': '2698420',
52 'upload_date': '20131123',
53 'timestamp': int, # timestamp is unstable
54 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
55 'duration': 33,
56 'view_count': int,
57 'comment_count': int,
58 },
59 'skip': 'Requires an account',
60 }, {
61 # Files downloaded with and without credentials are different, so omit
62 # the md5 field
63 'url': 'http://www.nicovideo.jp/watch/nm14296458',
64 'info_dict': {
65 'id': 'nm14296458',
66 'ext': 'swf',
67 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
68 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
69 'thumbnail': r're:https?://.*',
70 'uploader': 'りょうた',
71 'uploader_id': '18822557',
72 'upload_date': '20110429',
73 'timestamp': 1304065916,
74 'duration': 209,
75 },
76 'skip': 'Requires an account',
77 }, {
78 # video exists but is marked as "deleted"
79 # md5 is unstable
80 'url': 'http://www.nicovideo.jp/watch/sm10000',
81 'info_dict': {
82 'id': 'sm10000',
83 'ext': 'unknown_video',
84 'description': 'deleted',
85 'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
86 'thumbnail': r're:https?://.*',
87 'upload_date': '20071224',
88 'timestamp': int, # timestamp field has a different value if logged in
89 'duration': 304,
90 'view_count': int,
91 },
92 'skip': 'Requires an account',
93 }, {
94 'url': 'http://www.nicovideo.jp/watch/so22543406',
95 'info_dict': {
96 'id': '1388129933',
97 'ext': 'mp4',
98 'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
99 'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
100 'thumbnail': r're:https?://.*',
101 'timestamp': 1388851200,
102 'upload_date': '20140104',
103 'uploader': 'アニメロチャンネル',
104 'uploader_id': '312',
105 },
106 'skip': 'The viewing period of the video you were searching for has expired.',
107 }, {
108 # video not available via `getflv`; "old" HTML5 video
109 'url': 'http://www.nicovideo.jp/watch/sm1151009',
110 'md5': '8fa81c364eb619d4085354eab075598a',
111 'info_dict': {
112 'id': 'sm1151009',
113 'ext': 'mp4',
114 'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
115 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
116 'thumbnail': r're:https?://.*',
117 'duration': 184,
118 'timestamp': 1190868283,
119 'upload_date': '20070927',
120 'uploader': 'denden2',
121 'uploader_id': '1392194',
122 'view_count': int,
123 'comment_count': int,
124 },
125 'skip': 'Requires an account',
126 }, {
127 # "New" HTML5 video
128 # md5 is unstable
129 'url': 'http://www.nicovideo.jp/watch/sm31464864',
130 'info_dict': {
131 'id': 'sm31464864',
132 'ext': 'mp4',
133 'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
134 'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
135 'timestamp': 1498514060,
136 'upload_date': '20170626',
137 'uploader': 'ゲスト',
138 'uploader_id': '40826363',
139 'thumbnail': r're:https?://.*',
140 'duration': 198,
141 'view_count': int,
142 'comment_count': int,
143 },
144 'skip': 'Requires an account',
145 }, {
146 # Video without owner
147 'url': 'http://www.nicovideo.jp/watch/sm18238488',
148 'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
149 'info_dict': {
150 'id': 'sm18238488',
151 'ext': 'mp4',
152 'title': '【実写版】ミュータントタートルズ',
153 'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
154 'timestamp': 1341160408,
155 'upload_date': '20120701',
156 'uploader': None,
157 'uploader_id': None,
158 'thumbnail': r're:https?://.*',
159 'duration': 5271,
160 'view_count': int,
161 'comment_count': int,
162 },
163 'skip': 'Requires an account',
164 }, {
165 'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
166 'only_matching': True,
167 }]
168
169 _VALID_URL = r'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
170 _NETRC_MACHINE = 'niconico'
171
172 _API_HEADERS = {
173 'X-Frontend-ID': '6',
174 'X-Frontend-Version': '0'
175 }
176
177 def _real_initialize(self):
178 self._login()
179
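# Logging in is a plain form POST to account.nicovideo.jp; a failed attempt
# redirects back with 'message=cant_login' in the query string, which is how
# bad credentials are detected below.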
180 def _login(self):
181 username, password = self._get_login_info()
182 # No authentication to be performed
183 if not username:
184 return True
185
186 # Log in
187 login_ok = True
188 login_form_strs = {
189 'mail_tel': username,
190 'password': password,
191 }
192 urlh = self._request_webpage(
193 'https://account.nicovideo.jp/api/v1/login', None,
194 note='Logging in', errnote='Unable to log in',
195 data=urlencode_postdata(login_form_strs))
196 if urlh is False:
197 login_ok = False
198 else:
199 parts = compat_urllib_parse_urlparse(urlh.geturl())
200 if compat_parse_qs(parts.query).get('message', [None])[0] == 'cant_login':
201 login_ok = False
202 if not login_ok:
203 self.report_warning('unable to log in: bad username or password')
204 return login_ok
205
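# DMC delivery: formats built by _extract_format_for_quality() carry a synthetic URL
# of the form 'niconico_dmc:<video_id>/<video_src_id>/<audio_src_id>'. This method
# unpacks that URL, opens a DMC session for the chosen source ids and returns the
# real content URI plus a heartbeat request
# ('<session url>/<session id>?_format=json&_method=PUT', re-sending the session
# object) that keeps the session alive while downloading.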
206 def _get_heartbeat_info(self, info_dict):
207
208 video_id, video_src_id, audio_src_id = info_dict['url'].split(':')[1].split('/')
209
210 api_data = (
211 info_dict.get('_api_data')
212 or self._parse_json(
213 self._html_search_regex(
214 'data-api-data="([^"]+)"',
215 self._download_webpage('http://www.nicovideo.jp/watch/' + video_id, video_id),
216 'API data', default='{}'),
217 video_id))
218
219 session_api_data = try_get(api_data, lambda x: x['media']['delivery']['movie']['session'])
220 session_api_endpoint = try_get(session_api_data, lambda x: x['urls'][0])
221
222 def ping():
223 status = try_get(
224 self._download_json(
225 'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id,
226 query={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])},
227 note='Acquiring permission for downloading video',
228 headers=self._API_HEADERS),
229 lambda x: x['meta']['status'])
230 if status != 200:
231 self.report_warning('Failed to acquire permission for playing video. The video may not download.')
232
233 yesno = lambda x: 'yes' if x else 'no'
234
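# Two delivery modes: if the delivery info carries an 'encryption' object the stream
# is encrypted HLS (m3u8) and the session request must echo the key URI and encrypted
# key back; otherwise plain HTTP download parameters are sent.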
235 # m3u8 (encryption)
236 if try_get(api_data, lambda x: x['media']['delivery']['encryption']) is not None:
237 protocol = 'm3u8'
238 encryption = self._parse_json(session_api_data['token'], video_id)['hls_encryption']
239 session_api_http_parameters = {
240 'parameters': {
241 'hls_parameters': {
242 'encryption': {
243 encryption: {
244 'encrypted_key': try_get(api_data, lambda x: x['media']['delivery']['encryption']['encryptedKey']),
245 'key_uri': try_get(api_data, lambda x: x['media']['delivery']['encryption']['keyUri'])
246 }
247 },
248 'transfer_preset': '',
249 'use_ssl': yesno(session_api_endpoint['isSsl']),
250 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
251 'segment_duration': 6000,
252 }
253 }
254 }
255 # http
256 else:
257 protocol = 'http'
258 session_api_http_parameters = {
259 'parameters': {
260 'http_output_download_parameters': {
261 'use_ssl': yesno(session_api_endpoint['isSsl']),
262 'use_well_known_port': yesno(session_api_endpoint['isWellKnownPort']),
263 }
264 }
265 }
266
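# Create the session by POSTing a JSON body (assembled from session_api_data) to the
# first session URL. The response's data.session.content_uri is the actual media URL
# and data.session.id identifies the session for heartbeats; an illustrative (not
# verbatim) shape is {"data": {"session": {"id": "...", "content_uri": "https://..."}}}.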
267 session_response = self._download_json(
268 session_api_endpoint['url'], video_id,
269 query={'_format': 'json'},
270 headers={'Content-Type': 'application/json'},
271 note='Downloading JSON metadata for %s' % info_dict['format_id'],
272 data=json.dumps({
273 'session': {
274 'client_info': {
275 'player_id': session_api_data.get('playerId'),
276 },
277 'content_auth': {
278 'auth_type': try_get(session_api_data, lambda x: x['authTypes'][session_api_data['protocols'][0]]),
279 'content_key_timeout': session_api_data.get('contentKeyTimeout'),
280 'service_id': 'nicovideo',
281 'service_user_id': session_api_data.get('serviceUserId')
282 },
283 'content_id': session_api_data.get('contentId'),
284 'content_src_id_sets': [{
285 'content_src_ids': [{
286 'src_id_to_mux': {
287 'audio_src_ids': [audio_src_id],
288 'video_src_ids': [video_src_id],
289 }
290 }]
291 }],
292 'content_type': 'movie',
293 'content_uri': '',
294 'keep_method': {
295 'heartbeat': {
296 'lifetime': session_api_data.get('heartbeatLifetime')
297 }
298 },
299 'priority': session_api_data.get('priority'),
300 'protocol': {
301 'name': 'http',
302 'parameters': {
303 'http_parameters': session_api_http_parameters
304 }
305 },
306 'recipe_id': session_api_data.get('recipeId'),
307 'session_operation_auth': {
308 'session_operation_auth_by_signature': {
309 'signature': session_api_data.get('signature'),
310 'token': session_api_data.get('token'),
311 }
312 },
313 'timing_constraint': 'unlimited'
314 }
315 }).encode())
316
317 info_dict['url'] = session_response['data']['session']['content_uri']
318 info_dict['protocol'] = protocol
319
320 # get heartbeat info
321 heartbeat_info_dict = {
322 'url': session_api_endpoint['url'] + '/' + session_response['data']['session']['id'] + '?_format=json&_method=PUT',
323 'data': json.dumps(session_response['data']),
324 # interval: the heartbeat lifetime is given in milliseconds; a third of it (scale=3000) is used as the ping interval to leave a buffer.
325 'interval': float_or_none(session_api_data.get('heartbeatLifetime'), scale=3000),
326 'ping': ping
327 }
328
329 return info_dict, heartbeat_info_dict
330
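# One format is built per available (video, audio) quality pair from
# media.delivery.movie. Quality ids look roughly like 'archive_h264_4000kbps_1080p'
# (hence the parser below); the returned 'url' is the synthetic 'niconico_dmc:' URI
# that _get_heartbeat_info() later resolves into a playable one.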
331 def _extract_format_for_quality(self, api_data, video_id, audio_quality, video_quality):
332 def parse_format_id(id_code):
333 mobj = re.match(r'''(?x)
334 (?:archive_)?
335 (?:(?P<codec>[^_]+)_)?
336 (?:(?P<br>[\d]+)kbps_)?
337 (?:(?P<res>[\d+]+)p_)?
338 ''', '%s_' % id_code)
339 return mobj.groupdict() if mobj else {}
340
341 protocol = 'niconico_dmc'
342 format_id = '-'.join(map(lambda s: remove_start(s['id'], 'archive_'), [video_quality, audio_quality]))
343 vdict = parse_format_id(video_quality['id'])
344 adict = parse_format_id(audio_quality['id'])
345 resolution = try_get(video_quality, lambda x: x['metadata']['resolution'], dict) or {'height': vdict.get('res')}
346 vbr = try_get(video_quality, lambda x: x['metadata']['bitrate'], float)
347
348 return {
349 'url': '%s:%s/%s/%s' % (protocol, video_id, video_quality['id'], audio_quality['id']),
350 'format_id': format_id,
351 'format_note': 'DMC %s' % try_get(video_quality, lambda x: x['metadata']['label'], compat_str),
352 'ext': 'mp4', # the Session API is used for HTML5, which always serves mp4
353 'vcodec': vdict.get('codec'),
354 'acodec': adict.get('codec'),
355 'vbr': float_or_none(vbr, 1000) or float_or_none(vdict.get('br')),
356 'abr': float_or_none(audio_quality.get('bitrate'), 1000) or float_or_none(adict.get('br')),
357 'height': int_or_none(resolution.get('height', vdict.get('res'))),
358 'width': int_or_none(resolution.get('width')),
359 'quality': -2 if 'low' in format_id else -1, # Default quality value is -1
360 'protocol': protocol,
361 'http_headers': {
362 'Origin': 'https://www.nicovideo.jp',
363 'Referer': 'https://www.nicovideo.jp/watch/' + video_id,
364 }
365 }
366
367 def _real_extract(self, url):
368 video_id = self._match_id(url)
369
370 # Get video webpage for API data.
371 webpage, handle = self._download_webpage_handle(
372 'http://www.nicovideo.jp/watch/' + video_id, video_id)
373 if video_id.startswith('so'):
374 video_id = self._match_id(handle.geturl())
375
376 api_data = self._parse_json(self._html_search_regex(
377 'data-api-data="([^"]+)"', webpage,
378 'API data', default='{}'), video_id)
379
380 def get_video_info_web(items):
381 return dict_get(api_data['video'], items)
382
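# Metadata is merged from two sources: the data-api-data JSON embedded in the watch
# page (parsed above) and the legacy getthumbinfo XML API queried below, which is
# also what reports error codes such as DELETED or NOT_FOUND.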
383 # Get video info
384 video_info_xml = self._download_xml(
385 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id,
386 video_id, note='Downloading video info page')
387
388 def get_video_info_xml(items):
389 if not isinstance(items, list):
390 items = [items]
391 for item in items:
392 ret = xpath_text(video_info_xml, './/' + item)
393 if ret:
394 return ret
395
396 if get_video_info_xml('error'):
397 error_code = get_video_info_xml('code')
398
399 if error_code == 'DELETED':
400 raise ExtractorError('The video has been deleted.',
401 expected=True)
402 elif error_code == 'NOT_FOUND':
403 raise ExtractorError('The video is not found.',
404 expected=True)
405 elif error_code == 'COMMUNITY':
406 self.to_screen('%s: The video is community members only.' % video_id)
407 else:
408 raise ExtractorError('%s reports error: %s' % (self.IE_NAME, error_code))
409
410 # Start extracting video formats
411 formats = []
412
413 # Get HTML5 videos info
414 quality_info = try_get(api_data, lambda x: x['media']['delivery']['movie'])
415 if not quality_info:
416 raise ExtractorError('The video can\'t be downloaded', expected=True)
417
418 for audio_quality in quality_info.get('audios') or {}:
419 for video_quality in quality_info.get('videos') or {}:
420 if not audio_quality.get('isAvailable') or not video_quality.get('isAvailable'):
421 continue
422 formats.append(self._extract_format_for_quality(
423 api_data, video_id, audio_quality, video_quality))
424
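# Legacy "Smile" server: if api_data still exposes video.smileInfo.url, the file is
# probed with ffprobe (passing the login cookies) to get its resolution and codecs,
# since the getthumbinfo XML may report nothing for it (see the filesize fallback
# below). A URL ending in 'low' means the site is serving the economy-mode re-encode.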
425 # Get flv/swf info
426 timestamp = None
427 video_real_url = try_get(api_data, lambda x: x['video']['smileInfo']['url'])
428 if video_real_url:
429 is_economy = video_real_url.endswith('low')
430
431 if is_economy:
432 self.report_warning('Site is currently in economy mode! You will only have access to lower quality streams')
433
434 # Invoking ffprobe to determine resolution
435 pp = FFmpegPostProcessor(self._downloader)
436 cookies = self._get_cookies('https://nicovideo.jp').output(header='', sep='; path=/; domain=nicovideo.jp;\n')
437
438 self.to_screen('%s: %s' % (video_id, 'Checking smile format with ffprobe'))
439
440 try:
441 metadata = pp.get_metadata_object(video_real_url, ['-cookies', cookies])
442 except PostProcessingError as err:
443 raise ExtractorError(err.msg, expected=True)
444
445 v_stream = a_stream = {}
446
447 # Some complex swf files don't have a video stream (e.g. nm4809023)
448 for stream in metadata['streams']:
449 if stream['codec_type'] == 'video':
450 v_stream = stream
451 elif stream['codec_type'] == 'audio':
452 a_stream = stream
453
454 # Community-restricted videos seem to have issues where the thumb API returns nothing at all
455 filesize = int(
456 (get_video_info_xml('size_high') if not is_economy else get_video_info_xml('size_low'))
457 or metadata['format']['size']
458 )
459 extension = (
460 get_video_info_xml('movie_type')
461 or 'mp4' if 'mp4' in metadata['format']['format_name'] else metadata['format']['format_name']
462 )
463
464 # The 'creation_time' tag on the video stream of re-encoded SMILEVIDEO mp4 files is '1970-01-01T00:00:00.000000Z'.
465 timestamp = (
466 parse_iso8601(get_video_info_web('first_retrieve'))
467 or unified_timestamp(get_video_info_web('postedDateTime'))
468 )
469 metadata_timestamp = (
470 parse_iso8601(try_get(v_stream, lambda x: x['tags']['creation_time']))
471 or timestamp if extension != 'mp4' else 0
472 )
473
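# Heuristic: the Smile copy is treated as source quality if it predates the DMC
# rollout (threshold below) or if its creation_time is a real timestamp rather than
# the 1970 placeholder left by re-encoding.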
474 # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts
475 smile_threshold_timestamp = parse_iso8601('2016-12-08T00:00:00+09:00')
476
477 is_source = timestamp < smile_threshold_timestamp or metadata_timestamp > 0
478
479 # If the reported file size is unreliable (<= 1), the old-server (Smile) copy is not the source video, so skip it.
480 if filesize > 1:
481 formats.append({
482 'url': video_real_url,
483 'format_id': 'smile' if not is_economy else 'smile_low',
484 'format_note': 'SMILEVIDEO source' if not is_economy else 'SMILEVIDEO low quality',
485 'ext': extension,
486 'container': extension,
487 'vcodec': v_stream.get('codec_name'),
488 'acodec': a_stream.get('codec_name'),
489 # Some complex swf files don't have total bit rate metadata (e.g. nm6049209)
490 'tbr': int_or_none(metadata['format'].get('bit_rate'), scale=1000),
491 'vbr': int_or_none(v_stream.get('bit_rate'), scale=1000),
492 'abr': int_or_none(a_stream.get('bit_rate'), scale=1000),
493 'height': int_or_none(v_stream.get('height')),
494 'width': int_or_none(v_stream.get('width')),
495 'source_preference': 5 if not is_economy else -2,
496 'quality': 5 if is_source and not is_economy else None,
497 'filesize': filesize
498 })
499
500 self._sort_formats(formats)
501
502 # Start extracting information
503 title = (
504 get_video_info_xml('title') # prefer to get the untranslated original title
505 or get_video_info_web(['originalTitle', 'title'])
506 or self._og_search_title(webpage, default=None)
507 or self._html_search_regex(
508 r'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
509 webpage, 'video title'))
510
511 watch_api_data_string = self._html_search_regex(
512 r'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
513 webpage, 'watch api data', default=None)
514 watch_api_data = self._parse_json(watch_api_data_string, video_id) if watch_api_data_string else {}
515 video_detail = watch_api_data.get('videoDetail', {})
516
517 thumbnail = (
518 self._html_search_regex(r'<meta property="og:image" content="([^"]+)">', webpage, 'thumbnail data', default=None)
519 or dict_get( # choose highest from 720p to 240p
520 get_video_info_web('thumbnail'),
521 ['ogp', 'player', 'largeUrl', 'middleUrl', 'url'])
522 or self._html_search_meta('image', webpage, 'thumbnail', default=None)
523 or video_detail.get('thumbnail'))
524
525 description = get_video_info_web('description')
526
527 if not timestamp:
528 match = self._html_search_meta('datePublished', webpage, 'date published', default=None)
529 if match:
530 timestamp = parse_iso8601(match.replace('+', ':00+'))
531 if not timestamp and video_detail.get('postedAt'):
532 timestamp = parse_iso8601(
533 video_detail['postedAt'].replace('/', '-'),
534 delimiter=' ', timezone=datetime.timedelta(hours=9))
535 timestamp = timestamp or try_get(api_data, lambda x: parse_iso8601(x['video']['registeredAt']))
536
537 view_count = int_or_none(get_video_info_web(['view_counter', 'viewCount']))
538 if not view_count:
539 match = self._html_search_regex(
540 r'>Views: <strong[^>]*>([^<]+)</strong>',
541 webpage, 'view count', default=None)
542 if match:
543 view_count = int_or_none(match.replace(',', ''))
544 view_count = (
545 view_count
546 or video_detail.get('viewCount')
547 or try_get(api_data, lambda x: x['video']['count']['view']))
548
549 comment_count = (
550 int_or_none(get_video_info_web('comment_num'))
551 or video_detail.get('commentCount')
552 or try_get(api_data, lambda x: x['video']['count']['comment']))
553
554 if not comment_count:
555 match = self._html_search_regex(
556 r'>Comments: <strong[^>]*>([^<]+)</strong>',
557 webpage, 'comment count', default=None)
558 if match:
559 comment_count = int_or_none(match.replace(',', ''))
560
561 duration = (parse_duration(
562 get_video_info_web('length')
563 or self._html_search_meta(
564 'video:duration', webpage, 'video duration', default=None))
565 or video_detail.get('length')
566 or get_video_info_web('duration'))
567
568 webpage_url = get_video_info_web('watch_url') or url
569
570 # for channel and community videos
571 channel_id = try_get(
572 api_data,
573 (lambda x: x['channel']['globalId'],
574 lambda x: x['community']['globalId']))
575 channel = try_get(
576 api_data,
577 (lambda x: x['channel']['name'],
578 lambda x: x['community']['name']))
579
580 # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
581 # in the JSON, which will cause None to be returned instead of {}.
582 owner = try_get(api_data, lambda x: x.get('owner'), dict) or {}
583 uploader_id = str_or_none(
584 get_video_info_web(['ch_id', 'user_id'])
585 or owner.get('id')
586 or channel_id
587 )
588 uploader = (
589 get_video_info_web(['ch_name', 'user_nickname'])
590 or owner.get('nickname')
591 or channel
592 )
593
594 return {
595 'id': video_id,
596 '_api_data': api_data,
597 'title': title,
598 'formats': formats,
599 'thumbnail': thumbnail,
600 'description': description,
601 'uploader': uploader,
602 'timestamp': timestamp,
603 'uploader_id': uploader_id,
604 'channel': channel,
605 'channel_id': channel_id,
606 'view_count': view_count,
607 'comment_count': comment_count,
608 'duration': duration,
609 'webpage_url': webpage_url,
610 }
611
612
613 class NiconicoPlaylistBaseIE(InfoExtractor):
614 _PAGE_SIZE = 100
615
616 _API_HEADERS = {
617 'X-Frontend-ID': '6',
618 'X-Frontend-Version': '0',
619 'X-Niconico-Language': 'en-us'
620 }
621
622 def _call_api(self, list_id, resource, query):
623 "Implement this in child class"
624 pass
625
626 @staticmethod
627 def _parse_owner(item):
628 return {
629 'uploader': traverse_obj(item, ('owner', 'name')),
630 'uploader_id': traverse_obj(item, ('owner', 'id')),
631 }
632
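# _call_api() is implemented by subclasses (mylist, watch history) and must return an
# object whose 'items' entries are either bare video dicts or wrappers with a 'video'
# key; both shapes are handled below. Pages are fetched lazily via OnDemandPagedList.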
633 def _fetch_page(self, list_id, page):
634 page += 1
635 resp = self._call_api(list_id, 'page %d' % page, {
636 'page': page,
637 'pageSize': self._PAGE_SIZE,
638 })
639 # this is needed to support both mylist and user
640 for video in traverse_obj(resp, ('items', ..., ('video', None))) or []:
641 video_id = video.get('id')
642 if not video_id:
643 # skip {"video": {"id": "blablabla", ...}}
644 continue
645 count = video.get('count') or {}
646 get_count = lambda x: int_or_none(count.get(x))
647 yield {
648 '_type': 'url',
649 'id': video_id,
650 'title': video.get('title'),
651 'url': f'https://www.nicovideo.jp/watch/{video_id}',
652 'description': video.get('shortDescription'),
653 'duration': int_or_none(video.get('duration')),
654 'view_count': get_count('view'),
655 'comment_count': get_count('comment'),
656 'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
657 'ie_key': NiconicoIE.ie_key(),
658 **self._parse_owner(video),
659 }
660
661 def _entries(self, list_id):
662 return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE)
663
664
665 class NiconicoPlaylistIE(NiconicoPlaylistBaseIE):
666 IE_NAME = 'niconico:playlist'
667 _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'
668
669 _TESTS = [{
670 'url': 'http://www.nicovideo.jp/mylist/27411728',
671 'info_dict': {
672 'id': '27411728',
673 'title': 'AKB48のオールナイトニッポン',
674 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
675 'uploader': 'のっく',
676 'uploader_id': '805442',
677 },
678 'playlist_mincount': 291,
679 }, {
680 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
681 'only_matching': True,
682 }, {
683 'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
684 'only_matching': True,
685 }]
686
687 def _call_api(self, list_id, resource, query):
688 return self._download_json(
689 f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
690 f'Downloading {resource}', query=query,
691 headers=self._API_HEADERS)['data']['mylist']
692
693 def _real_extract(self, url):
694 list_id = self._match_id(url)
695 mylist = self._call_api(list_id, 'list', {
696 'pageSize': 1,
697 })
698 return self.playlist_result(
699 self._entries(list_id), list_id,
700 mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist))
701
702
703 class NiconicoSeriesIE(InfoExtractor):
704 IE_NAME = 'niconico:series'
705 _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/series/(?P<id>\d+)'
706
707 _TESTS = [{
708 'url': 'https://www.nicovideo.jp/series/110226',
709 'info_dict': {
710 'id': '110226',
711 'title': 'ご立派ァ!のシリーズ',
712 },
713 'playlist_mincount': 10, # as of 2021/03/17
714 }, {
715 'url': 'https://www.nicovideo.jp/series/12312/',
716 'info_dict': {
717 'id': '12312',
718 'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
719 },
720 'playlist_mincount': 97, # as of 2021/03/17
721 }, {
722 'url': 'https://nico.ms/series/203559',
723 'only_matching': True,
724 }]
725
726 def _real_extract(self, url):
727 list_id = self._match_id(url)
728 webpage = self._download_webpage(f'https://www.nicovideo.jp/series/{list_id}', list_id)
729
730 title = self._search_regex(
731 (r'<title>「(.+)(全',
732 r'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
733 webpage, 'title', fatal=False)
734 if title:
735 title = unescapeHTML(title)
736 playlist = [
737 self.url_result(f'https://www.nicovideo.jp/watch/{v_id}', video_id=v_id)
738 for v_id in re.findall(r'href="/watch/([a-z0-9]+)" data-href="/watch/\1', webpage)]
739 return self.playlist_result(playlist, list_id, title)
740
741
742 class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
743 IE_NAME = 'niconico:history'
744 IE_DESC = 'NicoNico user history. Requires cookies.'
745 _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
746
747 _TESTS = [{
748 'note': 'PC page, with /video',
749 'url': 'https://www.nicovideo.jp/my/history/video',
750 'only_matching': True,
751 }, {
752 'note': 'PC page, without /video',
753 'url': 'https://www.nicovideo.jp/my/history',
754 'only_matching': True,
755 }, {
756 'note': 'mobile page, with /video',
757 'url': 'https://sp.nicovideo.jp/my/history/video',
758 'only_matching': True,
759 }, {
760 'note': 'mobile page, without /video',
761 'url': 'https://sp.nicovideo.jp/my/history',
762 'only_matching': True,
763 }]
764
765 def _call_api(self, list_id, resource, query):
766 return self._download_json(
767 'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
768 f'Downloading {resource}', query=query,
769 headers=self._API_HEADERS)['data']
770
771 def _real_extract(self, url):
772 list_id = 'history'
773 try:
774 mylist = self._call_api(list_id, 'list', {
775 'pageSize': 1,
776 })
777 except ExtractorError as e:
778 if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
779 self.raise_login_required('You have to be logged in to get your watch history')
780 raise
781 return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
782
783
784 class NicovideoSearchBaseIE(InfoExtractor):
785 _SEARCH_TYPE = 'search'
786
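# Search results are scraped from the HTML result pages: video ids come from
# data-video-id attributes and pagination stops at the first page with no matches.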
787 def _entries(self, url, item_id, query=None, note='Downloading page %(page)s'):
788 query = query or {}
789 pages = [query['page']] if 'page' in query else itertools.count(1)
790 for page_num in pages:
791 query['page'] = str(page_num)
792 webpage = self._download_webpage(url, item_id, query=query, note=note % {'page': page_num})
793 results = re.findall(r'(?<=data-video-id=)["\']?(?P<videoid>.*?)(?=["\'])', webpage)
794 for item in results:
795 yield self.url_result(f'http://www.nicovideo.jp/watch/{item}', 'Niconico', item)
796 if not results:
797 break
798
799 def _search_results(self, query):
800 return self._entries(
801 self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}'), query)
802
803
804 class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
805 IE_DESC = 'Nico video search'
806 IE_NAME = 'nicovideo:search'
807 _SEARCH_KEY = 'nicosearch'
808
809
810 class NicovideoSearchURLIE(NicovideoSearchBaseIE):
811 IE_NAME = f'{NicovideoSearchIE.IE_NAME}_url'
812 IE_DESC = 'Nico video search URLs'
813 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'
814 _TESTS = [{
815 'url': 'http://www.nicovideo.jp/search/sm9',
816 'info_dict': {
817 'id': 'sm9',
818 'title': 'sm9'
819 },
820 'playlist_mincount': 40,
821 }, {
822 'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
823 'info_dict': {
824 'id': 'sm9',
825 'title': 'sm9'
826 },
827 'playlist_count': 31,
828 }]
829
830 def _real_extract(self, url):
831 query = self._match_id(url)
832 return self.playlist_result(self._entries(url, query), query, query)
833
834
835 class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
836 IE_DESC = 'Nico video search, newest first'
837 IE_NAME = f'{NicovideoSearchIE.IE_NAME}:date'
838 _SEARCH_KEY = 'nicosearchdate'
839 _TESTS = [{
840 'url': 'nicosearchdateall:a',
841 'info_dict': {
842 'id': 'a',
843 'title': 'a'
844 },
845 'playlist_mincount': 1610,
846 }]
847
848 _START_DATE = datetime.date(2007, 1, 1)
849 _RESULTS_PER_PAGE = 32
850 _MAX_PAGES = 50
851
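# Searches appear to be capped at _MAX_PAGES * _RESULTS_PER_PAGE (50 * 32 = 1600)
# results per query, so date-restricted searches are bisected: if the last page of an
# interval is still full, the interval is split in half and each half queried
# recursively.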
852 def _entries(self, url, item_id, start_date=None, end_date=None):
853 start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
854
855 # If the last page has a full page of videos, we need to break down the query interval further
856 last_page_len = len(list(self._get_entries_for_date(
857 url, item_id, start_date, end_date, self._MAX_PAGES,
858 note=f'Checking number of videos from {start_date} to {end_date}')))
859 if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
860 midpoint = start_date + ((end_date - start_date) // 2)
861 yield from self._entries(url, item_id, midpoint, end_date)
862 yield from self._entries(url, item_id, start_date, midpoint)
863 else:
864 self.to_screen(f'{item_id}: Downloading results from {start_date} to {end_date}')
865 yield from self._get_entries_for_date(
866 url, item_id, start_date, end_date, note=' Downloading page %(page)s')
867
868 def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
869 query = {
870 'start': str(start_date),
871 'end': str(end_date or start_date),
872 'sort': 'f',
873 'order': 'd',
874 }
875 if page_num:
876 query['page'] = str(page_num)
877
878 yield from super()._entries(url, item_id, query=query, note=note)
879
880
881 class NicovideoTagURLIE(NicovideoSearchBaseIE):
882 IE_NAME = 'niconico:tag'
883 IE_DESC = 'NicoNico video tag URLs'
884 _SEARCH_TYPE = 'tag'
885 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/tag/(?P<id>[^?#&]+)?'
886 _TESTS = [{
887 'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
888 'info_dict': {
889 'id': 'ドキュメンタリー淫夢',
890 'title': 'ドキュメンタリー淫夢'
891 },
892 'playlist_mincount': 400,
893 }]
894
895 def _real_extract(self, url):
896 query = self._match_id(url)
897 return self.playlist_result(self._entries(url, query), query, query)
898
899
900 class NiconicoUserIE(InfoExtractor):
901 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
902 _TEST = {
903 'url': 'https://www.nicovideo.jp/user/419948',
904 'info_dict': {
905 'id': '419948',
906 },
907 'playlist_mincount': 101,
908 }
909 _API_URL = "https://nvapi.nicovideo.jp/v1/users/%s/videos?sortKey=registeredAt&sortOrder=desc&pageSize=%s&page=%s"
910 _PAGE_SIZE = 100
911
912 _API_HEADERS = {
913 'X-Frontend-ID': '6',
914 'X-Frontend-Version': '0'
915 }
916
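# The user-video API is paged; 'totalCount' from the first response bounds the loop
# and each item only needs its id to build a watch URL.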
917 def _entries(self, list_id):
918 total_count = 1
919 count = page_num = 0
920 while count < total_count:
921 json_parsed = self._download_json(
922 self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
923 headers=self._API_HEADERS,
924 note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
925 if not page_num:
926 total_count = int_or_none(json_parsed['data'].get('totalCount'))
927 for entry in json_parsed["data"]["items"]:
928 count += 1
929 yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
930 page_num += 1
931
932 def _real_extract(self, url):
933 list_id = self._match_id(url)
934 return self.playlist_result(self._entries(list_id), list_id, ie=NiconicoIE.ie_key())