2 from __future__
import unicode_literals
10 from .common
import InfoExtractor
, SearchInfoExtractor
11 from ..postprocessor
.ffmpeg
import FFmpegPostProcessor
12 from ..compat
import (
15 compat_urllib_parse_urlparse
,
38 class NiconicoIE(InfoExtractor
):
43 'url': 'http://www.nicovideo.jp/watch/sm22312215',
44 'md5': 'a5bad06f1347452102953f323c69da34s',
48 'title': 'Big Buck Bunny',
49 'thumbnail': r
're:https?://.*',
50 'uploader': 'takuya0301',
51 'uploader_id': '2698420',
52 'upload_date': '20131123',
53 'timestamp': int, # timestamp is unstable
54 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
59 'skip': 'Requires an account',
61 # File downloaded with and without credentials are different, so omit
63 'url': 'http://www.nicovideo.jp/watch/nm14296458',
67 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
68 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
69 'thumbnail': r
're:https?://.*',
71 'uploader_id': '18822557',
72 'upload_date': '20110429',
73 'timestamp': 1304065916,
76 'skip': 'Requires an account',
78 # 'video exists but is marked as "deleted"
80 'url': 'http://www.nicovideo.jp/watch/sm10000',
83 'ext': 'unknown_video',
84 'description': 'deleted',
85 'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
86 'thumbnail': r
're:https?://.*',
87 'upload_date': '20071224',
88 'timestamp': int, # timestamp field has different value if logged in
92 'skip': 'Requires an account',
94 'url': 'http://www.nicovideo.jp/watch/so22543406',
98 'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
99 'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
100 'thumbnail': r
're:https?://.*',
101 'timestamp': 1388851200,
102 'upload_date': '20140104',
103 'uploader': 'アニメロチャンネル',
104 'uploader_id': '312',
106 'skip': 'The viewing period of the video you were searching for has expired.',
108 # video not available via `getflv`; "old" HTML5 video
109 'url': 'http://www.nicovideo.jp/watch/sm1151009',
110 'md5': '8fa81c364eb619d4085354eab075598a',
114 'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
115 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
116 'thumbnail': r
're:https?://.*',
118 'timestamp': 1190868283,
119 'upload_date': '20070927',
120 'uploader': 'denden2',
121 'uploader_id': '1392194',
123 'comment_count': int,
125 'skip': 'Requires an account',
129 'url': 'http://www.nicovideo.jp/watch/sm31464864',
133 'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
134 'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
135 'timestamp': 1498514060,
136 'upload_date': '20170626',
138 'uploader_id': '40826363',
139 'thumbnail': r
're:https?://.*',
142 'comment_count': int,
144 'skip': 'Requires an account',
146 # Video without owner
147 'url': 'http://www.nicovideo.jp/watch/sm18238488',
148 'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
152 'title': '【実写版】ミュータントタートルズ',
153 'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
154 'timestamp': 1341160408,
155 'upload_date': '20120701',
158 'thumbnail': r
're:https?://.*',
161 'comment_count': int,
163 'skip': 'Requires an account',
165 'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
166 'only_matching': True,
169 _VALID_URL
= r
'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
170 _NETRC_MACHINE
= 'niconico'
173 'X-Frontend-ID': '6',
174 'X-Frontend-Version': '0'
177 def _real_initialize(self
):
181 username
, password
= self
._get
_login
_info
()
182 # No authentication to be performed
189 'mail_tel': username
,
190 'password': password
,
192 urlh
= self
._request
_webpage
(
193 'https://account.nicovideo.jp/api/v1/login', None,
194 note
='Logging in', errnote
='Unable to log in',
195 data
=urlencode_postdata(login_form_strs
))
199 parts
= compat_urllib_parse_urlparse(urlh
.geturl())
200 if compat_parse_qs(parts
.query
).get('message', [None])[0] == 'cant_login':
203 self
.report_warning('unable to log in: bad username or password')
206 def _get_heartbeat_info(self
, info_dict
):
208 video_id
, video_src_id
, audio_src_id
= info_dict
['url'].split(':')[1].split('/')
211 info_dict
.get('_api_data')
213 self
._html
_search
_regex
(
214 'data-api-data="([^"]+)"',
215 self
._download
_webpage
('http://www.nicovideo.jp/watch/' + video_id
, video_id
),
216 'API data', default
='{}'),
219 session_api_data
= try_get(api_data
, lambda x
: x
['media']['delivery']['movie']['session'])
220 session_api_endpoint
= try_get(session_api_data
, lambda x
: x
['urls'][0])
225 'https://nvapi.nicovideo.jp/v1/2ab0cbaa/watch', video_id
,
226 query
={'t': try_get(api_data, lambda x: x['media']['delivery']['trackingId'])}
,
227 note
='Acquiring permission for downloading video',
228 headers
=self
._API
_HEADERS
),
229 lambda x
: x
['meta']['status'])
231 self
.report_warning('Failed to acquire permission for playing video. The video may not download.')
233 yesno
= lambda x
: 'yes' if x
else 'no'
236 if try_get(api_data
, lambda x
: x
['media']['delivery']['encryption']) is not None:
238 encryption
= self
._parse
_json
(session_api_data
['token'], video_id
)['hls_encryption']
239 session_api_http_parameters
= {
244 'encrypted_key': try_get(api_data
, lambda x
: x
['media']['delivery']['encryption']['encryptedKey']),
245 'key_uri': try_get(api_data
, lambda x
: x
['media']['delivery']['encryption']['keyUri'])
248 'transfer_preset': '',
249 'use_ssl': yesno(session_api_endpoint
['isSsl']),
250 'use_well_known_port': yesno(session_api_endpoint
['isWellKnownPort']),
251 'segment_duration': 6000,
258 session_api_http_parameters
= {
260 'http_output_download_parameters': {
261 'use_ssl': yesno(session_api_endpoint
['isSsl']),
262 'use_well_known_port': yesno(session_api_endpoint
['isWellKnownPort']),
267 session_response
= self
._download
_json
(
268 session_api_endpoint
['url'], video_id
,
269 query
={'_format': 'json'}
,
270 headers
={'Content-Type': 'application/json'}
,
271 note
='Downloading JSON metadata for %s' % info_dict
['format_id'],
275 'player_id': session_api_data
.get('playerId'),
278 'auth_type': try_get(session_api_data
, lambda x
: x
['authTypes'][session_api_data
['protocols'][0]]),
279 'content_key_timeout': session_api_data
.get('contentKeyTimeout'),
280 'service_id': 'nicovideo',
281 'service_user_id': session_api_data
.get('serviceUserId')
283 'content_id': session_api_data
.get('contentId'),
284 'content_src_id_sets': [{
285 'content_src_ids': [{
287 'audio_src_ids': [audio_src_id
],
288 'video_src_ids': [video_src_id
],
292 'content_type': 'movie',
296 'lifetime': session_api_data
.get('heartbeatLifetime')
299 'priority': session_api_data
.get('priority'),
303 'http_parameters': session_api_http_parameters
306 'recipe_id': session_api_data
.get('recipeId'),
307 'session_operation_auth': {
308 'session_operation_auth_by_signature': {
309 'signature': session_api_data
.get('signature'),
310 'token': session_api_data
.get('token'),
313 'timing_constraint': 'unlimited'
317 info_dict
['url'] = session_response
['data']['session']['content_uri']
318 info_dict
['protocol'] = protocol
321 heartbeat_info_dict
= {
322 'url': session_api_endpoint
['url'] + '/' + session_response
['data']['session']['id'] + '?_format=json&_method=PUT',
323 'data': json
.dumps(session_response
['data']),
324 # interval, convert milliseconds to seconds, then halve to make a buffer.
325 'interval': float_or_none(session_api_data
.get('heartbeatLifetime'), scale
=3000),
329 return info_dict
, heartbeat_info_dict
331 def _extract_format_for_quality(self
, api_data
, video_id
, audio_quality
, video_quality
):
332 def parse_format_id(id_code
):
333 mobj
= re
.match(r
'''(?x)
335 (?:(?P<codec>[^_]+)_)?
336 (?:(?P<br>[\d]+)kbps_)?
337 (?:(?P<res>[\d+]+)p_)?
338 ''', '%s_' % id_code
)
339 return mobj
.groupdict() if mobj
else {}
341 protocol
= 'niconico_dmc'
342 format_id
= '-'.join(map(lambda s
: remove_start(s
['id'], 'archive_'), [video_quality
, audio_quality
]))
343 vdict
= parse_format_id(video_quality
['id'])
344 adict
= parse_format_id(audio_quality
['id'])
345 resolution
= try_get(video_quality
, lambda x
: x
['metadata']['resolution'], dict) or {'height': vdict.get('res')}
346 vbr
= try_get(video_quality
, lambda x
: x
['metadata']['bitrate'], float)
349 'url': '%s:%s/%s/%s' % (protocol
, video_id
, video_quality
['id'], audio_quality
['id']),
350 'format_id': format_id
,
351 'format_note': 'DMC %s' % try_get(video_quality
, lambda x
: x
['metadata']['label'], compat_str
),
352 'ext': 'mp4', # Session API are used in HTML5, which always serves mp4
353 'vcodec': vdict
.get('codec'),
354 'acodec': adict
.get('codec'),
355 'vbr': float_or_none(vbr
, 1000) or float_or_none(vdict
.get('br')),
356 'abr': float_or_none(audio_quality
.get('bitrate'), 1000) or float_or_none(adict
.get('br')),
357 'height': int_or_none(resolution
.get('height', vdict
.get('res'))),
358 'width': int_or_none(resolution
.get('width')),
359 'quality': -2 if 'low' in format_id
else -1, # Default quality value is -1
360 'protocol': protocol
,
362 'Origin': 'https://www.nicovideo.jp',
363 'Referer': 'https://www.nicovideo.jp/watch/' + video_id
,
367 def _real_extract(self
, url
):
368 video_id
= self
._match
_id
(url
)
370 # Get video webpage for API data.
371 webpage
, handle
= self
._download
_webpage
_handle
(
372 'http://www.nicovideo.jp/watch/' + video_id
, video_id
)
373 if video_id
.startswith('so'):
374 video_id
= self
._match
_id
(handle
.geturl())
376 api_data
= self
._parse
_json
(self
._html
_search
_regex
(
377 'data-api-data="([^"]+)"', webpage
,
378 'API data', default
='{}'), video_id
)
380 def get_video_info_web(items
):
381 return dict_get(api_data
['video'], items
)
384 video_info_xml
= self
._download
_xml
(
385 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id
,
386 video_id
, note
='Downloading video info page')
388 def get_video_info_xml(items
):
389 if not isinstance(items
, list):
392 ret
= xpath_text(video_info_xml
, './/' + item
)
396 if get_video_info_xml('error'):
397 error_code
= get_video_info_xml('code')
399 if error_code
== 'DELETED':
400 raise ExtractorError('The video has been deleted.',
402 elif error_code
== 'NOT_FOUND':
403 raise ExtractorError('The video is not found.',
405 elif error_code
== 'COMMUNITY':
406 self
.to_screen('%s: The video is community members only.' % video_id
)
408 raise ExtractorError('%s reports error: %s' % (self
.IE_NAME
, error_code
))
410 # Start extracting video formats
413 # Get HTML5 videos info
414 quality_info
= try_get(api_data
, lambda x
: x
['media']['delivery']['movie'])
416 raise ExtractorError('The video can\'t be downloaded', expected
=True)
418 for audio_quality
in quality_info
.get('audios') or {}:
419 for video_quality
in quality_info
.get('videos') or {}:
420 if not audio_quality
.get('isAvailable') or not video_quality
.get('isAvailable'):
422 formats
.append(self
._extract
_format
_for
_quality
(
423 api_data
, video_id
, audio_quality
, video_quality
))
427 video_real_url
= try_get(api_data
, lambda x
: x
['video']['smileInfo']['url'])
429 is_economy
= video_real_url
.endswith('low')
432 self
.report_warning('Site is currently in economy mode! You will only have access to lower quality streams')
434 # Invoking ffprobe to determine resolution
435 pp
= FFmpegPostProcessor(self
._downloader
)
436 cookies
= self
._get
_cookies
('https://nicovideo.jp').output(header
='', sep
='; path=/; domain=nicovideo.jp;\n')
438 self
.to_screen('%s: %s' % (video_id
, 'Checking smile format with ffprobe'))
441 metadata
= pp
.get_metadata_object(video_real_url
, ['-cookies', cookies
])
442 except PostProcessingError
as err
:
443 raise ExtractorError(err
.msg
, expected
=True)
445 v_stream
= a_stream
= {}
447 # Some complex swf files doesn't have video stream (e.g. nm4809023)
448 for stream
in metadata
['streams']:
449 if stream
['codec_type'] == 'video':
451 elif stream
['codec_type'] == 'audio':
454 # Community restricted videos seem to have issues with the thumb API not returning anything at all
456 (get_video_info_xml('size_high') if not is_economy
else get_video_info_xml('size_low'))
457 or metadata
['format']['size']
460 get_video_info_xml('movie_type')
461 or 'mp4' if 'mp4' in metadata
['format']['format_name'] else metadata
['format']['format_name']
464 # 'creation_time' tag on video stream of re-encoded SMILEVIDEO mp4 files are '1970-01-01T00:00:00.000000Z'.
466 parse_iso8601(get_video_info_web('first_retrieve'))
467 or unified_timestamp(get_video_info_web('postedDateTime'))
469 metadata_timestamp
= (
470 parse_iso8601(try_get(v_stream
, lambda x
: x
['tags']['creation_time']))
471 or timestamp
if extension
!= 'mp4' else 0
474 # According to compconf, smile videos from pre-2017 are always better quality than their DMC counterparts
475 smile_threshold_timestamp
= parse_iso8601('2016-12-08T00:00:00+09:00')
477 is_source
= timestamp
< smile_threshold_timestamp
or metadata_timestamp
> 0
479 # If movie file size is unstable, old server movie is not source movie.
482 'url': video_real_url
,
483 'format_id': 'smile' if not is_economy
else 'smile_low',
484 'format_note': 'SMILEVIDEO source' if not is_economy
else 'SMILEVIDEO low quality',
486 'container': extension
,
487 'vcodec': v_stream
.get('codec_name'),
488 'acodec': a_stream
.get('codec_name'),
489 # Some complex swf files doesn't have total bit rate metadata (e.g. nm6049209)
490 'tbr': int_or_none(metadata
['format'].get('bit_rate'), scale
=1000),
491 'vbr': int_or_none(v_stream
.get('bit_rate'), scale
=1000),
492 'abr': int_or_none(a_stream
.get('bit_rate'), scale
=1000),
493 'height': int_or_none(v_stream
.get('height')),
494 'width': int_or_none(v_stream
.get('width')),
495 'source_preference': 5 if not is_economy
else -2,
496 'quality': 5 if is_source
and not is_economy
else None,
500 self
._sort
_formats
(formats
)
502 # Start extracting information
504 get_video_info_xml('title') # prefer to get the untranslated original title
505 or get_video_info_web(['originalTitle', 'title'])
506 or self
._og
_search
_title
(webpage
, default
=None)
507 or self
._html
_search
_regex
(
508 r
'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
509 webpage
, 'video title'))
511 watch_api_data_string
= self
._html
_search
_regex
(
512 r
'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
513 webpage
, 'watch api data', default
=None)
514 watch_api_data
= self
._parse
_json
(watch_api_data_string
, video_id
) if watch_api_data_string
else {}
515 video_detail
= watch_api_data
.get('videoDetail', {})
518 self
._html
_search
_regex
(r
'<meta property="og:image" content="([^"]+)">', webpage
, 'thumbnail data', default
=None)
519 or dict_get( # choose highest from 720p to 240p
520 get_video_info_web('thumbnail'),
521 ['ogp', 'player', 'largeUrl', 'middleUrl', 'url'])
522 or self
._html
_search
_meta
('image', webpage
, 'thumbnail', default
=None)
523 or video_detail
.get('thumbnail'))
525 description
= get_video_info_web('description')
528 match
= self
._html
_search
_meta
('datePublished', webpage
, 'date published', default
=None)
530 timestamp
= parse_iso8601(match
.replace('+', ':00+'))
531 if not timestamp
and video_detail
.get('postedAt'):
532 timestamp
= parse_iso8601(
533 video_detail
['postedAt'].replace('/', '-'),
534 delimiter
=' ', timezone
=datetime
.timedelta(hours
=9))
535 timestamp
= timestamp
or try_get(api_data
, lambda x
: parse_iso8601(x
['video']['registeredAt']))
537 view_count
= int_or_none(get_video_info_web(['view_counter', 'viewCount']))
539 match
= self
._html
_search
_regex
(
540 r
'>Views: <strong[^>]*>([^<]+)</strong>',
541 webpage
, 'view count', default
=None)
543 view_count
= int_or_none(match
.replace(',', ''))
546 or video_detail
.get('viewCount')
547 or try_get(api_data
, lambda x
: x
['video']['count']['view']))
550 int_or_none(get_video_info_web('comment_num'))
551 or video_detail
.get('commentCount')
552 or try_get(api_data
, lambda x
: x
['video']['count']['comment']))
554 if not comment_count
:
555 match
= self
._html
_search
_regex
(
556 r
'>Comments: <strong[^>]*>([^<]+)</strong>',
557 webpage
, 'comment count', default
=None)
559 comment_count
= int_or_none(match
.replace(',', ''))
561 duration
= (parse_duration(
562 get_video_info_web('length')
563 or self
._html
_search
_meta
(
564 'video:duration', webpage
, 'video duration', default
=None))
565 or video_detail
.get('length')
566 or get_video_info_web('duration'))
568 webpage_url
= get_video_info_web('watch_url') or url
570 # for channel movie and community movie
571 channel_id
= try_get(
573 (lambda x
: x
['channel']['globalId'],
574 lambda x
: x
['community']['globalId']))
577 (lambda x
: x
['channel']['name'],
578 lambda x
: x
['community']['name']))
580 # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
581 # in the JSON, which will cause None to be returned instead of {}.
582 owner
= try_get(api_data
, lambda x
: x
.get('owner'), dict) or {}
583 uploader_id
= str_or_none(
584 get_video_info_web(['ch_id', 'user_id'])
589 get_video_info_web(['ch_name', 'user_nickname'])
590 or owner
.get('nickname')
596 '_api_data': api_data
,
599 'thumbnail': thumbnail
,
600 'description': description
,
601 'uploader': uploader
,
602 'timestamp': timestamp
,
603 'uploader_id': uploader_id
,
605 'channel_id': channel_id
,
606 'view_count': view_count
,
607 'comment_count': comment_count
,
608 'duration': duration
,
609 'webpage_url': webpage_url
,
613 class NiconicoPlaylistBaseIE(InfoExtractor
):
617 'X-Frontend-ID': '6',
618 'X-Frontend-Version': '0',
619 'X-Niconico-Language': 'en-us'
def _call_api(self, list_id, resource, query):
    """Stub: each concrete playlist extractor supplies its own API call."""
627 def _parse_owner(item
):
629 'uploader': traverse_obj(item
, ('owner', 'name')),
630 'uploader_id': traverse_obj(item
, ('owner', 'id')),
633 def _fetch_page(self
, list_id
, page
):
635 resp
= self
._call
_api
(list_id
, 'page %d' % page
, {
637 'pageSize': self
._PAGE
_SIZE
,
639 # this is needed to support both mylist and user
640 for video
in traverse_obj(resp
, ('items', ..., ('video', None))) or []:
641 video_id
= video
.get('id')
643 # skip {"video": {"id": "blablabla", ...}}
645 count
= video
.get('count') or {}
646 get_count
= lambda x
: int_or_none(count
.get(x
))
650 'title': video
.get('title'),
651 'url': f
'https://www.nicovideo.jp/watch/{video_id}',
652 'description': video
.get('shortDescription'),
653 'duration': int_or_none(video
.get('duration')),
654 'view_count': get_count('view'),
655 'comment_count': get_count('comment'),
656 'thumbnail': traverse_obj(video
, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
657 'ie_key': NiconicoIE
.ie_key(),
658 **self
._parse
_owner
(video
),
def _entries(self, list_id):
    """Return a lazily-evaluated, paged list of entries for *list_id*."""
    # Bind the list id now; OnDemandPagedList supplies the page number later.
    fetch_page = functools.partial(self._fetch_page, list_id)
    return OnDemandPagedList(fetch_page, self._PAGE_SIZE)
665 class NiconicoPlaylistIE(NiconicoPlaylistBaseIE
):
666 IE_NAME
= 'niconico:playlist'
667 _VALID_URL
= r
'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'
670 'url': 'http://www.nicovideo.jp/mylist/27411728',
673 'title': 'AKB48のオールナイトニッポン',
674 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
676 'uploader_id': '805442',
678 'playlist_mincount': 291,
680 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
681 'only_matching': True,
683 'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
684 'only_matching': True,
def _call_api(self, list_id, resource, query):
    """Query the nvapi v2 mylist endpoint and return its mylist payload."""
    response = self._download_json(
        f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
        f'Downloading {resource}', query=query,
        headers=self._API_HEADERS)
    return response['data']['mylist']
693 def _real_extract(self
, url
):
694 list_id
= self
._match
_id
(url
)
695 mylist
= self
._call
_api
(list_id
, 'list', {
698 return self
.playlist_result(
699 self
._entries
(list_id
), list_id
,
700 mylist
.get('name'), mylist
.get('description'), **self
._parse
_owner
(mylist
))
703 class NiconicoSeriesIE(InfoExtractor
):
704 IE_NAME
= 'niconico:series'
705 _VALID_URL
= r
'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/series/(?P<id>\d+)'
708 'url': 'https://www.nicovideo.jp/series/110226',
711 'title': 'ご立派ァ!のシリーズ',
713 'playlist_mincount': 10, # as of 2021/03/17
715 'url': 'https://www.nicovideo.jp/series/12312/',
718 'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
720 'playlist_mincount': 97, # as of 2021/03/17
722 'url': 'https://nico.ms/series/203559',
723 'only_matching': True,
726 def _real_extract(self
, url
):
727 list_id
= self
._match
_id
(url
)
728 webpage
= self
._download
_webpage
(f
'https://www.nicovideo.jp/series/{list_id}', list_id
)
730 title
= self
._search
_regex
(
732 r
'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
733 webpage
, 'title', fatal
=False)
735 title
= unescapeHTML(title
)
737 self
.url_result(f
'https://www.nicovideo.jp/watch/{v_id}', video_id
=v_id
)
738 for v_id
in re
.findall(r
'href="/watch/([a-z0-9]+)" data-href="/watch/\1', webpage
)]
739 return self
.playlist_result(playlist
, list_id
, title
)
742 class NiconicoHistoryIE(NiconicoPlaylistBaseIE
):
743 IE_NAME
= 'niconico:history'
744 IE_DESC
= 'NicoNico user history. Requires cookies.'
745 _VALID_URL
= r
'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
748 'note': 'PC page, with /video',
749 'url': 'https://www.nicovideo.jp/my/history/video',
750 'only_matching': True,
752 'note': 'PC page, without /video',
753 'url': 'https://www.nicovideo.jp/my/history',
754 'only_matching': True,
756 'note': 'mobile page, with /video',
757 'url': 'https://sp.nicovideo.jp/my/history/video',
758 'only_matching': True,
760 'note': 'mobile page, without /video',
761 'url': 'https://sp.nicovideo.jp/my/history',
762 'only_matching': True,
def _call_api(self, list_id, resource, query):
    """Fetch the logged-in user's watch history from the nvapi endpoint."""
    response = self._download_json(
        'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
        f'Downloading {resource}', query=query,
        headers=self._API_HEADERS)
    return response['data']
771 def _real_extract(self
, url
):
774 mylist
= self
._call
_api
(list_id
, 'list', {
777 except ExtractorError
as e
:
778 if isinstance(e
.cause
, compat_HTTPError
) and e
.cause
.code
== 401:
779 self
.raise_login_required('You have to be logged in to get your watch history')
781 return self
.playlist_result(self
._entries
(list_id
), list_id
, **self
._parse
_owner
(mylist
))
784 class NicovideoSearchBaseIE(InfoExtractor
):
785 _SEARCH_TYPE
= 'search'
787 def _entries(self
, url
, item_id
, query
=None, note
='Downloading page %(page)s'):
789 pages
= [query
['page']] if 'page' in query
else itertools
.count(1)
790 for page_num
in pages
:
791 query
['page'] = str(page_num
)
792 webpage
= self
._download
_webpage
(url
, item_id
, query
=query
, note
=note
% {'page': page_num}
)
793 results
= re
.findall(r
'(?<=data-video-id=)["\']?
(?P
<videoid
>.*?
)(?
=["\'])', webpage)
795 yield self.url_result(f'http://www.nicovideo.jp/watch/{item}', 'Niconico', item)
def _search_results(self, query):
    """Yield search results for *query* via the site's search/tag listing page."""
    search_url = self._proto_relative_url(f'//www.nicovideo.jp/{self._SEARCH_TYPE}/{query}')
    return self._entries(search_url, query)
class NicovideoSearchIE(NicovideoSearchBaseIE, SearchInfoExtractor):
    """Search extractor reached through the `nicosearch` search prefix."""
    IE_NAME = 'nicovideo:search'
    IE_DESC = 'Nico video search'
    _SEARCH_KEY = 'nicosearch'
810 class NicovideoSearchURLIE(NicovideoSearchBaseIE):
811 IE_NAME = f'{NicovideoSearchIE.IE_NAME}_url'
812 IE_DESC = 'Nico video search URLs'
813 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/search/(?P<id>[^?#&]+)?'
815 'url': 'http://www.nicovideo.jp/search/sm9',
820 'playlist_mincount': 40,
822 'url': 'https://www.nicovideo.jp/search/sm9?sort=h&order=d&end=2020-12-31&start=2020-01-01',
827 'playlist_count': 31,
def _real_extract(self, url):
    """Use the search term from the URL as playlist id and title alike."""
    search_term = self._match_id(url)
    entries = self._entries(url, search_term)
    return self.playlist_result(entries, search_term, search_term)
835 class NicovideoSearchDateIE(NicovideoSearchBaseIE, SearchInfoExtractor):
836 IE_DESC = 'Nico video search, newest first'
837 IE_NAME = f'{NicovideoSearchIE.IE_NAME}:date'
838 _SEARCH_KEY = 'nicosearchdate'
840 'url': 'nicosearchdateall:a',
845 'playlist_mincount': 1610,
848 _START_DATE = datetime.date(2007, 1, 1)
849 _RESULTS_PER_PAGE = 32
852 def _entries(self, url, item_id, start_date=None, end_date=None):
853 start_date, end_date = start_date or self._START_DATE, end_date or datetime.datetime.now().date()
855 # If the last page has a full page of videos, we need to break down the query interval further
856 last_page_len = len(list(self._get_entries_for_date(
857 url, item_id, start_date, end_date, self._MAX_PAGES,
858 note=f'Checking number of videos from {start_date} to {end_date}')))
859 if (last_page_len == self._RESULTS_PER_PAGE and start_date != end_date):
860 midpoint = start_date + ((end_date - start_date) // 2)
861 yield from self._entries(url, item_id, midpoint, end_date)
862 yield from self._entries(url, item_id, start_date, midpoint)
864 self.to_screen(f'{item_id}: Downloading results from {start_date} to {end_date}')
865 yield from self._get_entries_for_date(
866 url, item_id, start_date, end_date, note=' Downloading page %(page)s')
868 def _get_entries_for_date(self, url, item_id, start_date, end_date=None, page_num=None, note=None):
870 'start': str(start_date),
871 'end': str(end_date or start_date),
876 query['page'] = str(page_num)
878 yield from super()._entries(url, item_id, query=query, note=note)
881 class NicovideoTagURLIE(NicovideoSearchBaseIE):
882 IE_NAME = 'niconico:tag'
883 IE_DESC = 'NicoNico video tag URLs'
885 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/tag/(?P<id>[^?#&]+)?'
887 'url': 'https://www.nicovideo.jp/tag/ドキュメンタリー淫夢',
890 'title': 'ドキュメンタリー淫夢'
892 'playlist_mincount': 400,
def _real_extract(self, url):
    """Extract all videos on a tag page; the tag doubles as id and title."""
    tag = self._match_id(url)
    return self.playlist_result(
        self._entries(url, tag), playlist_id=tag, playlist_title=tag)
900 class NiconicoUserIE(InfoExtractor):
901 _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/user/(?P<id>\d+)/?(?:$|[#?])'
903 'url': 'https://www.nicovideo.jp/user/419948',
907 'playlist_mincount': 101,
909 _API_URL = "https
://nvapi
.nicovideo
.jp
/v1
/users
/%s/videos?sortKey
=registeredAt
&sortOrder
=desc
&pageSize
=%s&page
=%s"
913 'X-Frontend-ID': '6',
914 'X-Frontend-Version': '0'
917 def _entries(self, list_id):
920 while count < total_count:
921 json_parsed = self._download_json(
922 self._API_URL % (list_id, self._PAGE_SIZE, page_num + 1), list_id,
923 headers=self._API_HEADERS,
924 note='Downloading JSON metadata%s' % (' page %d' % page_num if page_num else ''))
926 total_count = int_or_none(json_parsed['data'].get('totalCount'))
927 for entry in json_parsed["data
"]["items
"]:
929 yield self.url_result('https://www.nicovideo.jp/watch/%s' % entry['id'])
def _real_extract(self, url):
    """Build a playlist of every upload by the user id in the URL."""
    user_id = self._match_id(url)
    return self.playlist_result(
        self._entries(user_id), user_id, ie=NiconicoIE.ie_key())