2 from __future__
import unicode_literals
9 from .common
import InfoExtractor
10 from ..compat
import (
12 compat_urllib_parse_urlparse
,
# NOTE(review): this chunk is a corrupted extract — lines are wrapped
# mid-token and the original file's line numbers (31, 36, 37, ...) are fused
# into the text. Gaps in that numbering mean source lines are missing
# (e.g. the 'note'/'info_dict' braces of the _TESTS entries). Code is left
# byte-identical; only comments were added/translated.
# Extractor for single Niconico (nicovideo.jp) videos.
31 class NiconicoIE(InfoExtractor
):
# _TESTS fixtures (structure partially lost): each entry pairs a watch URL
# with expected metadata; md5-prefixed values are checksums of long fields.
36 'url': 'http://www.nicovideo.jp/watch/sm22312215',
37 'md5': 'd1a75c0823e2f629128c43e1212760f9',
41 'title': 'Big Buck Bunny',
42 'thumbnail': r
're:https?://.*',
43 'uploader': 'takuya0301',
44 'uploader_id': '2698420',
45 'upload_date': '20131123',
46 'timestamp': int, # timestamp is unstable
47 'description': '(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
52 'skip': 'Requires an account',
54 # File downloaded with and without credentials are different, so omit
56 'url': 'http://www.nicovideo.jp/watch/nm14296458',
60 'title': '【鏡音リン】Dance on media【オリジナル】take2!',
61 'description': 'md5:689f066d74610b3b22e0f1739add0f58',
62 'thumbnail': r
're:https?://.*',
64 'uploader_id': '18822557',
65 'upload_date': '20110429',
66 'timestamp': 1304065916,
69 'skip': 'Requires an account',
71 # video exists but is marked as "deleted"
73 'url': 'http://www.nicovideo.jp/watch/sm10000',
76 'ext': 'unknown_video',
77 'description': 'deleted',
78 'title': 'ドラえもんエターナル第3話「決戦第3新東京市」<前編>',
79 'thumbnail': r
're:https?://.*',
80 'upload_date': '20071224',
81 'timestamp': int, # timestamp field has different value if logged in
85 'skip': 'Requires an account',
87 'url': 'http://www.nicovideo.jp/watch/so22543406',
91 'title': '【第1回】RADIOアニメロミックス ラブライブ!~のぞえりRadio Garden~',
92 'description': 'md5:b27d224bb0ff53d3c8269e9f8b561cf1',
93 'thumbnail': r
're:https?://.*',
94 'timestamp': 1388851200,
95 'upload_date': '20140104',
96 'uploader': 'アニメロチャンネル',
99 'skip': 'The viewing period of the video you were searching for has expired.',
101 # video not available via `getflv`; "old" HTML5 video
102 'url': 'http://www.nicovideo.jp/watch/sm1151009',
103 'md5': '8fa81c364eb619d4085354eab075598a',
107 'title': 'マスターシステム本体内蔵のスペハリのメインテーマ(PSG版)',
108 'description': 'md5:6ee077e0581ff5019773e2e714cdd0b7',
109 'thumbnail': r
're:https?://.*',
111 'timestamp': 1190868283,
112 'upload_date': '20070927',
113 'uploader': 'denden2',
114 'uploader_id': '1392194',
116 'comment_count': int,
118 'skip': 'Requires an account',
122 'url': 'http://www.nicovideo.jp/watch/sm31464864',
126 'title': '新作TVアニメ「戦姫絶唱シンフォギアAXZ」PV 最高画質',
127 'description': 'md5:e52974af9a96e739196b2c1ca72b5feb',
128 'timestamp': 1498514060,
129 'upload_date': '20170626',
131 'uploader_id': '40826363',
132 'thumbnail': r
're:https?://.*',
135 'comment_count': int,
137 'skip': 'Requires an account',
139 # Video without owner
140 'url': 'http://www.nicovideo.jp/watch/sm18238488',
141 'md5': 'd265680a1f92bdcbbd2a507fc9e78a9e',
145 'title': '【実写版】ミュータントタートルズ',
146 'description': 'md5:15df8988e47a86f9e978af2064bf6d8e',
147 'timestamp': 1341160408,
148 'upload_date': '20120701',
151 'thumbnail': r
're:https?://.*',
154 'comment_count': int,
156 'skip': 'Requires an account',
158 'url': 'http://sp.nicovideo.jp/watch/sm28964488?ss_pos=1&cp_in=wt_tg',
159 'only_matching': True,
# URL pattern: matches www/secure/sp subdomains; video id is an optional
# two-letter prefix (sm/nm/so/...) followed by digits.
162 _VALID_URL
= r
'https?://(?:www\.|secure\.|sp\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
# Machine name used to look up credentials in the user's .netrc file.
163 _NETRC_MACHINE
= 'niconico'
# NOTE(review): corrupted extract — original lines 166-168, 171-176, 179,
# 184-186 and 189-190 are missing (the `def _login(self):` header, the
# credential guard and the `login_form_strs = {` opener among them, judging
# by the numbering gaps). Code left byte-identical; comments only.
# _real_initialize: runs once before extraction; presumably delegates to a
# login helper — TODO confirm against the complete file.
165 def _real_initialize(self
):
# Credentials come from --username/--password or .netrc (_NETRC_MACHINE).
169 username
, password
= self
._get
_login
_info
()
170 # No authentication to be performed
# Form fields for the account login POST (dict opener lost above).
177 'mail_tel': username
,
178 'password': password
,
# POST the login form; the session cookies on the response are what later
# requests rely on.
180 urlh
= self
._request
_webpage
(
181 'https://account.nicovideo.jp/api/v1/login', None,
182 note
='Logging in', errnote
='Unable to log in',
183 data
=urlencode_postdata(login_form_strs
))
# A failed login redirects to a URL carrying message=cant_login in its
# query string; detect that and warn instead of failing hard.
187 parts
= compat_urllib_parse_urlparse(urlh
.geturl())
188 if compat_parse_qs(parts
.query
).get('message', [None])[0] == 'cant_login':
191 self
._downloader
.report_warning('unable to log in: bad username or password')
# NOTE(review): corrupted extract — numbering gaps show many missing lines
# (the `def yesno(...)` header before line 196, the `data=json.dumps({...})`
# scaffolding of the session request, and most of the nested dict
# braces/keys between lines 207-256). Code left byte-identical.
# Builds one downloadable format dict for a given (audio, video) quality
# pair by opening a session against the DMC "Session API".
194 def _extract_format_for_quality(self
, api_data
, video_id
, audio_quality
, video_quality
):
# Local helper (header lost): the API expects literal 'yes'/'no' strings
# rather than booleans.
196 return 'yes' if boolean
else 'no'
198 session_api_data
= api_data
['video']['dmcInfo']['session_api']
199 session_api_endpoint
= session_api_data
['urls'][0]
# format_id like "h264_360p-aac_64kbps"; the 'archive_' prefix on quality
# ids is stripped.
201 format_id
= '-'.join(map(lambda s
: remove_start(s
['id'], 'archive_'), [video_quality
, audio_quality
]))
# POST the session creation request; the response carries the content URI.
203 session_response
= self
._download
_json
(
204 session_api_endpoint
['url'], video_id
,
205 query
={'_format': 'json'}
,
206 headers
={'Content-Type': 'application/json'}
,
207 note
='Downloading JSON metadata for %s' % format_id
,
# Session request payload (surrounding structure lost): identifiers,
# auth, requested src ids, keep-alive and protocol parameters all echo
# values from session_api_data.
211 'player_id': session_api_data
['player_id'],
214 'auth_type': session_api_data
['auth_types'][session_api_data
['protocols'][0]],
215 'content_key_timeout': session_api_data
['content_key_timeout'],
216 'service_id': 'nicovideo',
217 'service_user_id': session_api_data
['service_user_id']
219 'content_id': session_api_data
['content_id'],
220 'content_src_id_sets': [{
221 'content_src_ids': [{
223 'audio_src_ids': [audio_quality
['id']],
224 'video_src_ids': [video_quality
['id']],
228 'content_type': 'movie',
232 'lifetime': session_api_data
['heartbeat_lifetime']
235 'priority': session_api_data
['priority'],
241 'http_output_download_parameters': {
242 'use_ssl': yesno(session_api_endpoint
['is_ssl']),
243 'use_well_known_port': yesno(session_api_endpoint
['is_well_known_port']),
249 'recipe_id': session_api_data
['recipe_id'],
250 'session_operation_auth': {
251 'session_operation_auth_by_signature': {
252 'signature': session_api_data
['signature'],
253 'token': session_api_data
['token'],
256 'timing_constraint': 'unlimited'
# Assemble the format dict returned to _real_extract (return/dict opener
# lost); bitrates arrive in kbps hence the /1000 scaling via float_or_none.
260 resolution
= video_quality
.get('resolution', {})
263 'url': session_response
['data']['session']['content_uri'],
264 'format_id': format_id
,
265 'ext': 'mp4', # Session API are used in HTML5, which always serves mp4
266 'abr': float_or_none(audio_quality
.get('bitrate'), 1000),
267 'vbr': float_or_none(video_quality
.get('bitrate'), 1000),
268 'height': resolution
.get('height'),
269 'width': resolution
.get('width'),
# NOTE(review): corrupted extract — numbering gaps show substantial missing
# scaffolding (try/except around the smileInfo lookup, `formats = []`, the
# return-dict opener around line 425, several conditionals). Code left
# byte-identical; comments added only.
# Main single-video extraction entry point.
272 def _real_extract(self
, url
):
273 video_id
= self
._match
_id
(url
)
275 # Get video webpage. We are not actually interested in it for normal
276 # cases, but need the cookies in order to be able to download the
# Fetch the watch page; the handle is kept to read the final URL after
# redirects.
278 webpage
, handle
= self
._download
_webpage
_handle
(
279 'http://www.nicovideo.jp/watch/' + video_id
, video_id
)
# 'so' (channel) ids may redirect to a canonical id; re-read it from the
# redirected URL.
280 if video_id
.startswith('so'):
281 video_id
= self
._match
_id
(handle
.geturl())
# The watch page embeds a JSON blob in data-api-data; default '{}' keeps
# api_data a dict even when the attribute is absent.
283 api_data
= self
._parse
_json
(self
._html
_search
_regex
(
284 'data-api-data="([^"]+)"', webpage
,
285 'API data', default
='{}'), video_id
)
# URLs ending in 'low' are the throttled "economy" variants.
287 def _format_id_from_url(video_url
):
288 return 'economy' if video_real_url
.endswith('low') else 'normal'
# Preferred source: HTML5 smileInfo URL (try-opener lost above).
291 video_real_url
= api_data
['video']['smileInfo']['url']
292 except KeyError: # Flash videos
# Legacy fallback: the getflv API returns a urlencoded key/value body.
294 flv_info_webpage
= self
._download
_webpage
(
295 'http://flapi.nicovideo.jp/api/getflv/' + video_id
+ '?as3=1',
296 video_id
, 'Downloading flv info')
298 flv_info
= compat_parse_qs(flv_info_webpage
)
# Map getflv error flags to user-facing errors.
299 if 'url' not in flv_info
:
300 if 'deleted' in flv_info
:
301 raise ExtractorError('The video has been deleted.',
303 elif 'closed' in flv_info
:
304 raise ExtractorError('Niconico videos now require logging in',
306 elif 'error' in flv_info
:
307 raise ExtractorError('%s reports error: %s' % (
308 self
.IE_NAME
, flv_info
['error'][0]), expected
=True)
310 raise ExtractorError('Unable to find video URL')
# Metadata for the flash path comes from the getthumbinfo XML API.
312 video_info_xml
= self
._download
_xml
(
313 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id
,
314 video_id
, note
='Downloading video info page')
# XML-backed metadata lookup; accepts a single key or a list of
# candidate keys (loop/return lines lost).
316 def get_video_info(items
):
317 if not isinstance(items
, list):
320 ret
= xpath_text(video_info_xml
, './/' + item
)
324 video_real_url
= flv_info
['url'][0]
326 extension
= get_video_info('movie_type')
328 extension
= determine_ext(video_real_url
)
# Single-format entry for the flash path (dict opener lost).
331 'url': video_real_url
,
333 'format_id': _format_id_from_url(video_real_url
),
# HTML5 path: dmcInfo present => enumerate every available
# audio x video quality combination via the Session API.
338 dmc_info
= api_data
['video'].get('dmcInfo')
339 if dmc_info
: # "New" HTML5 videos
340 quality_info
= dmc_info
['quality']
341 for audio_quality
in quality_info
['audios']:
342 for video_quality
in quality_info
['videos']:
343 if not audio_quality
['available'] or not video_quality
['available']:
345 formats
.append(self
._extract
_format
_for
_quality
(
346 api_data
, video_id
, audio_quality
, video_quality
))
348 self
._sort
_formats
(formats
)
349 else: # "Old" HTML5 videos
351 'url': video_real_url
,
353 'format_id': _format_id_from_url(video_real_url
),
# HTML5 path re-binds get_video_info to read from the API JSON instead
# of the thumbinfo XML.
356 def get_video_info(items
):
357 return dict_get(api_data
['video'], items
)
359 # Start extracting information
# Title: API data, then og:title, then the visible page header.
360 title
= get_video_info('title')
362 title
= self
._og
_search
_title
(webpage
, default
=None)
364 title
= self
._html
_search
_regex
(
365 r
'<span[^>]+class="videoHeaderTitle"[^>]*>([^<]+)</span>',
366 webpage
, 'video title')
# Older pages embed a second JSON blob (watchAPIDataContainer) used as a
# further metadata fallback.
368 watch_api_data_string
= self
._html
_search
_regex
(
369 r
'<div[^>]+id="watchAPIDataContainer"[^>]+>([^<]+)</div>',
370 webpage
, 'watch api data', default
=None)
371 watch_api_data
= self
._parse
_json
(watch_api_data_string
, video_id
) if watch_api_data_string
else {}
372 video_detail
= watch_api_data
.get('videoDetail', {})
# Thumbnail: info keys, then <meta image>, then videoDetail.
375 get_video_info(['thumbnail_url', 'thumbnailURL'])
376 or self
._html
_search
_meta
('image', webpage
, 'thumbnail', default
=None)
377 or video_detail
.get('thumbnail'))
379 description
= get_video_info('description')
# Timestamp: ISO8601 first_retrieve, else postedDateTime, else the
# datePublished meta tag (missing-seconds form patched with ':00'),
# else videoDetail.postedAt interpreted as JST (UTC+9).
381 timestamp
= (parse_iso8601(get_video_info('first_retrieve'))
382 or unified_timestamp(get_video_info('postedDateTime')))
384 match
= self
._html
_search
_meta
('datePublished', webpage
, 'date published', default
=None)
386 timestamp
= parse_iso8601(match
.replace('+', ':00+'))
387 if not timestamp
and video_detail
.get('postedAt'):
388 timestamp
= parse_iso8601(
389 video_detail
['postedAt'].replace('/', '-'),
390 delimiter
=' ', timezone
=datetime
.timedelta(hours
=9))
# View count: info keys, else the "Views:" page fragment (comma
# separators stripped), else videoDetail.
392 view_count
= int_or_none(get_video_info(['view_counter', 'viewCount']))
394 match
= self
._html
_search
_regex
(
395 r
'>Views: <strong[^>]*>([^<]+)</strong>',
396 webpage
, 'view count', default
=None)
398 view_count
= int_or_none(match
.replace(',', ''))
399 view_count
= view_count
or video_detail
.get('viewCount')
# Comment count mirrors the same multi-source fallback chain.
401 comment_count
= (int_or_none(get_video_info('comment_num'))
402 or video_detail
.get('commentCount')
403 or try_get(api_data
, lambda x
: x
['thread']['commentCount']))
404 if not comment_count
:
405 match
= self
._html
_search
_regex
(
406 r
'>Comments: <strong[^>]*>([^<]+)</strong>',
407 webpage
, 'comment count', default
=None)
409 comment_count
= int_or_none(match
.replace(',', ''))
# Duration: parse 'length' or the video:duration meta; fall back to raw
# videoDetail/duration values.
411 duration
= (parse_duration(
412 get_video_info('length')
413 or self
._html
_search
_meta
(
414 'video:duration', webpage
, 'video duration', default
=None))
415 or video_detail
.get('length')
416 or get_video_info('duration'))
418 webpage_url
= get_video_info('watch_url') or url
420 # Note: cannot use api_data.get('owner', {}) because owner may be set to "null"
421 # in the JSON, which will cause None to be returned instead of {}.
422 owner
= try_get(api_data
, lambda x
: x
.get('owner'), dict) or {}
423 uploader_id
= get_video_info(['ch_id', 'user_id']) or owner
.get('id')
424 uploader
= get_video_info(['ch_name', 'user_nickname']) or owner
.get('nickname')
# Final info dict (opener and id/title/formats lines lost above).
430 'thumbnail': thumbnail
,
431 'description': description
,
432 'uploader': uploader
,
433 'timestamp': timestamp
,
434 'uploader_id': uploader_id
,
435 'view_count': view_count
,
436 'comment_count': comment_count
,
437 'duration': duration
,
438 'webpage_url': webpage_url
,
# NOTE(review): corrupted extract — the _TESTS list/dict braces are missing
# (numbering gaps at 444-445, 447-448, 451, 453, 455, 458-460). Code left
# byte-identical; comments added only.
# Extractor for Niconico "mylist" playlists.
442 class NiconicoPlaylistIE(InfoExtractor
):
# Matches both /mylist/<id> and /user/<uid>/mylist/<id> forms.
443 _VALID_URL
= r
'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/)?mylist/(?P<id>\d+)'
# Test fixtures: one full-metadata playlist, one URL-shape-only check.
446 'url': 'http://www.nicovideo.jp/mylist/27411728',
449 'title': 'AKB48のオールナイトニッポン',
450 'description': 'md5:d89694c5ded4b6c693dea2db6e41aa08',
452 'uploader_id': '805442',
454 'playlist_mincount': 225,
456 'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
457 'only_matching': True,
def _call_api(self, list_id, resource, query):
    """Fetch mylist data from Niconico's nvapi endpoint.

    Args:
        list_id: numeric mylist id as a string (used in both the URL
            and as the download's display id).
        resource: human-readable label interpolated into the progress
            note (e.g. 'list', 'page 2').
        query: dict of extra query-string parameters for the API call.

    Returns:
        The ['data']['mylist'] payload dict from the JSON response.
    """
    # Fix: the progress-note string was misspelled "metatdata".
    return self._download_json(
        'https://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id,
        'Downloading %s JSON metadata' % resource, query=query,
        # The X-Frontend-Id header identifies the caller to nvapi and is
        # required for the endpoint to answer.
        headers={'X-Frontend-Id': 6})['data']['mylist']
def _parse_owner(self, item):
    """Extract uploader fields from an item's 'owner' object.

    Callers merge the result via dict.update() (see _fetch_page and
    _real_extract), so this always returns a dict. The corrupted paste
    had lost the `return {` opener; restored here.

    Args:
        item: a mylist or video JSON object that may carry an 'owner'.

    Returns:
        Dict with 'uploader' (display name) and 'uploader_id' keys;
        values are None when the owner is absent.
    """
    # 'owner' may be present but null in the JSON; `or {}` guards that.
    owner = item.get('owner') or {}
    return {
        'uploader': owner.get('name'),
        'uploader_id': owner.get('id'),
    }
# NOTE(review): corrupted extract — missing lines include the per-item loop
# header, the `info = {` opener, pagination keys and the yield/return
# (numbering gaps at 477, 479, 481-482, 485-486, 489-491, 499, 501-502).
# Code left byte-identical; comments added only.
# Fetch one page of mylist items and convert each to a url-transparent
# entry handled by NiconicoIE.
476 def _fetch_page(self
, list_id
, page
):
# Request this page from the mylist API; page size comes from the class
# constant _PAGE_SIZE (definition not visible in this chunk).
478 items
= self
._call
_api
(list_id
, 'page %d' % page
, {
480 'pageSize': self
._PAGE
_SIZE
,
# Per-item processing (loop header lost): pull the video object and id.
483 video
= item
.get('video') or {}
484 video_id
= video
.get('id')
# 'count' holds per-metric counters; get_count coerces them to ints.
487 count
= video
.get('count') or {}
488 get_count
= lambda x
: int_or_none(count
.get(x
))
# Entry dict (opener lost): delegated to NiconicoIE via ie_key.
492 'title': video
.get('title'),
493 'url': 'https://www.nicovideo.jp/watch/' + video_id
,
494 'description': video
.get('shortDescription'),
495 'duration': int_or_none(video
.get('duration')),
496 'view_count': get_count('view'),
497 'comment_count': get_count('comment'),
498 'ie_key': NiconicoIE
.ie_key(),
# Merge uploader fields parsed from the video's owner object.
500 info
.update(self
._parse
_owner
(video
))
# NOTE(review): corrupted extract — the query dict for the 'list' call
# (lines 506-507), one InAdvancePagedList argument (line 511) and the final
# `return result` are missing / past the visible end. Code left
# byte-identical; comments added only.
# Playlist entry point: resolve the mylist, then page through it lazily.
503 def _real_extract(self
, url
):
504 list_id
= self
._match
_id
(url
)
# First call fetches the list's own metadata (name, description, counts).
505 mylist
= self
._call
_api
(list_id
, 'list', {
# Lazy pagination: page count derived from totalItemCount / _PAGE_SIZE.
508 entries
= InAdvancePagedList(
509 functools
.partial(self
._fetch
_page
, list_id
),
510 math
.ceil(mylist
['totalItemCount'] / self
._PAGE
_SIZE
),
# Wrap entries in a standard playlist result, then merge uploader info
# parsed from the mylist's owner (return statement lost below).
512 result
= self
.playlist_result(
513 entries
, list_id
, mylist
.get('name'), mylist
.get('description'))
514 result
.update(self
._parse
_owner
(mylist
))