2 from __future__
import unicode_literals
8 from .common
import InfoExtractor
, SearchInfoExtractor
# NOTE(review): Corrupted fragment of the BiliBiliIE class header: its
# _VALID_URL pattern pieces, _TESTS fixtures and the app/API signing keys.
# Statements are split mid-token and many original lines are missing (the
# bare integers are leftover original line numbers from the mangling), so
# the text is preserved verbatim rather than reconstructed.
# TODO(review): restore this region from upstream version control.
27 class BiliBiliIE(InfoExtractor
):
# _VALID_URL fragments (host alternation, anime/play, BV-id, page query):
30 (?:(?:www|bangumi)\.)?
35 anime/(?P<anime_id>\d+)/play\#
37 video/[bB][vV](?P<id_bv>[^/?#&]+)
39 (?:/?\?p=(?P<page>\d+))?
# _TESTS fixture fragments below:
43 'url': 'http://www.bilibili.com/video/av1074402/',
44 'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
49 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
51 'timestamp': 1398012678,
52 'upload_date': '20140420',
53 'thumbnail': r
're:^https?://.+\.jpg',
55 'uploader_id': '156160',
58 # Tested in BiliBiliBangumiIE
59 'url': 'http://bangumi.bilibili.com/anime/1869/play#40062',
60 'only_matching': True,
63 'url': 'http://www.bilibili.tv/video/av1074402/',
64 'only_matching': True,
66 'url': 'http://bangumi.bilibili.com/anime/5802/play#100643',
67 'md5': '3f721ad1e75030cc06faf73587cfec57',
71 'title': 'CHAOS;CHILD',
72 'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...',
74 'skip': 'Geo-restricted to China',
76 # Title with double quotes
77 'url': 'http://www.bilibili.com/video/av8903802/',
80 'title': '阿滴英文|英文歌分享#6 "Closer',
81 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
85 'id': '8903802_part1',
87 'title': '阿滴英文|英文歌分享#6 "Closer',
88 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
90 'uploader_id': '65880958',
91 'timestamp': 1488382634,
92 'upload_date': '20170301',
95 'skip_download': True, # Test metadata only
99 'id': '8903802_part2',
101 'title': '阿滴英文|英文歌分享#6 "Closer',
102 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
104 'uploader_id': '65880958',
105 'timestamp': 1488382634,
106 'upload_date': '20170301',
109 'skip_download': True, # Test metadata only
113 # new BV video id format
114 'url': 'https://www.bilibili.com/video/BV1JE411F741',
115 'only_matching': True,
# App key pair used to sign playurl API requests (see _real_extract):
118 _APP_KEY
= 'iVGUTjsxvpLeuDCf'
119 _BILIBILI_KEY
= 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt'
def _report_error(self, result):
    """Raise an ExtractorError describing an error payload from the API.

    Prefers the server-supplied 'message', falls back to the numeric
    'code', and finally raises a generic error if neither is present.
    Always raises; never returns.
    """
    if 'message' in result:
        raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True)
    elif 'code' in result:
        raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
    else:
        # NOTE(review): the `else:` line was missing from the corrupted
        # source; it is implied by the trailing raise at original line 127.
        raise ExtractorError('Can\'t extract Bangumi episode ID')
# NOTE(review): The body of BiliBiliIE._real_extract below is corrupted
# extraction residue: statements are split mid-token across lines and many
# original source lines are missing entirely (request-header dicts, the
# formats/entries assembly, the final return). Too much is absent to
# reconstruct safely, so the fragment is preserved verbatim.
# TODO(review): restore this method from upstream version control.
129 def _real_extract(self
, url
):
130 url
, smuggled_data
= unsmuggle_url(url
, {})
132 mobj
= re
.match(self
._VALID
_URL
, url
)
133 video_id
= mobj
.group('id_bv') or mobj
.group('id')
135 av_id
, bv_id
= self
._get
_video
_id
_set
(video_id
, mobj
.group('id_bv') is not None)
138 anime_id
= mobj
.group('anime_id')
139 page_id
= mobj
.group('page')
140 webpage
= self
._download
_webpage
(url
, video_id
)
# cid extraction for plain (non-bangumi) video pages:
142 if 'anime/' not in url
:
143 cid
= self
._search
_regex
(
144 r
'\bcid(?:["\']:|
=)(\d
+),["\']page(?:["\']:|
=)' + str(page_id), webpage, 'cid
',
146 ) or self._search_regex(
147 r'\bcid
(?
:["\']:|=)(\d+)', webpage, 'cid',
149 ) or compat_parse_qs(self._search_regex(
150 [r'EmbedPlayer\([^)]+,\s*"([^
"]+)"\
)',
151 r'EmbedPlayer\
([^
)]+,\s
*\\"([^"]+)\\"\)',
152 r'<iframe[^>]+src="https
://secure\
.bilibili\
.com
/secure
,([^
"]+)"'],
153 webpage, 'player parameters
'))['cid
'][0]
# bangumi (anime) branch — resolves cid via the bangumi web API:
155 if 'no_bangumi_tip
' not in smuggled_data:
156 self.to_screen('Downloading episode
%s. To download all videos
in anime
%s, re
-run yt
-dlp
with %s' % (
157 video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi
.bilibili
.com
/anime
/%s' % anime_id)))
159 'Content
-Type
': 'application
/x
-www
-form
-urlencoded
; charset
=UTF
-8',
162 headers.update(self.geo_verification_headers())
164 js = self._download_json(
165 'http
://bangumi
.bilibili
.com
/web_api
/get_source
', video_id,
166 data=urlencode_postdata({'episode_id': video_id}),
168 if 'result
' not in js:
169 self._report_error(js)
170 cid = js['result
']['cid
']
175 headers.update(self.geo_verification_headers())
# renditions loop — signs each playurl request with _APP_KEY/_BILIBILI_KEY:
179 RENDITIONS = ('qn
=80&quality
=80&type=', 'quality
=2&type=mp4
')
180 for num, rendition in enumerate(RENDITIONS, start=1):
181 payload = 'appkey
=%s&cid
=%s&otype
=json
&%s' % (self._APP_KEY, cid, rendition)
182 sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf
-8')).hexdigest()
184 video_info = self._download_json(
185 'http
://interface
.bilibili
.com
/v2
/playurl?
%s&sign
=%s' % (payload, sign),
186 video_id, note='Downloading video info page
',
187 headers=headers, fatal=num == len(RENDITIONS))
192 if 'durl
' not in video_info:
193 if num < len(RENDITIONS):
195 self._report_error(video_info)
# per-part format construction:
197 for idx, durl in enumerate(video_info['durl
']):
200 'filesize
': int_or_none(durl['size
']),
202 for backup_url in durl.get('backup_url
', []):
205 # backup URLs have lower priorities
206 'quality
': -2 if 'hd
.mp4
' in backup_url else -3,
209 for a_format in formats:
210 a_format.setdefault('http_headers
', {}).update({
214 self._sort_formats(formats)
217 'id': '%s_part
%s' % (video_id, idx),
218 'duration
': float_or_none(durl.get('length
'), 1000),
# page-level metadata scraped from the webpage:
223 title = self._html_search_regex(
224 (r'<h1
[^
>]+\btitle
=(["\'])(?P<title>(?:(?!\1).)+)\1',
225 r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
226 group='title') + ('_p' + str(page_id) if page_id is not None else '')
227 description = self._html_search_meta('description', webpage)
228 timestamp = unified_timestamp(self._html_search_regex(
229 r'<time[^>]+datetime="([^
"]+)"', webpage, 'upload time
',
230 default=None) or self._html_search_meta(
231 'uploadDate
', webpage, 'timestamp
', default=None))
232 thumbnail = self._html_search_meta(['og
:image
', 'thumbnailUrl
'], webpage)
234 # TODO 'view_count
' requires deobfuscating Javascript
236 'id': str(video_id) if page_id is None else '%s_p
%s' % (video_id, page_id),
239 'description
': description,
240 'timestamp
': timestamp,
241 'thumbnail
': thumbnail,
242 'duration
': float_or_none(video_info.get('timelength
'), scale=1000),
245 uploader_mobj = re.search(
246 r'<a
[^
>]+href
="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^
>]*>(?P
<name
>[^
<]+)',
250 'uploader
': uploader_mobj.group('name
'),
251 'uploader_id
': uploader_mobj.group('id'),
254 if not info.get('uploader
'):
255 info['uploader
'] = self._html_search_meta(
256 'author
', webpage, 'uploader
', default=None)
# comments / danmaku / tags enrichment:
259 if self._downloader.params.get('getcomments
', False):
260 comments = self._get_all_comment_pages(video_id)
262 raw_danmaku = self._get_raw_danmaku(video_id, cid)
264 raw_tags = self._get_tags(video_id)
265 tags = list(map(lambda x: x['tag_name
'], raw_tags))
268 'raw_danmaku
': raw_danmaku,
269 'comments
': comments,
270 'comment_count
': len(comments) if comments is not None else None,
272 'raw_tags
': raw_tags,
276 # Requires https://github.com/m13253/danmaku2ass which is licenced under GPL3
277 # See https://github.com/animelover1984/youtube-dl
278 danmaku = NiconicoIE.CreateDanmaku(raw_danmaku, commentType='Bilibili
', x=1024, y=576)
279 entries[0]['subtitles
'] = {
# entries assembly and single/multi-video result construction:
287 for entry in entries:
290 if len(entries) == 1:
291 entries[0].update(top_level_info)
294 for idx, entry in enumerate(entries):
295 entry['id'] = '%s_part
%d' % (video_id, (idx + 1))
298 '_type
': 'multi_video
',
302 'description
': description,
306 global_info.update(info)
307 global_info.update(top_level_info)
def _get_video_id_set(self, id, is_bv):
    """Resolve an av- or BV-form video id to its (aid, bvid) pair.

    Queries the web-interface/view API with whichever id form was given
    and raises ExtractorError for a missing video (-400) or any other
    non-zero API code.
    """
    query = {'bvid': id} if is_bv else {'aid': id}
    response = self._download_json(
        "http://api.bilibili.cn/x/web-interface/view",
        # NOTE(review): this argument line was missing from the corrupted
        # source; `id, query=query,` is assumed from the call shape — confirm.
        id, query=query,
        note='Grabbing original ID via API')

    if response['code'] == -400:
        raise ExtractorError('Video ID does not exist', expected=True, video_id=id)
    elif response['code'] != 0:
        raise ExtractorError('Unknown error occurred during API check (code %s)' % response['code'], expected=True, video_id=id)
    return (response['data']['aid'], response['data']['bvid'])
# recursive solution to getting every page of comments for the video
# we can stop when we reach a page without any comments
def _get_all_comment_pages(self, video_id, commentPageNumber=0):
    """Fetch every page of comments for the video, flattened into one list."""
    comment_url = "https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=%s&type=1&oid=%s&sort=2&_=1567227301685" % (commentPageNumber, video_id)
    json_str = self._download_webpage(
        comment_url, video_id,
        note='Extracting comments from page %s' % (commentPageNumber))
    replies = json.loads(json_str)['data']['replies']
    # NOTE(review): the stop condition was missing from the corrupted source;
    # the API presumably returns null 'replies' past the last page — confirm.
    if replies is None:
        return []
    return self._get_all_children(replies) + self._get_all_comment_pages(video_id, commentPageNumber + 1)
# extracts all comments in the tree
def _get_all_children(self, replies):
    """Flatten a (possibly nested) reply tree into a list of comment dicts."""
    if replies is None:
        return []

    ret = []
    for reply in replies:
        author = reply['member']['uname']
        author_id = reply['member']['mid']
        # NOTE(review): the line between author_id and text was missing from
        # the corrupted source; a reply id from 'rpid' is assumed — confirm.
        id = reply['rpid']
        text = reply['content']['message']
        timestamp = reply['ctime']
        parent = reply['parent'] if reply['parent'] != 0 else 'root'

        # NOTE(review): several keys of this dict were missing from the
        # corrupted source; only "author_id" and "timestamp" are attested.
        comment = {
            "author": author,
            "author_id": author_id,
            "id": id,
            "text": text,
            "timestamp": timestamp,
            "parent": parent,
        }
        ret.append(comment)

        # from the JSON, the comment structure seems arbitrarily deep, but I could be wrong.
        # Regardless, this should work.
        ret += self._get_all_children(reply['replies'])

    return ret
def _get_raw_danmaku(self, video_id, cid):
    """Download the raw danmaku (bullet comment) XML for the given cid."""
    # This will be useful if I decide to scrape all pages instead of doing them individually
    # cid_url = "https://www.bilibili.com/widget/getPageList?aid=%s" % (video_id)
    # cid_str = self._download_webpage(cid_url, video_id, note=False)
    # cid = json.loads(cid_str)[0]['cid']

    danmaku_url = "https://comment.bilibili.com/%s.xml" % (cid)
    danmaku = self._download_webpage(danmaku_url, video_id, note='Downloading danmaku comments')
    # NOTE(review): the return line was missing from the corrupted source;
    # returning the downloaded XML string is assumed — confirm.
    return danmaku
def _get_tags(self, video_id):
    """Fetch the raw tag list for the video from the tag archive API."""
    tags_url = "https://api.bilibili.com/x/tag/archive/tags?aid=%s" % (video_id)
    tags_json = self._download_json(tags_url, video_id, note='Downloading tags')
    return tags_json['data']
# NOTE(review): Corrupted fragment of the BiliBiliBangumiIE class header:
# _VALID_URL / IE_NAME / IE_DESC pieces and _TESTS fixture fragments with
# many original lines missing. Preserved verbatim rather than reconstructed.
# TODO(review): restore this region from upstream version control.
382 class BiliBiliBangumiIE(InfoExtractor):
383 _VALID_URL = r'https?
://bangumi\
.bilibili\
.com
/anime
/(?P
<id>\d
+)'
385 IE_NAME = 'bangumi
.bilibili
.com
'
386 IE_DESC = 'BiliBili番剧
'
# _TESTS fixture fragments below:
389 'url
': 'http
://bangumi
.bilibili
.com
/anime
/1869',
393 'description
': 'md5
:6a9622b911565794c11f25f81d6a97d2
',
395 'playlist_count
': 26,
397 'url
': 'http
://bangumi
.bilibili
.com
/anime
/1869',
401 'description
': 'md5
:6a9622b911565794c11f25f81d6a97d2
',
404 'md5
': '91da8621454dd58316851c27c68b0c13
',
409 'description
': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子
...',
410 'timestamp
': 1414538739,
411 'upload_date
': '20141028',
412 'episode
': '疾风怒涛 Tempestuous Temperaments
',
417 'playlist_items
': '1',
@classmethod
def suitable(cls, url):
    # Defer to BiliBiliIE for any URL it already matches; otherwise fall back
    # to the regular _VALID_URL check of this extractor.
    return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url)
def _real_extract(self, url):
    """Extract an anime season as a playlist of url_transparent episode entries.

    Each entry is delegated back to BiliBiliIE via its webplay_url, with
    'no_bangumi_tip' smuggled so the per-episode hint is suppressed.
    """
    bangumi_id = self._match_id(url)

    # Sometimes this API returns a JSONP response
    season_info = self._download_json(
        'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id,
        bangumi_id, transform_source=strip_jsonp)['result']

    # NOTE(review): the `entries = [{` opener was missing from the corrupted
    # source and is inferred from the closing `} for episode in ...]` line.
    entries = [{
        '_type': 'url_transparent',
        'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}),
        'ie_key': BiliBiliIE.ie_key(),
        'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '),
        'episode': episode.get('index_title'),
        'episode_number': int_or_none(episode.get('index')),
    } for episode in season_info['episodes']]

    entries = sorted(entries, key=lambda entry: entry.get('episode_number'))

    return self.playlist_result(
        # NOTE(review): this argument line was missing from the corrupted
        # source; `entries, bangumi_id,` is assumed from the call shape.
        entries, bangumi_id,
        season_info.get('bangumi_title'), season_info.get('evaluate'))
class BilibiliChannelIE(InfoExtractor):
    """Extracts a user's upload list (space.bilibili.com) as a playlist."""
    _VALID_URL = r'https?://space.bilibili\.com/(?P<id>\d+)'
    # May need to add support for pagination? Need to find a user with many video uploads to test
    _API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=1&ps=25&jsonp=jsonp"
    _TEST = {}  # TODO: Add tests

    def _real_extract(self, url):
        list_id = self._match_id(url)
        json_str = self._download_webpage(self._API_URL % list_id, "None")

        json_parsed = json.loads(json_str)
        # NOTE(review): the list-comp opener, the '_type'/'id' keys and the
        # final return were missing from the corrupted source and are
        # reconstructed from the surviving 'ie_key'/'url' lines — confirm.
        entries = [{
            '_type': 'url',
            'ie_key': BiliBiliIE.ie_key(),
            'url': ('https://www.bilibili.com/video/%s' %
                    entry['bvid']),
            'id': entry['bvid'],
        } for entry in json_parsed['data']['list']['vlist']]

        return {
            '_type': 'playlist',
            'id': list_id,
            'entries': entries
        }
# NOTE(review): Corrupted fragment of BiliBiliSearchIE. The pagination loop
# setup, result accumulation and early-return bodies of _get_n_results are
# missing from the corrupted source, so the fragment is preserved verbatim.
# TODO(review): restore this class from upstream version control.
475 class BiliBiliSearchIE(SearchInfoExtractor):
476 IE_DESC = 'Bilibili video search
, "bilisearch" keyword
'
477 _MAX_RESULTS = 100000
478 _SEARCH_KEY = 'bilisearch
'
479 MAX_NUMBER_OF_RESULTS = 1000
481 def _get_n_results(self, query, n):
482 """Get a specified number of results for a query"""
489 api_url = "https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1" % (pageNumber, query)
490 json_str = self._download_webpage(
491 api_url, "None", query={"Search_key": query},
492 note='Extracting results
from page
%s' % pageNumber)
493 data = json.loads(json_str)['data
']
495 # FIXME: this is hideous
496 if "result" not in data:
500 'entries
': entries[:n]
503 videos = data['result
']
505 e = self.url_result(video['arcurl
'], 'BiliBili
', str(video['aid
']))
508 if(len(entries) >= n or len(videos) >= BiliBiliSearchIE.MAX_NUMBER_OF_RESULTS):
512 'entries
': entries[:n]
class BilibiliAudioBaseIE(InfoExtractor):
    """Shared helper for the Bilibili audio (music) extractors."""

    def _call_api(self, path, sid, query=None):
        """Call the audio music-service web API and return its 'data' payload."""
        # NOTE(review): the default-query lines were missing from the
        # corrupted source; defaulting to {'sid': sid} matches upstream.
        if not query:
            query = {'sid': sid}
        return self._download_json(
            'https://www.bilibili.com/audio/music-service-c/web/' + path,
            sid, query=query)['data']
# NOTE(review): Corrupted fragment of the BilibiliAudioIE class header:
# _VALID_URL pieces and _TEST fixture fragments with many original lines
# missing. Preserved verbatim rather than reconstructed.
# TODO(review): restore this region from upstream version control.
525 class BilibiliAudioIE(BilibiliAudioBaseIE):
526 _VALID_URL = r'https?
://(?
:www\
.)?bilibili\
.com
/audio
/au(?P
<id>\d
+)'
# _TEST fixture fragments below:
528 'url
': 'https
://www
.bilibili
.com
/audio
/au1003142
',
529 'md5
': 'fec4987014ec94ef9e666d4d158ad03b
',
533 'title
': '【tsukimi】YELLOW
/ 神山羊
',
535 'comment_count
': int,
536 'description
': 'YELLOW的mp3版!
',
543 'thumbnail
': r're
:^https?
://.+\
.jpg
',
544 'timestamp
': 1564836614,
545 'upload_date
': '20190803',
546 'uploader
': 'tsukimi
-つきみぐー
',
def _real_extract(self, url):
    """Extract a single audio track (au...) with its metadata and lyric subtitle."""
    au_id = self._match_id(url)

    play_data = self._call_api('url', au_id)
    # NOTE(review): the list/dict delimiters of this formats literal were
    # missing from the corrupted source; reconstructed from upstream.
    formats = [{
        'url': play_data['cdns'][0],
        'filesize': int_or_none(play_data.get('size')),
    }]

    song = self._call_api('song/info', au_id)
    title = song['title']
    statistic = song.get('statistic') or {}

    # Expose the lyric URL (if any) as an 'origin' subtitle track.
    subtitles = None
    lyric = song.get('lyric')
    if lyric:
        subtitles = {
            'origin': [{
                'url': lyric,
            }]
        }

    return {
        'id': au_id,
        'title': title,
        'formats': formats,
        'artist': song.get('author'),
        'comment_count': int_or_none(statistic.get('comment')),
        'description': song.get('intro'),
        'duration': int_or_none(song.get('duration')),
        'subtitles': subtitles,
        'thumbnail': song.get('cover'),
        'timestamp': int_or_none(song.get('passtime')),
        'uploader': song.get('uname'),
        'view_count': int_or_none(statistic.get('play')),
    }
# NOTE(review): Corrupted fragment of the BilibiliAudioAlbumIE class header:
# _VALID_URL pieces and _TEST fixture fragments with original lines missing.
# Preserved verbatim rather than reconstructed.
# TODO(review): restore this region from upstream version control.
589 class BilibiliAudioAlbumIE(BilibiliAudioBaseIE):
590 _VALID_URL = r'https?
://(?
:www\
.)?bilibili\
.com
/audio
/am(?P
<id>\d
+)'
# _TEST fixture fragments below:
592 'url
': 'https
://www
.bilibili
.com
/audio
/am10624
',
595 'title
': '每日新曲推荐(每日
11:00更新)
',
596 'description
': '每天
11:00更新,为你推送最新音乐
',
598 'playlist_count
': 19,
def _real_extract(self, url):
    """Extract an audio album (am...) as a playlist of BilibiliAudioIE entries."""
    am_id = self._match_id(url)

    songs = self._call_api(
        'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data']

    entries = []
    for song in songs:
        sid = str_or_none(song.get('id'))
        # NOTE(review): the skip-on-missing-id lines were missing from the
        # corrupted source; reconstructed from upstream — confirm.
        if not sid:
            continue
        entries.append(self.url_result(
            'https://www.bilibili.com/audio/au' + sid,
            BilibiliAudioIE.ie_key(), sid))

    if entries:
        album_data = self._call_api('menu/info', am_id) or {}
        album_title = album_data.get('title')
        if album_title:
            # Tag every entry with the album title before building the playlist.
            for entry in entries:
                entry['album'] = album_title
            return self.playlist_result(
                entries, am_id, album_title, album_data.get('intro'))

    return self.playlist_result(entries, am_id)
class BiliBiliPlayerIE(InfoExtractor):
    """Redirects embedded player.bilibili.com URLs to the main BiliBiliIE."""
    _VALID_URL = r'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
    # NOTE(review): the fixture wrapper lines were missing from the corrupted
    # source; a single `_TEST = {...}` dict is assumed — confirm.
    _TEST = {
        'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
        'only_matching': True,
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Hand off to BiliBiliIE via the canonical av-id video URL.
        return self.url_result(
            'http://www.bilibili.tv/video/av%s/' % video_id,
            ie=BiliBiliIE.ie_key(), video_id=video_id)