# coding: utf-8
from __future__ import unicode_literals

import hashlib
import json
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_iso8601,
    smuggle_url,
    str_or_none,
    strip_jsonp,
    unified_timestamp,
    unsmuggle_url,
    urlencode_postdata,
)

class BiliBiliIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|bangumi)\.)?
                        bilibili\.(?:tv|com)/
                        (?:
                            (?:
                                video/[aA][vV]|
                                anime/(?P<anime_id>\d+)/play\#
                            )(?P<id>\d+)|
                            video/[bB][vV](?P<id_bv>[^/?#&]+)
                        )
                        (?:/?\?p=(?P<page>\d+))?
                    '''

    _TESTS = [{
        'url': 'http://www.bilibili.com/video/av1074402/',
        'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
        'info_dict': {
            'id': '1074402',
            'ext': 'flv',
            'title': '【金坷垃】金泡沫',
            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
            'duration': 308.067,
            'timestamp': 1398012678,
            'upload_date': '20140420',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': '菊子桑',
            'uploader_id': '156160',
        },
    }, {
        # Tested in BiliBiliBangumiIE
        'url': 'http://bangumi.bilibili.com/anime/1869/play#40062',
        'only_matching': True,
    }, {
        # bilibili.tv
        'url': 'http://www.bilibili.tv/video/av1074402/',
        'only_matching': True,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/5802/play#100643',
        'md5': '3f721ad1e75030cc06faf73587cfec57',
        'info_dict': {
            'id': '100643',
            'ext': 'mp4',
            'title': 'CHAOS;CHILD',
            'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...',
        },
        'skip': 'Geo-restricted to China',
    }, {
        # Title with double quotes
        'url': 'http://www.bilibili.com/video/av8903802/',
        'info_dict': {
            'id': '8903802',
            'title': '阿滴英文|英文歌分享#6 "Closer',
            'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
        },
        'playlist': [{
            'info_dict': {
                'id': '8903802_part1',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }, {
            'info_dict': {
                'id': '8903802_part2',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }]
    }, {
        # new BV video id format
        'url': 'https://www.bilibili.com/video/BV1JE411F741',
        'only_matching': True,
    }]

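    # App key and the matching secret used to sign playurl API requests
    # (see the MD5 signing in _real_extract below)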
    _APP_KEY = 'iVGUTjsxvpLeuDCf'
    _BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt'

    def _report_error(self, result):
        if 'message' in result:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True)
        elif 'code' in result:
            raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
        else:
            raise ExtractorError('Can\'t extract Bangumi episode ID')

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id_bv') or mobj.group('id')

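        # Resolve both the numeric AV id and the BV id via the API so the rest
        # of the extraction can work with the numeric id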
        av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
        video_id = av_id

        anime_id = mobj.group('anime_id')
        page_id = mobj.group('page')
        webpage = self._download_webpage(url, video_id)
        headers = {
            'Referer': url,
            'Accept': '*/*'
        }
        headers.update(self.geo_verification_headers())

        if 'anime/' not in url:
            cid = self._search_regex(
                r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + str(page_id), webpage, 'cid',
                default=None
            ) or self._search_regex(
                r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
                default=None
            ) or compat_parse_qs(self._search_regex(
                [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
                 r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
                 r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
                webpage, 'player parameters'))['cid'][0]
        else:
            if 'no_bangumi_tip' not in smuggled_data:
                self.to_screen('Downloading episode %s. To download all videos in anime %s, re-run yt-dlp with %s' % (
                    video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id)))

            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            js = self._download_json(
                'http://bangumi.bilibili.com/web_api/get_source', video_id,
                data=urlencode_postdata({'episode_id': video_id}),
                headers=headers)
            if 'result' not in js:
                self._report_error(js)
            cid = js['result']['cid']

        entries = []

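        # Try the high-quality (qn=80) rendition first and fall back to the
        # plain MP4 rendition; only the last attempt is treated as fatal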
        RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
        for num, rendition in enumerate(RENDITIONS, start=1):
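            # The playurl API expects a signed query string: the MD5 of the
            # payload concatenated with the app secret is passed as `sign`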
            payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
            sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()

            video_info = self._download_json(
                'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
                video_id, note='Downloading video info page',
                headers=headers, fatal=num == len(RENDITIONS))

            if not video_info:
                continue

            if 'durl' not in video_info:
                if num < len(RENDITIONS):
                    continue
                self._report_error(video_info)

            for idx, durl in enumerate(video_info['durl']):
                formats = [{
                    'url': durl['url'],
                    'filesize': int_or_none(durl['size']),
                }]
                for backup_url in durl.get('backup_url', []):
                    formats.append({
                        'url': backup_url,
                        # backup URLs have lower priorities
                        'quality': -2 if 'hd.mp4' in backup_url else -3,
                    })

                for a_format in formats:
                    a_format.setdefault('http_headers', {}).update({
                        'Referer': url,
                    })

                self._sort_formats(formats)

                entries.append({
                    'id': '%s_part%s' % (video_id, idx),
                    'duration': float_or_none(durl.get('length'), 1000),
                    'formats': formats,
                })
            break

        title = self._html_search_regex(
            (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
            group='title') + ('_p' + str(page_id) if page_id is not None else '')
        description = self._html_search_meta('description', webpage)
        timestamp = unified_timestamp(self._html_search_regex(
            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
            default=None) or self._html_search_meta(
            'uploadDate', webpage, 'timestamp', default=None))
        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)

        # TODO 'view_count' requires deobfuscating Javascript
        info = {
            'id': str(video_id) if page_id is None else '%s_p%s' % (video_id, page_id),
            'cid': cid,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnail': thumbnail,
            'duration': float_or_none(video_info.get('timelength'), scale=1000),
        }

        uploader_mobj = re.search(
            r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
            webpage)
        if uploader_mobj:
            info.update({
                'uploader': uploader_mobj.group('name'),
                'uploader_id': uploader_mobj.group('id'),
            })

        if not info.get('uploader'):
            info['uploader'] = self._html_search_meta(
                'author', webpage, 'uploader', default=None)

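        # Danmaku (overlaid scrolling comments) are downloaded as raw XML keyed by cid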
        raw_danmaku = self._get_raw_danmaku(video_id, cid)

        raw_tags = self._get_tags(video_id)
        tags = list(map(lambda x: x['tag_name'], raw_tags))

        top_level_info = {
            'raw_danmaku': raw_danmaku,
            'tags': tags,
            'raw_tags': raw_tags,
        }
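        # Only fetch comments when explicitly requested; defer the (potentially
        # many) API requests to a post-extractor callback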
        if self._downloader.params.get('getcomments', False):
            def get_comments():
                comments = self._get_all_comment_pages(video_id)
                return {
                    'comments': comments,
                    'comment_count': len(comments)
                }

            top_level_info['__post_extractor'] = get_comments

        '''
        # Requires https://github.com/m13253/danmaku2ass which is licensed under GPL3
        # See https://github.com/animelover1984/youtube-dl
        danmaku = NiconicoIE.CreateDanmaku(raw_danmaku, commentType='Bilibili', x=1024, y=576)
        entries[0]['subtitles'] = {
            'danmaku': [{
                'ext': 'ass',
                'data': danmaku
            }]
        }
        '''

        for entry in entries:
            entry.update(info)

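        # Single-part videos are returned as a plain entry; multi-part videos
        # become a multi_video playlist with per-part ids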
        if len(entries) == 1:
            entries[0].update(top_level_info)
            return entries[0]
        else:
            for idx, entry in enumerate(entries):
                entry['id'] = '%s_part%d' % (video_id, (idx + 1))

            global_info = {
                '_type': 'multi_video',
                'id': video_id,
                'bv_id': bv_id,
                'title': title,
                'description': description,
                'entries': entries,
            }

            global_info.update(info)
            global_info.update(top_level_info)

            return global_info

    def _get_video_id_set(self, id, is_bv):
        query = {'bvid': id} if is_bv else {'aid': id}
        response = self._download_json(
            "http://api.bilibili.cn/x/web-interface/view",
            id, query=query,
            note='Grabbing original ID via API')

        if response['code'] == -400:
            raise ExtractorError('Video ID does not exist', expected=True, video_id=id)
        elif response['code'] != 0:
            raise ExtractorError('Unknown error occurred during API check (code %s)' % response['code'], expected=True, video_id=id)
        return (response['data']['aid'], response['data']['bvid'])

    # Recursively fetch every page of comments for the video; stop once the
    # API returns a page without any replies
    def _get_all_comment_pages(self, video_id, commentPageNumber=0):
        comment_url = "https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=%s&type=1&oid=%s&sort=2&_=1567227301685" % (commentPageNumber, video_id)
        json_str = self._download_webpage(
            comment_url, video_id,
            note='Extracting comments from page %s' % (commentPageNumber))
        replies = json.loads(json_str)['data']['replies']
        if replies is None:
            return []
        return self._get_all_children(replies) + self._get_all_comment_pages(video_id, commentPageNumber + 1)

    # extracts all comments in the tree
    def _get_all_children(self, replies):
        if replies is None:
            return []

        ret = []
        for reply in replies:
            author = reply['member']['uname']
            author_id = reply['member']['mid']
            id = reply['rpid']
            text = reply['content']['message']
            timestamp = reply['ctime']
            parent = reply['parent'] if reply['parent'] != 0 else 'root'

            comment = {
                "author": author,
                "author_id": author_id,
                "id": id,
                "text": text,
                "timestamp": timestamp,
                "parent": parent,
            }
            ret.append(comment)

            # Replies can be nested arbitrarily deep, so recurse into each
            # reply's children as well
            ret += self._get_all_children(reply['replies'])

        return ret

    def _get_raw_danmaku(self, video_id, cid):
        # Could be used to look up the cid of every page at once instead of per page:
        # cid_url = "https://www.bilibili.com/widget/getPageList?aid=%s" % (video_id)
        # cid_str = self._download_webpage(cid_url, video_id, note=False)
        # cid = json.loads(cid_str)[0]['cid']

        danmaku_url = "https://comment.bilibili.com/%s.xml" % (cid)
        danmaku = self._download_webpage(danmaku_url, video_id, note='Downloading danmaku comments')
        return danmaku

    def _get_tags(self, video_id):
        tags_url = "https://api.bilibili.com/x/tag/archive/tags?aid=%s" % (video_id)
        tags_json = self._download_json(tags_url, video_id, note='Downloading tags')
        return tags_json['data']


class BiliBiliBangumiIE(InfoExtractor):
    _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P<id>\d+)'

    IE_NAME = 'bangumi.bilibili.com'
    IE_DESC = 'BiliBili番剧'

    _TESTS = [{
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist_count': 26,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist': [{
            'md5': '91da8621454dd58316851c27c68b0c13',
            'info_dict': {
                'id': '40062',
                'ext': 'mp4',
                'title': '混沌武士',
                'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...',
                'timestamp': 1414538739,
                'upload_date': '20141028',
                'episode': '疾风怒涛 Tempestuous Temperaments',
                'episode_number': 1,
            },
        }],
        'params': {
            'playlist_items': '1',
        },
    }]

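    # anime/<id>/play#<episode_id> URLs are handled by BiliBiliIE, so only
    # accept URLs that BiliBiliIE does not match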
    @classmethod
    def suitable(cls, url):
        return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url)

    def _real_extract(self, url):
        bangumi_id = self._match_id(url)

        # Sometimes this API returns a JSONP response
        season_info = self._download_json(
            'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id,
            bangumi_id, transform_source=strip_jsonp)['result']

        entries = [{
            '_type': 'url_transparent',
            'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}),
            'ie_key': BiliBiliIE.ie_key(),
            'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '),
            'episode': episode.get('index_title'),
            'episode_number': int_or_none(episode.get('index')),
        } for episode in season_info['episodes']]

        entries = sorted(entries, key=lambda entry: entry.get('episode_number'))

        return self.playlist_result(
            entries, bangumi_id,
            season_info.get('bangumi_title'), season_info.get('evaluate'))


class BilibiliChannelIE(InfoExtractor):
    _VALID_URL = r'https?://space\.bilibili\.com/(?P<id>\d+)'
    # TODO: add pagination support; the API request below only fetches the
    # first page of 25 uploads
    _API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=1&ps=25&jsonp=jsonp"
    _TEST = {}  # TODO: Add tests

    def _real_extract(self, url):
        list_id = self._match_id(url)
        json_str = self._download_webpage(self._API_URL % list_id, "None")

        json_parsed = json.loads(json_str)
        entries = [{
            '_type': 'url',
            'ie_key': BiliBiliIE.ie_key(),
            'url': ('https://www.bilibili.com/video/%s' %
                    entry['bvid']),
            'id': entry['bvid'],
        } for entry in json_parsed['data']['list']['vlist']]

        return {
            '_type': 'playlist',
            'id': list_id,
            'entries': entries
        }


class BiliBiliSearchIE(SearchInfoExtractor):
    IE_DESC = 'Bilibili video search, "bilisearch" keyword'
    _MAX_RESULTS = 100000
    _SEARCH_KEY = 'bilisearch'
    MAX_NUMBER_OF_RESULTS = 1000

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        entries = []
        pageNumber = 0
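        # The search API is paginated; keep requesting pages until n results
        # have been collected or the service's result cap is reached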
        while True:
            pageNumber += 1
            # FIXME
            api_url = "https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1" % (pageNumber, query)
            json_str = self._download_webpage(
                api_url, "None", query={"Search_key": query},
                note='Extracting results from page %s' % pageNumber)
            data = json.loads(json_str)['data']

            # FIXME: this is hideous
            if "result" not in data:
                return {
                    '_type': 'playlist',
                    'id': query,
                    'entries': entries[:n]
                }

            videos = data['result']
            for video in videos:
                e = self.url_result(video['arcurl'], 'BiliBili', str(video['aid']))
                entries.append(e)

            if len(entries) >= n or len(videos) >= BiliBiliSearchIE.MAX_NUMBER_OF_RESULTS:
                return {
                    '_type': 'playlist',
                    'id': query,
                    'entries': entries[:n]
                }

class BilibiliAudioBaseIE(InfoExtractor):
    def _call_api(self, path, sid, query=None):
        if not query:
            query = {'sid': sid}
        return self._download_json(
            'https://www.bilibili.com/audio/music-service-c/web/' + path,
            sid, query=query)['data']


class BilibiliAudioIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/au1003142',
        'md5': 'fec4987014ec94ef9e666d4d158ad03b',
        'info_dict': {
            'id': '1003142',
            'ext': 'm4a',
            'title': '【tsukimi】YELLOW / 神山羊',
            'artist': 'tsukimi',
            'comment_count': int,
            'description': 'YELLOW的mp3版!',
            'duration': 183,
            'subtitles': {
                'origin': [{
                    'ext': 'lrc',
                }],
            },
            'thumbnail': r're:^https?://.+\.jpg',
            'timestamp': 1564836614,
            'upload_date': '20190803',
            'uploader': 'tsukimi-つきみぐー',
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        au_id = self._match_id(url)

        play_data = self._call_api('url', au_id)
        formats = [{
            'url': play_data['cdns'][0],
            'filesize': int_or_none(play_data.get('size')),
            'vcodec': 'none'
        }]

        song = self._call_api('song/info', au_id)
        title = song['title']
        statistic = song.get('statistic') or {}

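        # Lyrics, when available, come as an LRC file and are surfaced as a
        # subtitle track under the 'origin' key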
        subtitles = None
        lyric = song.get('lyric')
        if lyric:
            subtitles = {
                'origin': [{
                    'url': lyric,
                }]
            }

        return {
            'id': au_id,
            'title': title,
            'formats': formats,
            'artist': song.get('author'),
            'comment_count': int_or_none(statistic.get('comment')),
            'description': song.get('intro'),
            'duration': int_or_none(song.get('duration')),
            'subtitles': subtitles,
            'thumbnail': song.get('cover'),
            'timestamp': int_or_none(song.get('passtime')),
            'uploader': song.get('uname'),
            'view_count': int_or_none(statistic.get('play')),
        }


class BilibiliAudioAlbumIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/am10624',
        'info_dict': {
            'id': '10624',
            'title': '每日新曲推荐(每日11:00更新)',
            'description': '每天11:00更新,为你推送最新音乐',
        },
        'playlist_count': 19,
    }

    def _real_extract(self, url):
        am_id = self._match_id(url)

        songs = self._call_api(
            'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data']

        entries = []
        for song in songs:
            sid = str_or_none(song.get('id'))
            if not sid:
                continue
            entries.append(self.url_result(
                'https://www.bilibili.com/audio/au' + sid,
                BilibiliAudioIE.ie_key(), sid))

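        # If album metadata is available, tag every entry with the album title
        # and use the album intro as the playlist description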
        if entries:
            album_data = self._call_api('menu/info', am_id) or {}
            album_title = album_data.get('title')
            if album_title:
                for entry in entries:
                    entry['album'] = album_title
                return self.playlist_result(
                    entries, am_id, album_title, album_data.get('intro'))

        return self.playlist_result(entries, am_id)


class BiliBiliPlayerIE(InfoExtractor):
    _VALID_URL = r'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
        'only_matching': True,
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result(
            'http://www.bilibili.tv/video/av%s/' % video_id,
            ie=BiliBiliIE.ie_key(), video_id=video_id)