]> jfr.im git - yt-dlp.git/blob - yt_dlp/extractor/bilibili.py
Add option `--sleep-requests` to sleep b/w requests (Closes #106)
[yt-dlp.git] / yt_dlp / extractor / bilibili.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import hashlib
5 import json
6 import re
7
8 from .common import InfoExtractor, SearchInfoExtractor
9 from ..compat import (
10 compat_parse_qs,
11 compat_urlparse,
12 )
13 from ..utils import (
14 ExtractorError,
15 int_or_none,
16 float_or_none,
17 parse_iso8601,
18 smuggle_url,
19 str_or_none,
20 strip_jsonp,
21 unified_timestamp,
22 unsmuggle_url,
23 urlencode_postdata,
24 )
25
26
class BiliBiliIE(InfoExtractor):
    # Extractor for single bilibili videos. Handles:
    #   * plain videos: bilibili.(tv|com)/video/av<id> and /video/BV<id>,
    #     optionally with a ?p=<page> part selector
    #   * bangumi (anime) episode URLs: bangumi.bilibili.com/anime/<anime_id>/play#<id>
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|bangumi)\.)?
                        bilibili\.(?:tv|com)/
                        (?:
                            (?:
                                video/[aA][vV]|
                                anime/(?P<anime_id>\d+)/play\#
                            )(?P<id>\d+)|
                            video/[bB][vV](?P<id_bv>[^/?#&]+)
                        )
                        (?:/?\?p=(?P<page>\d+))?
                    '''

    _TESTS = [{
        'url': 'http://www.bilibili.com/video/av1074402/',
        'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
        'info_dict': {
            'id': '1074402',
            'ext': 'flv',
            'title': '【金坷垃】金泡沫',
            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
            'duration': 308.067,
            'timestamp': 1398012678,
            'upload_date': '20140420',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': '菊子桑',
            'uploader_id': '156160',
        },
    }, {
        # Tested in BiliBiliBangumiIE
        'url': 'http://bangumi.bilibili.com/anime/1869/play#40062',
        'only_matching': True,
    }, {
        # bilibili.tv
        'url': 'http://www.bilibili.tv/video/av1074402/',
        'only_matching': True,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/5802/play#100643',
        'md5': '3f721ad1e75030cc06faf73587cfec57',
        'info_dict': {
            'id': '100643',
            'ext': 'mp4',
            'title': 'CHAOS;CHILD',
            'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...',
        },
        'skip': 'Geo-restricted to China',
    }, {
        # Title with double quotes
        'url': 'http://www.bilibili.com/video/av8903802/',
        'info_dict': {
            'id': '8903802',
            'title': '阿滴英文|英文歌分享#6 "Closer',
            'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
        },
        'playlist': [{
            'info_dict': {
                'id': '8903802_part1',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }, {
            'info_dict': {
                'id': '8903802_part2',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }]
    }, {
        # new BV video id format
        'url': 'https://www.bilibili.com/video/BV1JE411F741',
        'only_matching': True,
    }]

    # App credentials used to sign playurl API requests (see _real_extract)
    _APP_KEY = 'iVGUTjsxvpLeuDCf'
    _BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt'

    def _report_error(self, result):
        """Raise an ExtractorError describing an API error response dict."""
        if 'message' in result:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True)
        elif 'code' in result:
            raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
        else:
            raise ExtractorError('Can\'t extract Bangumi episode ID')

    def _real_extract(self, url):
        # smuggled_data may carry 'no_bangumi_tip' when this extractor is
        # invoked through BiliBiliBangumiIE (suppresses the hint below)
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id_bv') or mobj.group('id')

        # Resolve the canonical pair of ids (numeric av, alphanumeric BV);
        # the numeric id is used for all subsequent requests
        av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
        video_id = av_id

        anime_id = mobj.group('anime_id')
        page_id = mobj.group('page')
        webpage = self._download_webpage(url, video_id)

        if 'anime/' not in url:
            # Regular video: the cid (stream identifier) is embedded in the
            # page markup or, failing that, in legacy EmbedPlayer/iframe
            # parameters. When no ?p= part was requested, page_id is None and
            # the first (page-specific) pattern simply never matches.
            cid = self._search_regex(
                r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + str(page_id), webpage, 'cid',
                default=None
            ) or self._search_regex(
                r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
                default=None
            ) or compat_parse_qs(self._search_regex(
                [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
                 r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
                 r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
                webpage, 'player parameters'))['cid'][0]
        else:
            # Bangumi episode: the cid must be requested from a dedicated API
            if 'no_bangumi_tip' not in smuggled_data:
                self.to_screen('Downloading episode %s. To download all videos in anime %s, re-run yt-dlp with %s' % (
                    video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id)))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Referer': url
            }
            headers.update(self.geo_verification_headers())

            js = self._download_json(
                'http://bangumi.bilibili.com/web_api/get_source', video_id,
                data=urlencode_postdata({'episode_id': video_id}),
                headers=headers)
            if 'result' not in js:
                self._report_error(js)
            cid = js['result']['cid']

        headers = {
            'Referer': url
        }
        headers.update(self.geo_verification_headers())

        entries = []

        # Try the high-quality rendition first, then fall back to plain MP4;
        # only the last attempt is allowed to be fatal
        RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
        for num, rendition in enumerate(RENDITIONS, start=1):
            # The playurl API authenticates requests with an md5 signature
            # of the payload concatenated with the app secret
            payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
            sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()

            video_info = self._download_json(
                'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
                video_id, note='Downloading video info page',
                headers=headers, fatal=num == len(RENDITIONS))

            if not video_info:
                continue

            if 'durl' not in video_info:
                if num < len(RENDITIONS):
                    continue
                self._report_error(video_info)

            # Each 'durl' element is one part of a (possibly multi-part) video
            for idx, durl in enumerate(video_info['durl']):
                formats = [{
                    'url': durl['url'],
                    'filesize': int_or_none(durl['size']),
                }]
                for backup_url in durl.get('backup_url', []):
                    formats.append({
                        'url': backup_url,
                        # backup URLs have lower priorities
                        'quality': -2 if 'hd.mp4' in backup_url else -3,
                    })

                # Every format needs a Referer header when downloading
                for a_format in formats:
                    a_format.setdefault('http_headers', {}).update({
                        'Referer': url,
                    })

                self._sort_formats(formats)

                # NOTE: the 0-based part index here is renumbered to 1-based
                # below for the multi-part case; for a single part the id is
                # overwritten by info['id'] via entry.update(info)
                entries.append({
                    'id': '%s_part%s' % (video_id, idx),
                    'duration': float_or_none(durl.get('length'), 1000),
                    'formats': formats,
                })
            break

        # Page-level metadata; multi-page videos get a '_p<page>' title suffix
        title = self._html_search_regex(
            (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
            group='title') + ('_p' + str(page_id) if page_id is not None else '')
        description = self._html_search_meta('description', webpage)
        timestamp = unified_timestamp(self._html_search_regex(
            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
            default=None) or self._html_search_meta(
            'uploadDate', webpage, 'timestamp', default=None))
        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)

        # TODO 'view_count' requires deobfuscating Javascript
        # video_info here is whichever rendition response succeeded above
        info = {
            'id': str(video_id) if page_id is None else '%s_p%s' % (video_id, page_id),
            'cid': cid,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnail': thumbnail,
            'duration': float_or_none(video_info.get('timelength'), scale=1000),
        }

        uploader_mobj = re.search(
            r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
            webpage)
        if uploader_mobj:
            info.update({
                'uploader': uploader_mobj.group('name'),
                'uploader_id': uploader_mobj.group('id'),
            })

        # Fall back to the <meta name="author"> tag for the uploader name
        if not info.get('uploader'):
            info['uploader'] = self._html_search_meta(
                'author', webpage, 'uploader', default=None)

        # Comments are only fetched when explicitly requested via --write-comments
        comments = None
        if self._downloader.params.get('getcomments', False):
            comments = self._get_all_comment_pages(video_id)

        # raw_danmaku is the raw XML of the on-screen comment track
        raw_danmaku = self._get_raw_danmaku(video_id, cid)

        raw_tags = self._get_tags(video_id)
        tags = list(map(lambda x: x['tag_name'], raw_tags))

        # Metadata that belongs on the top-level result only, not on every part
        top_level_info = {
            'raw_danmaku': raw_danmaku,
            'comments': comments,
            'comment_count': len(comments) if comments is not None else None,
            'tags': tags,
            'raw_tags': raw_tags,
        }

        '''
        # Requires https://github.com/m13253/danmaku2ass which is licenced under GPL3
        # See https://github.com/animelover1984/youtube-dl
        danmaku = NiconicoIE.CreateDanmaku(raw_danmaku, commentType='Bilibili', x=1024, y=576)
        entries[0]['subtitles'] = {
            'danmaku': [{
                'ext': 'ass',
                'data': danmaku
            }]
        }
        '''

        # Propagate the shared metadata into every part entry
        for entry in entries:
            entry.update(info)

        if len(entries) == 1:
            # Single part: flatten to a plain video result
            entries[0].update(top_level_info)
            return entries[0]
        else:
            # Multi-part video: renumber the parts 1-based and wrap in a
            # multi_video result carrying the shared metadata
            for idx, entry in enumerate(entries):
                entry['id'] = '%s_part%d' % (video_id, (idx + 1))

            global_info = {
                '_type': 'multi_video',
                'id': video_id,
                'bv_id': bv_id,
                'title': title,
                'description': description,
                'entries': entries,
            }

            global_info.update(info)
            global_info.update(top_level_info)

            return global_info

    def _get_video_id_set(self, id, is_bv):
        """Resolve a video id (either av or BV form) to its (aid, bvid) pair via the API."""
        query = {'bvid': id} if is_bv else {'aid': id}
        response = self._download_json(
            "http://api.bilibili.cn/x/web-interface/view",
            id, query=query,
            note='Grabbing original ID via API')

        if response['code'] == -400:
            raise ExtractorError('Video ID does not exist', expected=True, video_id=id)
        elif response['code'] != 0:
            raise ExtractorError('Unknown error occurred during API check (code %s)' % response['code'], expected=True, video_id=id)
        return (response['data']['aid'], response['data']['bvid'])

    # recursive solution to getting every page of comments for the video
    # we can stop when we reach a page without any comments
    # NOTE(review): recursion depth grows with the page count, so extremely
    # long comment sections could hit Python's recursion limit — confirm
    def _get_all_comment_pages(self, video_id, commentPageNumber=0):
        comment_url = "https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=%s&type=1&oid=%s&sort=2&_=1567227301685" % (commentPageNumber, video_id)
        json_str = self._download_webpage(
            comment_url, video_id,
            note='Extracting comments from page %s' % (commentPageNumber))
        replies = json.loads(json_str)['data']['replies']
        if replies is None:
            return []
        return self._get_all_children(replies) + self._get_all_comment_pages(video_id, commentPageNumber + 1)

    # extracts all comments in the tree
    def _get_all_children(self, replies):
        """Flatten a nested reply tree into a list of comment dicts."""
        if replies is None:
            return []

        ret = []
        for reply in replies:
            author = reply['member']['uname']
            author_id = reply['member']['mid']
            id = reply['rpid']
            text = reply['content']['message']
            timestamp = reply['ctime']
            # A parent id of 0 marks a top-level comment
            parent = reply['parent'] if reply['parent'] != 0 else 'root'

            comment = {
                "author": author,
                "author_id": author_id,
                "id": id,
                "text": text,
                "timestamp": timestamp,
                "parent": parent,
            }
            ret.append(comment)

            # from the JSON, the comment structure seems arbitrarily deep, but I could be wrong.
            # Regardless, this should work.
            ret += self._get_all_children(reply['replies'])

        return ret

    def _get_raw_danmaku(self, video_id, cid):
        """Download the raw danmaku (on-screen comment) XML for the given cid."""
        # This will be useful if I decide to scrape all pages instead of doing them individually
        # cid_url = "https://www.bilibili.com/widget/getPageList?aid=%s" % (video_id)
        # cid_str = self._download_webpage(cid_url, video_id, note=False)
        # cid = json.loads(cid_str)[0]['cid']

        danmaku_url = "https://comment.bilibili.com/%s.xml" % (cid)
        danmaku = self._download_webpage(danmaku_url, video_id, note='Downloading danmaku comments')
        return danmaku

    def _get_tags(self, video_id):
        """Download the list of tag dicts attached to the video."""
        tags_url = "https://api.bilibili.com/x/tag/archive/tags?aid=%s" % (video_id)
        tags_json = self._download_json(tags_url, video_id, note='Downloading tags')
        return tags_json['data']
380
381
class BiliBiliBangumiIE(InfoExtractor):
    # Extractor for bangumi (anime season) overview pages; yields a playlist
    # of episode URLs that are handled by BiliBiliIE.
    _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P<id>\d+)'

    IE_NAME = 'bangumi.bilibili.com'
    IE_DESC = 'BiliBili番剧'

    _TESTS = [{
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist_count': 26,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist': [{
            'md5': '91da8621454dd58316851c27c68b0c13',
            'info_dict': {
                'id': '40062',
                'ext': 'mp4',
                'title': '混沌武士',
                'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...',
                'timestamp': 1414538739,
                'upload_date': '20141028',
                'episode': '疾风怒涛 Tempestuous Temperaments',
                'episode_number': 1,
            },
        }],
        'params': {
            'playlist_items': '1',
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Direct episode URLs (anime/<id>/play#<ep>) are handled by BiliBiliIE
        return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url)

    def _real_extract(self, url):
        """Extract all episodes of a bangumi season as a playlist result."""
        bangumi_id = self._match_id(url)

        # Sometimes this API returns a JSONP response
        season_info = self._download_json(
            'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id,
            bangumi_id, transform_source=strip_jsonp)['result']

        entries = [{
            '_type': 'url_transparent',
            # no_bangumi_tip suppresses BiliBiliIE's "download all episodes" hint
            'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}),
            'ie_key': BiliBiliIE.ie_key(),
            'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '),
            'episode': episode.get('index_title'),
            'episode_number': int_or_none(episode.get('index')),
        } for episode in season_info['episodes']]

        # 'index' may be non-numeric (e.g. specials), leaving episode_number
        # as None; substitute 0 in the sort key since comparing None with int
        # raises TypeError under Python 3
        entries = sorted(entries, key=lambda entry: entry.get('episode_number') or 0)

        return self.playlist_result(
            entries, bangumi_id,
            season_info.get('bangumi_title'), season_info.get('evaluate'))
447
448
class BilibiliChannelIE(InfoExtractor):
    # Escape the first dot: an unescaped '.' matched any character, so URLs
    # like 'https://spacexbilibili.com/1' would wrongly be accepted
    _VALID_URL = r'https?://space\.bilibili\.com/(?P<id>\d+)'
    # May need to add support for pagination? Need to find a user with many video uploads to test
    _API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=1&ps=25&jsonp=jsonp"
    _TEST = {}  # TODO: Add tests

    def _real_extract(self, url):
        """Extract a user's uploaded videos (first API page) as a playlist."""
        list_id = self._match_id(url)
        # Use the channel id (not the literal string "None") as the logging id
        json_str = self._download_webpage(self._API_URL % list_id, list_id)

        json_parsed = json.loads(json_str)
        entries = [{
            '_type': 'url',
            'ie_key': BiliBiliIE.ie_key(),
            'url': ('https://www.bilibili.com/video/%s' %
                    entry['bvid']),
            'id': entry['bvid'],
        } for entry in json_parsed['data']['list']['vlist']]

        return {
            '_type': 'playlist',
            'id': list_id,
            'entries': entries
        }
473
474
class BiliBiliSearchIE(SearchInfoExtractor):
    IE_DESC = 'Bilibili video search, "bilisearch" keyword'
    _MAX_RESULTS = 100000
    _SEARCH_KEY = 'bilisearch'
    # Hard cap on the total number of collected results
    MAX_NUMBER_OF_RESULTS = 1000

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        entries = []
        pageNumber = 0
        while True:
            pageNumber += 1
            # FIXME
            api_url = "https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1" % (pageNumber, query)
            json_str = self._download_webpage(
                api_url, "None", query={"Search_key": query},
                note='Extracting results from page %s' % pageNumber)
            data = json.loads(json_str)['data']

            # No result set at all: stop and return what has been collected
            if "result" not in data:
                break

            videos = data['result']
            # An empty page means the results are exhausted; without this
            # check the loop would spin forever since entries never grows
            if not videos:
                break
            for video in videos:
                e = self.url_result(video['arcurl'], 'BiliBili', str(video['aid']))
                entries.append(e)

            # The original compared len(videos) (one page, ~20 items) against
            # MAX_NUMBER_OF_RESULTS, so the cap could never trigger; bound the
            # total number of collected entries instead
            if len(entries) >= n or len(entries) >= self.MAX_NUMBER_OF_RESULTS:
                break

        return {
            '_type': 'playlist',
            'id': query,
            'entries': entries[:n]
        }
514
515
class BilibiliAudioBaseIE(InfoExtractor):
    def _call_api(self, path, sid, query=None):
        """Call the audio web API endpoint *path* and return its 'data' payload."""
        # Default to looking the resource up by its sid
        effective_query = query or {'sid': sid}
        response = self._download_json(
            'https://www.bilibili.com/audio/music-service-c/web/' + path,
            sid, query=effective_query)
        return response['data']
523
524
class BilibiliAudioIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/au1003142',
        'md5': 'fec4987014ec94ef9e666d4d158ad03b',
        'info_dict': {
            'id': '1003142',
            'ext': 'm4a',
            'title': '【tsukimi】YELLOW / 神山羊',
            'artist': 'tsukimi',
            'comment_count': int,
            'description': 'YELLOW的mp3版!',
            'duration': 183,
            'subtitles': {
                'origin': [{
                    'ext': 'lrc',
                }],
            },
            'thumbnail': r're:^https?://.+\.jpg',
            'timestamp': 1564836614,
            'upload_date': '20190803',
            'uploader': 'tsukimi-つきみぐー',
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        """Extract a single audio track (au<id>) with its metadata and lyrics."""
        au_id = self._match_id(url)

        # The 'url' endpoint yields the CDN location and size of the file
        playback = self._call_api('url', au_id)
        formats = [{
            'url': playback['cdns'][0],
            'filesize': int_or_none(playback.get('size')),
        }]

        song = self._call_api('song/info', au_id)
        title = song['title']
        stats = song.get('statistic') or {}

        # LRC lyrics, when present, are exposed as an 'origin' subtitle track
        lyric_url = song.get('lyric')
        subtitles = {'origin': [{'url': lyric_url}]} if lyric_url else None

        return {
            'id': au_id,
            'title': title,
            'formats': formats,
            'artist': song.get('author'),
            'comment_count': int_or_none(stats.get('comment')),
            'description': song.get('intro'),
            'duration': int_or_none(song.get('duration')),
            'subtitles': subtitles,
            'thumbnail': song.get('cover'),
            'timestamp': int_or_none(song.get('passtime')),
            'uploader': song.get('uname'),
            'view_count': int_or_none(stats.get('play')),
        }
587
588
class BilibiliAudioAlbumIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/am10624',
        'info_dict': {
            'id': '10624',
            'title': '每日新曲推荐(每日11:00更新)',
            'description': '每天11:00更新,为你推送最新音乐',
        },
        'playlist_count': 19,
    }

    def _real_extract(self, url):
        """Extract an audio album (am<id>) as a playlist of its tracks."""
        am_id = self._match_id(url)

        tracks = self._call_api(
            'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data']

        # One playlist entry per track; tracks without a usable id are skipped
        entries = [
            self.url_result(
                'https://www.bilibili.com/audio/au' + sid,
                BilibiliAudioIE.ie_key(), sid)
            for sid in (str_or_none(track.get('id')) for track in tracks)
            if sid]

        if not entries:
            return self.playlist_result(entries, am_id)

        album_data = self._call_api('menu/info', am_id) or {}
        album_title = album_data.get('title')
        if not album_title:
            # No album metadata available: plain playlist without title/description
            return self.playlist_result(entries, am_id)

        for entry in entries:
            entry['album'] = album_title
        return self.playlist_result(
            entries, am_id, album_title, album_data.get('intro'))
626
627
class BiliBiliPlayerIE(InfoExtractor):
    _VALID_URL = r'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
        'only_matching': True,
    }

    def _real_extract(self, url):
        """Redirect embedded-player URLs to the main BiliBili extractor."""
        video_id = self._match_id(url)
        canonical_url = 'http://www.bilibili.tv/video/av%s/' % video_id
        return self.url_result(
            canonical_url, ie=BiliBiliIE.ie_key(), video_id=video_id)