# coding: utf-8
from __future__ import unicode_literals

import hashlib
import json
import re

from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    float_or_none,
    parse_iso8601,
    smuggle_url,
    str_or_none,
    strip_jsonp,
    unified_timestamp,
    unsmuggle_url,
    urlencode_postdata,
)


class BiliBiliIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|bangumi)\.)?
                        bilibili\.(?:tv|com)/
                        (?:
                            (?:
                                video/[aA][vV]|
                                anime/(?P<anime_id>\d+)/play\#
                            )(?P<id>\d+)|
                            video/[bB][vV](?P<id_bv>[^/?#&]+)
                        )
                        (?:/?\?p=(?P<page>\d+))?
                    '''

    _TESTS = [{
        'url': 'http://www.bilibili.com/video/av1074402/',
        'md5': '5f7d29e1a2872f3df0cf76b1f87d3788',
        'info_dict': {
            'id': '1074402',
            'ext': 'flv',
            'title': '【金坷垃】金泡沫',
            'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
            'duration': 308.067,
            'timestamp': 1398012678,
            'upload_date': '20140420',
            'thumbnail': r're:^https?://.+\.jpg',
            'uploader': '菊子桑',
            'uploader_id': '156160',
        },
    }, {
        # Tested in BiliBiliBangumiIE
        'url': 'http://bangumi.bilibili.com/anime/1869/play#40062',
        'only_matching': True,
    }, {
        # bilibili.tv
        'url': 'http://www.bilibili.tv/video/av1074402/',
        'only_matching': True,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/5802/play#100643',
        'md5': '3f721ad1e75030cc06faf73587cfec57',
        'info_dict': {
            'id': '100643',
            'ext': 'mp4',
            'title': 'CHAOS;CHILD',
            'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...',
        },
        'skip': 'Geo-restricted to China',
    }, {
        # Title with double quotes
        'url': 'http://www.bilibili.com/video/av8903802/',
        'info_dict': {
            'id': '8903802',
            'title': '阿滴英文|英文歌分享#6 "Closer',
            'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
        },
        'playlist': [{
            'info_dict': {
                'id': '8903802_part1',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }, {
            'info_dict': {
                'id': '8903802_part2',
                'ext': 'flv',
                'title': '阿滴英文|英文歌分享#6 "Closer',
                'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
                'uploader': '阿滴英文',
                'uploader_id': '65880958',
                'timestamp': 1488382634,
                'upload_date': '20170301',
            },
            'params': {
                'skip_download': True,  # Test metadata only
            },
        }]
    }, {
        # new BV video id format
        'url': 'https://www.bilibili.com/video/BV1JE411F741',
        'only_matching': True,
    }]

    _APP_KEY = 'iVGUTjsxvpLeuDCf'
    _BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt'
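    # Note: these appear to be the public appkey/secret pair that bilibili's
    # own player embeds for signing playurl API requests (see _real_extract
    # below); they are not credentials specific to this extractor.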

    def _report_error(self, result):
        if 'message' in result:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True)
        elif 'code' in result:
            raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True)
        else:
            raise ExtractorError('Can\'t extract Bangumi episode ID')

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id_bv') or mobj.group('id')

        av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None)
        video_id = av_id

        anime_id = mobj.group('anime_id')
        page_id = mobj.group('page')
        webpage = self._download_webpage(url, video_id)

        if 'anime/' not in url:
            cid = self._search_regex(
                r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + str(page_id), webpage, 'cid',
                default=None
            ) or self._search_regex(
                r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid',
                default=None
            ) or compat_parse_qs(self._search_regex(
                [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
                 r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)',
                 r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
                webpage, 'player parameters'))['cid'][0]
        else:
            if 'no_bangumi_tip' not in smuggled_data:
                self.to_screen('Downloading episode %s. To download all videos in anime %s, re-run yt-dlp with %s' % (
                    video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id)))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Referer': url
            }
            headers.update(self.geo_verification_headers())

            js = self._download_json(
                'http://bangumi.bilibili.com/web_api/get_source', video_id,
                data=urlencode_postdata({'episode_id': video_id}),
                headers=headers)
            if 'result' not in js:
                self._report_error(js)
            cid = js['result']['cid']

        headers = {
            'Referer': url
        }
        headers.update(self.geo_verification_headers())

        entries = []

        RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4')
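        # Each rendition is tried in order: first the high-quality (qn=80)
        # stream, then a plain MP4 fallback. Requests to the playurl API are
        # signed by appending the MD5 of the query string concatenated with
        # the app secret, roughly (illustrative sketch of the loop below,
        # not extra code that runs here):
        #
        #   payload = 'appkey=%s&cid=%s&otype=json&qn=80&quality=80&type=' % (...)
        #   sign = hashlib.md5((payload + _BILIBILI_KEY).encode('utf-8')).hexdigest()
        #   url = 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign)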
        for num, rendition in enumerate(RENDITIONS, start=1):
            payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition)
            sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()

            video_info = self._download_json(
                'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign),
                video_id, note='Downloading video info page',
                headers=headers, fatal=num == len(RENDITIONS))

            if not video_info:
                continue

            if 'durl' not in video_info:
                if num < len(RENDITIONS):
                    continue
                self._report_error(video_info)

            for idx, durl in enumerate(video_info['durl']):
                formats = [{
                    'url': durl['url'],
                    'filesize': int_or_none(durl['size']),
                }]
                for backup_url in durl.get('backup_url', []):
                    formats.append({
                        'url': backup_url,
                        # backup URLs have lower priority
                        'quality': -2 if 'hd.mp4' in backup_url else -3,
                    })

                for a_format in formats:
                    a_format.setdefault('http_headers', {}).update({
                        'Referer': url,
                    })

                self._sort_formats(formats)

                entries.append({
                    'id': '%s_part%s' % (video_id, idx),
                    'duration': float_or_none(durl.get('length'), scale=1000),
                    'formats': formats,
                })
            break

        title = self._html_search_regex(
            (r'<h1[^>]+\btitle=(["\'])(?P<title>(?:(?!\1).)+)\1',
             r'(?s)<h1[^>]*>(?P<title>.+?)</h1>'), webpage, 'title',
            group='title') + ('_p' + str(page_id) if page_id is not None else '')
        description = self._html_search_meta('description', webpage)
        timestamp = unified_timestamp(self._html_search_regex(
            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
            default=None) or self._html_search_meta(
            'uploadDate', webpage, 'timestamp', default=None))
        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)

        # TODO: 'view_count' requires deobfuscating JavaScript
        info = {
            'id': str(video_id) if page_id is None else '%s_p%s' % (video_id, page_id),
            'cid': cid,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnail': thumbnail,
            'duration': float_or_none(video_info.get('timelength'), scale=1000),
        }

        uploader_mobj = re.search(
            r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>(?P<name>[^<]+)',
            webpage)
        if uploader_mobj:
            info.update({
                'uploader': uploader_mobj.group('name'),
                'uploader_id': uploader_mobj.group('id'),
            })

        if not info.get('uploader'):
            info['uploader'] = self._html_search_meta(
                'author', webpage, 'uploader', default=None)

        raw_danmaku = self._get_raw_danmaku(video_id, cid)

        raw_tags = self._get_tags(video_id)
        tags = [t['tag_name'] for t in raw_tags]

        top_level_info = {
            'raw_danmaku': raw_danmaku,
            'tags': tags,
            'raw_tags': raw_tags,
        }
        if self._downloader.params.get('getcomments', False):
            def get_comments():
                comments = self._get_all_comment_pages(video_id)
                return {
                    'comments': comments,
                    'comment_count': len(comments)
                }

            top_level_info['__post_extractor'] = get_comments

        '''
        # Requires https://github.com/m13253/danmaku2ass which is licensed under GPL3
        # See https://github.com/animelover1984/youtube-dl
        # (NiconicoIE would also need to be imported for this to work)
        danmaku = NiconicoIE.CreateDanmaku(raw_danmaku, commentType='Bilibili', x=1024, y=576)
        entries[0]['subtitles'] = {
            'danmaku': [{
                'ext': 'ass',
                'data': danmaku
            }]
        }
        '''

        for entry in entries:
            entry.update(info)

        if len(entries) == 1:
            entries[0].update(top_level_info)
            return entries[0]
        else:
            for idx, entry in enumerate(entries):
                entry['id'] = '%s_part%d' % (video_id, idx + 1)

            global_info = {
                '_type': 'multi_video',
                'id': video_id,
                'bv_id': bv_id,
                'title': title,
                'description': description,
                'entries': entries,
            }

            global_info.update(info)
            global_info.update(top_level_info)

            return global_info

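    # Resolve a video's canonical numeric (av) id and its BV id via the
    # web-interface view API. Judging from the handling below, the endpoint
    # responds with JSON shaped roughly like (illustrative, inferred from
    # the fields accessed here, not an official schema):
    #   {'code': 0, 'data': {'aid': 1074402, 'bvid': 'BV1xx411c7XW', ...}}
    # where a nonzero 'code' signals an error (-400: no such video).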
    def _get_video_id_set(self, video_id, is_bv):
        query = {'bvid': video_id} if is_bv else {'aid': video_id}
        response = self._download_json(
            'http://api.bilibili.cn/x/web-interface/view',
            video_id, query=query,
            note='Grabbing original ID via API')

        if response['code'] == -400:
            raise ExtractorError('Video ID does not exist', expected=True, video_id=video_id)
        elif response['code'] != 0:
            raise ExtractorError('Unknown error occurred during API check (code %s)' % response['code'], expected=True, video_id=video_id)
        return (response['data']['aid'], response['data']['bvid'])

    # Recursively fetch every page of comments for the video;
    # we can stop as soon as a page comes back without any replies
    def _get_all_comment_pages(self, video_id, commentPageNumber=0):
        comment_url = 'https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=%s&type=1&oid=%s&sort=2&_=1567227301685' % (commentPageNumber, video_id)
        json_str = self._download_webpage(
            comment_url, video_id,
            note='Extracting comments from page %s' % commentPageNumber)
        replies = json.loads(json_str)['data']['replies']
        if replies is None:
            return []
        return self._get_all_children(replies) + self._get_all_comment_pages(video_id, commentPageNumber + 1)

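    # A reply object, as consumed below, looks roughly like this
    # (shape inferred from the fields accessed, not an official schema):
    #   {'rpid': ..., 'ctime': ..., 'parent': 0,
    #    'member': {'uname': ..., 'mid': ...},
    #    'content': {'message': ...},
    #    'replies': [<nested reply objects>] or None}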
    # Extract every comment in the reply tree
    def _get_all_children(self, replies):
        if replies is None:
            return []

        ret = []
        for reply in replies:
            author = reply['member']['uname']
            author_id = reply['member']['mid']
            comment_id = reply['rpid']
            text = reply['content']['message']
            timestamp = reply['ctime']
            parent = reply['parent'] if reply['parent'] != 0 else 'root'

            comment = {
                'author': author,
                'author_id': author_id,
                'id': comment_id,
                'text': text,
                'timestamp': timestamp,
                'parent': parent,
            }
            ret.append(comment)

            # The reply tree can be arbitrarily deep, so recurse into
            # each comment's own replies
            ret += self._get_all_children(reply['replies'])

        return ret

    def _get_raw_danmaku(self, video_id, cid):
        # This would be useful if all pages were scraped at once instead of individually:
        # cid_url = 'https://www.bilibili.com/widget/getPageList?aid=%s' % video_id
        # cid_str = self._download_webpage(cid_url, video_id, note=False)
        # cid = json.loads(cid_str)[0]['cid']

        danmaku_url = 'https://comment.bilibili.com/%s.xml' % cid
        danmaku = self._download_webpage(danmaku_url, video_id, note='Downloading danmaku comments')
        return danmaku

    def _get_tags(self, video_id):
        tags_url = 'https://api.bilibili.com/x/tag/archive/tags?aid=%s' % video_id
        tags_json = self._download_json(tags_url, video_id, note='Downloading tags')
        return tags_json['data']


class BiliBiliBangumiIE(InfoExtractor):
    _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P<id>\d+)'

    IE_NAME = 'bangumi.bilibili.com'
    IE_DESC = 'BiliBili番剧'

    _TESTS = [{
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist_count': 26,
    }, {
        'url': 'http://bangumi.bilibili.com/anime/1869',
        'info_dict': {
            'id': '1869',
            'title': '混沌武士',
            'description': 'md5:6a9622b911565794c11f25f81d6a97d2',
        },
        'playlist': [{
            'md5': '91da8621454dd58316851c27c68b0c13',
            'info_dict': {
                'id': '40062',
                'ext': 'mp4',
                'title': '混沌武士',
                'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...',
                'timestamp': 1414538739,
                'upload_date': '20141028',
                'episode': '疾风怒涛 Tempestuous Temperaments',
                'episode_number': 1,
            },
        }],
        'params': {
            'playlist_items': '1',
        },
    }]

    @classmethod
    def suitable(cls, url):
        return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url)

    def _real_extract(self, url):
        bangumi_id = self._match_id(url)

        # Sometimes this API returns a JSONP response
        season_info = self._download_json(
            'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id,
            bangumi_id, transform_source=strip_jsonp)['result']

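        # season_info carries the fields consumed below (shape inferred from
        # this code, not an official schema): an 'episodes' list whose items
        # hold 'webplay_url', 'update_time', 'index_title' and 'index', plus
        # top-level 'bangumi_title' and 'evaluate' used for the playlist.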
        entries = [{
            '_type': 'url_transparent',
            'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}),
            'ie_key': BiliBiliIE.ie_key(),
            'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '),
            'episode': episode.get('index_title'),
            'episode_number': int_or_none(episode.get('index')),
        } for episode in season_info['episodes']]

        entries = sorted(entries, key=lambda entry: entry.get('episode_number'))

        return self.playlist_result(
            entries, bangumi_id,
            season_info.get('bangumi_title'), season_info.get('evaluate'))


class BilibiliChannelIE(InfoExtractor):
    _VALID_URL = r'https?://space\.bilibili\.com/(?P<id>\d+)'
    # TODO: Add pagination support; needs a channel with more than 25 uploads to test against
    _API_URL = 'https://api.bilibili.com/x/space/arc/search?mid=%s&pn=1&ps=25&jsonp=jsonp'
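    # In the query string above, mid is the numeric user id while pn and ps
    # are the page number and page size (names inferred from bilibili's web
    # API conventions); iterating pn would be the natural pagination hook.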
    _TEST = {}  # TODO: Add tests

    def _real_extract(self, url):
        list_id = self._match_id(url)
        json_str = self._download_webpage(self._API_URL % list_id, list_id)

        json_parsed = json.loads(json_str)
        entries = [{
            '_type': 'url',
            'ie_key': BiliBiliIE.ie_key(),
            'url': ('https://www.bilibili.com/video/%s' %
                    entry['bvid']),
            'id': entry['bvid'],
        } for entry in json_parsed['data']['list']['vlist']]

        return {
            '_type': 'playlist',
            'id': list_id,
            'entries': entries
        }


class BiliBiliSearchIE(SearchInfoExtractor):
    IE_DESC = 'Bilibili video search, "bilisearch" keyword'
    _MAX_RESULTS = 100000
    _SEARCH_KEY = 'bilisearch'
    MAX_NUMBER_OF_RESULTS = 1000
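    # Usage mirrors other search extractors (illustrative):
    #   yt-dlp "bilisearch5:<keyword>"    downloads the first 5 results
    #   yt-dlp "bilisearchall:<keyword>"  downloads every result, up to the
    #                                     MAX_NUMBER_OF_RESULTS cap above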

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        entries = []
        pageNumber = 0
        while True:
            pageNumber += 1
            # FIXME
            api_url = 'https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=pubdate&keyword=%s&duration=0&tids_2=&__refresh__=true&search_type=video&tids=0&highlight=1' % (pageNumber, query)
            json_str = self._download_webpage(
                api_url, 'None', query={'Search_key': query},
                note='Extracting results from page %s' % pageNumber)
            data = json.loads(json_str)['data']

            # FIXME: this is hideous
            if 'result' not in data:
                return {
                    '_type': 'playlist',
                    'id': query,
                    'entries': entries[:n]
                }

            videos = data['result']
            for video in videos:
                e = self.url_result(video['arcurl'], 'BiliBili', str(video['aid']))
                entries.append(e)

            if len(entries) >= n or len(videos) >= BiliBiliSearchIE.MAX_NUMBER_OF_RESULTS:
                return {
                    '_type': 'playlist',
                    'id': query,
                    'entries': entries[:n]
                }


class BilibiliAudioBaseIE(InfoExtractor):
    def _call_api(self, path, sid, query=None):
        if not query:
            query = {'sid': sid}
        return self._download_json(
            'https://www.bilibili.com/audio/music-service-c/web/' + path,
            sid, query=query)['data']


class BilibiliAudioIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/au1003142',
        'md5': 'fec4987014ec94ef9e666d4d158ad03b',
        'info_dict': {
            'id': '1003142',
            'ext': 'm4a',
            'title': '【tsukimi】YELLOW / 神山羊',
            'artist': 'tsukimi',
            'comment_count': int,
            'description': 'YELLOW的mp3版!',
            'duration': 183,
            'subtitles': {
                'origin': [{
                    'ext': 'lrc',
                }],
            },
            'thumbnail': r're:^https?://.+\.jpg',
            'timestamp': 1564836614,
            'upload_date': '20190803',
            'uploader': 'tsukimi-つきみぐー',
            'view_count': int,
        },
    }

    def _real_extract(self, url):
        au_id = self._match_id(url)

        play_data = self._call_api('url', au_id)
        formats = [{
            'url': play_data['cdns'][0],
            'filesize': int_or_none(play_data.get('size')),
        }]

        song = self._call_api('song/info', au_id)
        title = song['title']
        statistic = song.get('statistic') or {}

        subtitles = None
        lyric = song.get('lyric')
        if lyric:
            subtitles = {
                'origin': [{
                    'url': lyric,
                }]
            }

        return {
            'id': au_id,
            'title': title,
            'formats': formats,
            'artist': song.get('author'),
            'comment_count': int_or_none(statistic.get('comment')),
            'description': song.get('intro'),
            'duration': int_or_none(song.get('duration')),
            'subtitles': subtitles,
            'thumbnail': song.get('cover'),
            'timestamp': int_or_none(song.get('passtime')),
            'uploader': song.get('uname'),
            'view_count': int_or_none(statistic.get('play')),
        }


class BilibiliAudioAlbumIE(BilibiliAudioBaseIE):
    _VALID_URL = r'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.bilibili.com/audio/am10624',
        'info_dict': {
            'id': '10624',
            'title': '每日新曲推荐(每日11:00更新)',
            'description': '每天11:00更新,为你推送最新音乐',
        },
        'playlist_count': 19,
    }

    def _real_extract(self, url):
        am_id = self._match_id(url)

        songs = self._call_api(
            'song/of-menu', am_id, {'sid': am_id, 'pn': 1, 'ps': 100})['data']

        entries = []
        for song in songs:
            sid = str_or_none(song.get('id'))
            if not sid:
                continue
            entries.append(self.url_result(
                'https://www.bilibili.com/audio/au' + sid,
                BilibiliAudioIE.ie_key(), sid))

        if entries:
            album_data = self._call_api('menu/info', am_id) or {}
            album_title = album_data.get('title')
            if album_title:
                for entry in entries:
                    entry['album'] = album_title
                return self.playlist_result(
                    entries, am_id, album_title, album_data.get('intro'))

        return self.playlist_result(entries, am_id)


class BiliBiliPlayerIE(InfoExtractor):
    _VALID_URL = r'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
        'only_matching': True,
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result(
            'http://www.bilibili.tv/video/av%s/' % video_id,
            ie=BiliBiliIE.ie_key(), video_id=video_id)