8 from .common
import InfoExtractor
, SearchInfoExtractor
9 from ..dependencies
import Cryptodome
26 srt_subtitles_timecode
,
35 class BilibiliBaseIE(InfoExtractor
):
36 def extract_formats(self
, play_info
):
38 r
['quality']: traverse_obj(r
, 'new_description', 'display_desc')
39 for r
in traverse_obj(play_info
, ('support_formats', lambda _
, v
: v
['quality']))
42 audios
= traverse_obj(play_info
, ('dash', 'audio', ...))
43 flac_audio
= traverse_obj(play_info
, ('dash', 'flac', 'audio'))
45 audios
.append(flac_audio
)
47 'url': traverse_obj(audio
, 'baseUrl', 'base_url', 'url'),
48 'ext': mimetype2ext(traverse_obj(audio
, 'mimeType', 'mime_type')),
49 'acodec': audio
.get('codecs'),
51 'tbr': float_or_none(audio
.get('bandwidth'), scale
=1000),
52 'filesize': int_or_none(audio
.get('size'))
53 } for audio
in audios
]
56 'url': traverse_obj(video
, 'baseUrl', 'base_url', 'url'),
57 'ext': mimetype2ext(traverse_obj(video
, 'mimeType', 'mime_type')),
58 'fps': float_or_none(traverse_obj(video
, 'frameRate', 'frame_rate')),
59 'width': int_or_none(video
.get('width')),
60 'height': int_or_none(video
.get('height')),
61 'vcodec': video
.get('codecs'),
62 'acodec': 'none' if audios
else None,
63 'tbr': float_or_none(video
.get('bandwidth'), scale
=1000),
64 'filesize': int_or_none(video
.get('size')),
65 'quality': int_or_none(video
.get('id')),
66 'format': format_names
.get(video
.get('id')),
67 } for video
in traverse_obj(play_info
, ('dash', 'video', ...)))
69 missing_formats
= format_names
.keys() - set(traverse_obj(formats
, (..., 'quality')))
71 self
.to_screen(f
'Format(s) {", ".join(format_names[i] for i in missing_formats)} are missing; '
72 f
'you have to login or become premium member to download them. {self._login_hint()}')
76 def json2srt(self
, json_data
):
78 for idx
, line
in enumerate(json_data
.get('body') or []):
79 srt_data
+= (f
'{idx + 1}\n'
80 f
'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n'
81 f
'{line["content"]}\n\n')
84 def _get_subtitles(self
, video_id
, initial_state
, cid
):
88 'url': f
'https://comment.bilibili.com/{cid}.xml',
92 for s
in traverse_obj(initial_state
, ('videoData', 'subtitle', 'list')) or []:
93 subtitles
.setdefault(s
['lan'], []).append({
95 'data': self
.json2srt(self
._download
_json
(s
['subtitle_url'], video_id
))
99 def _get_chapters(self
, aid
, cid
):
100 chapters
= aid
and cid
and self
._download
_json
(
101 'https://api.bilibili.com/x/player/v2', aid
, query
={'aid': aid, 'cid': cid}
,
102 note
='Extracting chapters', fatal
=False)
103 return traverse_obj(chapters
, ('data', 'view_points', ..., {
105 'start_time': 'from',
109 def _get_comments(self
, aid
):
110 for idx
in itertools
.count(1):
111 replies
= traverse_obj(
113 f
'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685',
114 aid
, note
=f
'Extracting comments from page {idx}', fatal
=False),
118 for children
in map(self
._get
_all
_children
, replies
):
121 def _get_all_children(self
, reply
):
123 'author': traverse_obj(reply
, ('member', 'uname')),
124 'author_id': traverse_obj(reply
, ('member', 'mid')),
125 'id': reply
.get('rpid'),
126 'text': traverse_obj(reply
, ('content', 'message')),
127 'timestamp': reply
.get('ctime'),
128 'parent': reply
.get('parent') or 'root',
130 for children
in map(self
._get
_all
_children
, traverse_obj(reply
, ('replies', ...))):
134 class BiliBiliIE(BilibiliBaseIE
):
135 _VALID_URL
= r
'https?://www\.bilibili\.com/video/[aAbB][vV](?P<id>[^/?#&]+)'
138 'url': 'https://www.bilibili.com/video/BV13x41117TL',
140 'id': 'BV13x41117TL',
141 'title': '阿滴英文|英文歌分享#6 "Closer',
143 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
144 'uploader_id': '65880958',
146 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
149 'comment_count': int,
150 'upload_date': '20170301',
151 'timestamp': 1488353834,
157 'url': 'http://www.bilibili.com/video/av1074402/',
159 'thumbnail': r
're:^https?://.*\.(jpg|jpeg)$',
162 'uploader_id': '156160',
163 'id': 'BV11x411K7CN',
166 'upload_date': '20140420',
167 'timestamp': 1397983878,
168 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
170 'comment_count': int,
174 'params': {'skip_download': True}
,
177 'url': 'https://www.bilibili.com/video/BV1bK411W797',
179 'id': 'BV1bK411W797',
180 'title': '物语中的人物是如何吐槽自己的OP的'
182 'playlist_count': 18,
185 'id': 'BV1bK411W797_p1',
187 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
189 'timestamp': 1589601697,
190 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
191 'uploader': '打牌还是打桩',
192 'uploader_id': '150259984',
194 'comment_count': int,
195 'upload_date': '20200516',
197 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
202 'note': 'Specific page of Anthology',
203 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1',
205 'id': 'BV1bK411W797_p1',
207 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
209 'timestamp': 1589601697,
210 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
211 'uploader': '打牌还是打桩',
212 'uploader_id': '150259984',
214 'comment_count': int,
215 'upload_date': '20200516',
217 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
221 'note': 'video has subtitles',
222 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh',
224 'id': 'BV12N4y1M7rh',
226 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1',
228 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4',
230 'upload_date': '20220709',
231 'uploader': '小夫Tech',
232 'timestamp': 1657347907,
233 'uploader_id': '1326814124',
234 'comment_count': int,
237 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
238 'subtitles': 'count:2'
240 'params': {'listsubtitles': True}
,
242 'url': 'https://www.bilibili.com/video/av8903802/',
244 'id': 'BV13x41117TL',
246 'title': '阿滴英文|英文歌分享#6 "Closer',
247 'upload_date': '20170301',
248 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
249 'timestamp': 1488353834,
250 'uploader_id': '65880958',
252 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
255 'comment_count': int,
260 'skip_download': True,
263 'note': 'video has chapter',
264 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/',
266 'id': 'BV1vL411G7N7',
268 'title': '如何为你的B站视频添加进度条分段',
269 'timestamp': 1634554558,
270 'upload_date': '20211018',
271 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d',
273 'uploader': '爱喝咖啡的当麻',
275 'uploader_id': '1680903',
276 'chapters': 'count:6',
277 'comment_count': int,
280 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
282 'params': {'skip_download': True}
,
285 def _real_extract(self
, url
):
286 video_id
= self
._match
_id
(url
)
287 webpage
= self
._download
_webpage
(url
, video_id
)
288 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
289 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
291 video_data
= initial_state
['videoData']
292 video_id
, title
= video_data
['bvid'], video_data
.get('title')
294 # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
295 page_list_json
= traverse_obj(
297 'https://api.bilibili.com/x/player/pagelist', video_id
,
298 fatal
=False, query
={'bvid': video_id, 'jsonp': 'jsonp'}
,
299 note
='Extracting videos in anthology'),
300 'data', expected_type
=list) or []
301 is_anthology
= len(page_list_json
) > 1
303 part_id
= int_or_none(parse_qs(url
).get('p', [None])[-1])
304 if is_anthology
and not part_id
and self
._yes
_playlist
(video_id
, video_id
):
305 return self
.playlist_from_matches(
306 page_list_json
, video_id
, title
, ie
=BiliBiliIE
,
307 getter
=lambda entry
: f
'https://www.bilibili.com/video/{video_id}?p={entry["page"]}')
310 part_id
= part_id
or 1
311 title
+= f
' p{part_id:02d} {traverse_obj(page_list_json, (part_id - 1, "part")) or ""}'
313 aid
= video_data
.get('aid')
314 old_video_id
= format_field(aid
, None, f
'%s_part{part_id or 1}')
316 cid
= traverse_obj(video_data
, ('pages', part_id
- 1, 'cid')) if part_id
else video_data
.get('cid')
319 'id': f
'{video_id}{format_field(part_id, None, "_p%d")}',
320 'formats': self
.extract_formats(play_info
),
321 '_old_archive_ids': [make_archive_id(self
, old_video_id
)] if old_video_id
else None,
323 'description': traverse_obj(initial_state
, ('videoData', 'desc')),
324 'view_count': traverse_obj(initial_state
, ('videoData', 'stat', 'view')),
325 'uploader': traverse_obj(initial_state
, ('upData', 'name')),
326 'uploader_id': traverse_obj(initial_state
, ('upData', 'mid')),
327 'like_count': traverse_obj(initial_state
, ('videoData', 'stat', 'like')),
328 'comment_count': traverse_obj(initial_state
, ('videoData', 'stat', 'reply')),
329 'tags': traverse_obj(initial_state
, ('tags', ..., 'tag_name')),
330 'thumbnail': traverse_obj(initial_state
, ('videoData', 'pic')),
331 'timestamp': traverse_obj(initial_state
, ('videoData', 'pubdate')),
332 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
333 'chapters': self
._get
_chapters
(aid
, cid
),
334 'subtitles': self
.extract_subtitles(video_id
, initial_state
, cid
),
335 '__post_extractor': self
.extract_comments(aid
),
336 'http_headers': {'Referer': url}
,
340 class BiliBiliBangumiIE(BilibiliBaseIE
):
341 _VALID_URL
= r
'(?x)https?://www\.bilibili\.com/bangumi/play/(?P<id>(?:ss|ep)\d+)'
344 'url': 'https://www.bilibili.com/bangumi/play/ss897',
354 'title': '神的记事本:第2话 你与旅行包',
355 'duration': 1428.487,
356 'timestamp': 1310809380,
357 'upload_date': '20110716',
358 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
361 'url': 'https://www.bilibili.com/bangumi/play/ep508406',
362 'only_matching': True,
365 def _real_extract(self
, url
):
366 video_id
= self
._match
_id
(url
)
367 webpage
= self
._download
_webpage
(url
, video_id
)
369 if '您所在的地区无法观看本片' in webpage
:
370 raise GeoRestrictedError('This video is restricted')
371 elif ('开通大会员观看' in webpage
and '__playinfo__' not in webpage
372 or '正在观看预览,大会员免费看全片' in webpage
):
373 self
.raise_login_required('This video is for premium members only')
375 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
376 formats
= self
.extract_formats(play_info
)
377 if (not formats
and '成为大会员抢先看' in webpage
378 and play_info
.get('durl') and not play_info
.get('dash')):
379 self
.raise_login_required('This video is for premium members only')
381 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
383 season_id
= traverse_obj(initial_state
, ('mediaInfo', 'season_id'))
384 season_number
= season_id
and next((
385 idx
+ 1 for idx
, e
in enumerate(
386 traverse_obj(initial_state
, ('mediaInfo', 'seasons', ...)))
387 if e
.get('season_id') == season_id
393 'title': traverse_obj(initial_state
, 'h1Title'),
394 'episode': traverse_obj(initial_state
, ('epInfo', 'long_title')),
395 'episode_number': int_or_none(traverse_obj(initial_state
, ('epInfo', 'title'))),
396 'series': traverse_obj(initial_state
, ('mediaInfo', 'series')),
397 'season': traverse_obj(initial_state
, ('mediaInfo', 'season_title')),
398 'season_id': season_id
,
399 'season_number': season_number
,
400 'thumbnail': traverse_obj(initial_state
, ('epInfo', 'cover')),
401 'timestamp': traverse_obj(initial_state
, ('epInfo', 'pub_time')),
402 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
403 'subtitles': self
.extract_subtitles(
404 video_id
, initial_state
, traverse_obj(initial_state
, ('epInfo', 'cid'))),
405 '__post_extractor': self
.extract_comments(traverse_obj(initial_state
, ('epInfo', 'aid'))),
406 'http_headers': {'Referer': url, **self.geo_verification_headers()}
,
410 class BiliBiliBangumiMediaIE(InfoExtractor
):
411 _VALID_URL
= r
'https?://www\.bilibili\.com/bangumi/media/md(?P<id>\d+)'
413 'url': 'https://www.bilibili.com/bangumi/media/md24097891',
417 'playlist_mincount': 25,
420 def _real_extract(self
, url
):
421 media_id
= self
._match
_id
(url
)
422 webpage
= self
._download
_webpage
(url
, media_id
)
424 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial_state', media_id
)
425 episode_list
= self
._download
_json
(
426 'https://api.bilibili.com/pgc/web/season/section', media_id
,
427 query
={'season_id': initial_state['mediaInfo']['season_id']}
,
428 note
='Downloading season info')['result']['main_section']['episodes']
430 return self
.playlist_result((
431 self
.url_result(entry
['share_url'], BiliBiliBangumiIE
, entry
['aid'])
432 for entry
in episode_list
), media_id
)
435 class BilibiliSpaceBaseIE(InfoExtractor
):
436 def _extract_playlist(self
, fetch_page
, get_metadata
, get_entries
):
437 first_page
= fetch_page(0)
438 metadata
= get_metadata(first_page
)
440 paged_list
= InAdvancePagedList(
441 lambda idx
: get_entries(fetch_page(idx
) if idx
else first_page
),
442 metadata
['page_count'], metadata
['page_size'])
444 return metadata
, paged_list
447 class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE
):
448 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)(?P<video>/video)?/?(?:[?#]|$)'
450 'url': 'https://space.bilibili.com/3985676/video',
454 'playlist_mincount': 178,
457 def _real_extract(self
, url
):
458 playlist_id
, is_video_url
= self
._match
_valid
_url
(url
).group('id', 'video')
460 self
.to_screen('A channel URL was given. Only the channel\'s videos will be downloaded. '
461 'To download audios, add a "/audio" to the URL')
463 def fetch_page(page_idx
):
465 response
= self
._download
_json
('https://api.bilibili.com/x/space/arc/search',
466 playlist_id
, note
=f
'Downloading page {page_idx}',
467 query
={'mid': playlist_id, 'pn': page_idx + 1, 'jsonp': 'jsonp'}
)
468 except ExtractorError
as e
:
469 if isinstance(e
.cause
, urllib
.error
.HTTPError
) and e
.cause
.code
== 412:
470 raise ExtractorError(
471 'Request is blocked by server (412), please add cookies, wait and try later.', expected
=True)
473 if response
['code'] == -401:
474 raise ExtractorError(
475 'Request is blocked by server (401), please add cookies, wait and try later.', expected
=True)
476 return response
['data']
478 def get_metadata(page_data
):
479 page_size
= page_data
['page']['ps']
480 entry_count
= page_data
['page']['count']
482 'page_count': math
.ceil(entry_count
/ page_size
),
483 'page_size': page_size
,
486 def get_entries(page_data
):
487 for entry
in traverse_obj(page_data
, ('list', 'vlist')) or []:
488 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE
, entry
['bvid'])
490 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
491 return self
.playlist_result(paged_list
, playlist_id
)
494 class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE
):
495 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)/audio'
497 'url': 'https://space.bilibili.com/3985676/audio',
501 'playlist_mincount': 1,
504 def _real_extract(self
, url
):
505 playlist_id
= self
._match
_id
(url
)
507 def fetch_page(page_idx
):
508 return self
._download
_json
(
509 'https://api.bilibili.com/audio/music-service/web/song/upper', playlist_id
,
510 note
=f
'Downloading page {page_idx}',
511 query
={'uid': playlist_id, 'pn': page_idx + 1, 'ps': 30, 'order': 1, 'jsonp': 'jsonp'}
)['data']
513 def get_metadata(page_data
):
515 'page_count': page_data
['pageCount'],
516 'page_size': page_data
['pageSize'],
519 def get_entries(page_data
):
520 for entry
in page_data
.get('data', []):
521 yield self
.url_result(f
'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE
, entry
['id'])
523 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
524 return self
.playlist_result(paged_list
, playlist_id
)
527 class BilibiliSpacePlaylistIE(BilibiliSpaceBaseIE
):
528 _VALID_URL
= r
'https?://space.bilibili\.com/(?P<mid>\d+)/channel/collectiondetail\?sid=(?P<sid>\d+)'
530 'url': 'https://space.bilibili.com/2142762/channel/collectiondetail?sid=57445',
532 'id': '2142762_57445',
535 'playlist_mincount': 31,
538 def _real_extract(self
, url
):
539 mid
, sid
= self
._match
_valid
_url
(url
).group('mid', 'sid')
540 playlist_id
= f
'{mid}_{sid}'
542 def fetch_page(page_idx
):
543 return self
._download
_json
(
544 'https://api.bilibili.com/x/polymer/space/seasons_archives_list',
545 playlist_id
, note
=f
'Downloading page {page_idx}',
546 query
={'mid': mid, 'season_id': sid, 'page_num': page_idx + 1, 'page_size': 30}
)['data']
548 def get_metadata(page_data
):
549 page_size
= page_data
['page']['page_size']
550 entry_count
= page_data
['page']['total']
552 'page_count': math
.ceil(entry_count
/ page_size
),
553 'page_size': page_size
,
554 'title': traverse_obj(page_data
, ('meta', 'name'))
557 def get_entries(page_data
):
558 for entry
in page_data
.get('archives', []):
559 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}',
560 BiliBiliIE
, entry
['bvid'])
562 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
563 return self
.playlist_result(paged_list
, playlist_id
, metadata
['title'])
566 class BilibiliCategoryIE(InfoExtractor
):
567 IE_NAME
= 'Bilibili category extractor'
568 _MAX_RESULTS
= 1000000
569 _VALID_URL
= r
'https?://www\.bilibili\.com/v/[a-zA-Z]+\/[a-zA-Z]+'
571 'url': 'https://www.bilibili.com/v/kichiku/mad',
573 'id': 'kichiku: mad',
574 'title': 'kichiku: mad'
576 'playlist_mincount': 45,
582 def _fetch_page(self
, api_url
, num_pages
, query
, page_num
):
583 parsed_json
= self
._download
_json
(
584 api_url
, query
, query
={'Search_key': query, 'pn': page_num}
,
585 note
='Extracting results from page %s of %s' % (page_num
, num_pages
))
587 video_list
= traverse_obj(parsed_json
, ('data', 'archives'), expected_type
=list)
589 raise ExtractorError('Failed to retrieve video list for page %d' % page_num
)
591 for video
in video_list
:
592 yield self
.url_result(
593 'https://www.bilibili.com/video/%s' % video
['bvid'], 'BiliBili', video
['bvid'])
595 def _entries(self
, category
, subcategory
, query
):
596 # map of categories : subcategories : RIDs
600 'manual_vocaloid': 126,
607 if category
not in rid_map
:
608 raise ExtractorError(
609 f
'The category {category} isn\'t supported. Supported categories: {list(rid_map.keys())}')
610 if subcategory
not in rid_map
[category
]:
611 raise ExtractorError(
612 f
'The subcategory {subcategory} isn\'t supported for this category. Supported subcategories: {list(rid_map[category].keys())}')
613 rid_value
= rid_map
[category
][subcategory
]
615 api_url
= 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
616 page_json
= self
._download
_json
(api_url
, query
, query
={'Search_key': query, 'pn': '1'}
)
617 page_data
= traverse_obj(page_json
, ('data', 'page'), expected_type
=dict)
618 count
, size
= int_or_none(page_data
.get('count')), int_or_none(page_data
.get('size'))
619 if count
is None or not size
:
620 raise ExtractorError('Failed to calculate either page count or size')
622 num_pages
= math
.ceil(count
/ size
)
624 return OnDemandPagedList(functools
.partial(
625 self
._fetch
_page
, api_url
, num_pages
, query
), size
)
627 def _real_extract(self
, url
):
628 category
, subcategory
= urllib
.parse
.urlparse(url
).path
.split('/')[2:4]
629 query
= '%s: %s' % (category
, subcategory
)
631 return self
.playlist_result(self
._entries
(category
, subcategory
, query
), query
, query
)
634 class BiliBiliSearchIE(SearchInfoExtractor
):
635 IE_DESC
= 'Bilibili video search'
636 _MAX_RESULTS
= 100000
637 _SEARCH_KEY
= 'bilisearch'
639 def _search_results(self
, query
):
640 for page_num
in itertools
.count(1):
641 videos
= self
._download
_json
(
642 'https://api.bilibili.com/x/web-interface/search/type', query
,
643 note
=f
'Extracting results from page {page_num}', query
={
650 '__refresh__': 'true',
651 'search_type': 'video',
654 })['data'].get('result')
658 yield self
.url_result(video
['arcurl'], 'BiliBili', str(video
['aid']))
661 class BilibiliAudioBaseIE(InfoExtractor
):
662 def _call_api(self
, path
, sid
, query
=None):
665 return self
._download
_json
(
666 'https://www.bilibili.com/audio/music-service-c/web/' + path
,
667 sid
, query
=query
)['data']
670 class BilibiliAudioIE(BilibiliAudioBaseIE
):
671 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
673 'url': 'https://www.bilibili.com/audio/au1003142',
674 'md5': 'fec4987014ec94ef9e666d4d158ad03b',
678 'title': '【tsukimi】YELLOW / 神山羊',
680 'comment_count': int,
681 'description': 'YELLOW的mp3版!',
688 'thumbnail': r
're:^https?://.+\.jpg',
689 'timestamp': 1564836614,
690 'upload_date': '20190803',
691 'uploader': 'tsukimi-つきみぐー',
696 def _real_extract(self
, url
):
697 au_id
= self
._match
_id
(url
)
699 play_data
= self
._call
_api
('url', au_id
)
701 'url': play_data
['cdns'][0],
702 'filesize': int_or_none(play_data
.get('size')),
706 for a_format
in formats
:
707 a_format
.setdefault('http_headers', {}).update({
711 song
= self
._call
_api
('song/info', au_id
)
712 title
= song
['title']
713 statistic
= song
.get('statistic') or {}
716 lyric
= song
.get('lyric')
728 'artist': song
.get('author'),
729 'comment_count': int_or_none(statistic
.get('comment')),
730 'description': song
.get('intro'),
731 'duration': int_or_none(song
.get('duration')),
732 'subtitles': subtitles
,
733 'thumbnail': song
.get('cover'),
734 'timestamp': int_or_none(song
.get('passtime')),
735 'uploader': song
.get('uname'),
736 'view_count': int_or_none(statistic
.get('play')),
740 class BilibiliAudioAlbumIE(BilibiliAudioBaseIE
):
741 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
743 'url': 'https://www.bilibili.com/audio/am10624',
746 'title': '每日新曲推荐(每日11:00更新)',
747 'description': '每天11:00更新,为你推送最新音乐',
749 'playlist_count': 19,
752 def _real_extract(self
, url
):
753 am_id
= self
._match
_id
(url
)
755 songs
= self
._call
_api
(
756 'song/of-menu', am_id
, {'sid': am_id, 'pn': 1, 'ps': 100}
)['data']
760 sid
= str_or_none(song
.get('id'))
763 entries
.append(self
.url_result(
764 'https://www.bilibili.com/audio/au' + sid
,
765 BilibiliAudioIE
.ie_key(), sid
))
768 album_data
= self
._call
_api
('menu/info', am_id
) or {}
769 album_title
= album_data
.get('title')
771 for entry
in entries
:
772 entry
['album'] = album_title
773 return self
.playlist_result(
774 entries
, am_id
, album_title
, album_data
.get('intro'))
776 return self
.playlist_result(entries
, am_id
)
779 class BiliBiliPlayerIE(InfoExtractor
):
780 _VALID_URL
= r
'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
782 'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
783 'only_matching': True,
786 def _real_extract(self
, url
):
787 video_id
= self
._match
_id
(url
)
788 return self
.url_result(
789 'http://www.bilibili.tv/video/av%s/' % video_id
,
790 ie
=BiliBiliIE
.ie_key(), video_id
=video_id
)
793 class BiliIntlBaseIE(InfoExtractor
):
794 _API_URL
= 'https://api.bilibili.tv/intl/gateway'
795 _NETRC_MACHINE
= 'biliintl'
797 def _call_api(self
, endpoint
, *args
, **kwargs
):
798 json
= self
._download
_json
(self
._API
_URL
+ endpoint
, *args
, **kwargs
)
800 if json
['code'] in (10004004, 10004005, 10023006):
801 self
.raise_login_required()
802 elif json
['code'] == 10004001:
803 self
.raise_geo_restricted()
805 if json
.get('message') and str(json
['code']) != json
['message']:
806 errmsg
= f
'{kwargs.get("errnote", "Unable to download JSON metadata")}: {self.IE_NAME} said: {json["message"]}'
808 errmsg
= kwargs
.get('errnote', 'Unable to download JSON metadata')
809 if kwargs
.get('fatal'):
810 raise ExtractorError(errmsg
)
812 self
.report_warning(errmsg
)
813 return json
.get('data')
815 def json2srt(self
, json
):
817 f
'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
818 for i
, line
in enumerate(traverse_obj(json
, (
819 'body', lambda _
, l
: l
['content'] and l
['from'] and l
['to']))))
822 def _get_subtitles(self
, *, ep_id
=None, aid
=None):
823 sub_json
= self
._call
_api
(
824 '/web/v2/subtitle', ep_id
or aid
, fatal
=False,
825 note
='Downloading subtitles list', errnote
='Unable to download subtitles list',
833 for sub
in sub_json
.get('subtitles') or []:
834 sub_url
= sub
.get('url')
837 sub_data
= self
._download
_json
(
838 sub_url
, ep_id
or aid
, errnote
='Unable to download subtitles', fatal
=False,
839 note
='Downloading subtitles%s' % f
' for {sub["lang"]}' if sub
.get('lang') else '')
842 subtitles
.setdefault(sub
.get('lang_key', 'en'), []).append({
844 'data': self
.json2srt(sub_data
)
848 def _get_formats(self
, *, ep_id
=None, aid
=None):
849 video_json
= self
._call
_api
(
850 '/web/playurl', ep_id
or aid
, note
='Downloading video formats',
851 errnote
='Unable to download video formats', query
=filter_dict({
856 video_json
= video_json
['playurl']
858 for vid
in video_json
.get('video') or []:
859 video_res
= vid
.get('video_resource') or {}
860 video_info
= vid
.get('stream_info') or {}
861 if not video_res
.get('url'):
864 'url': video_res
['url'],
866 'format_note': video_info
.get('desc_words'),
867 'width': video_res
.get('width'),
868 'height': video_res
.get('height'),
869 'vbr': video_res
.get('bandwidth'),
871 'vcodec': video_res
.get('codecs'),
872 'filesize': video_res
.get('size'),
874 for aud
in video_json
.get('audio_resource') or []:
875 if not aud
.get('url'):
880 'abr': aud
.get('bandwidth'),
881 'acodec': aud
.get('codecs'),
883 'filesize': aud
.get('size'),
888 def _parse_video_metadata(self
, video_data
):
890 'title': video_data
.get('title_display') or video_data
.get('title'),
891 'thumbnail': video_data
.get('cover'),
892 'episode_number': int_or_none(self
._search
_regex
(
893 r
'^E(\d+)(?:$| - )', video_data
.get('title_display') or '', 'episode number', default
=None)),
896 def _perform_login(self
, username
, password
):
897 if not Cryptodome
.RSA
:
898 raise ExtractorError('pycryptodomex not found. Please install', expected
=True)
900 key_data
= self
._download
_json
(
901 'https://passport.bilibili.tv/x/intl/passport-login/web/key?lang=en-US', None,
902 note
='Downloading login key', errnote
='Unable to download login key')['data']
904 public_key
= Cryptodome
.RSA
.importKey(key_data
['key'])
905 password_hash
= Cryptodome
.PKCS1_v1_5
.new(public_key
).encrypt((key_data
['hash'] + password
).encode('utf-8'))
906 login_post
= self
._download
_json
(
907 'https://passport.bilibili.tv/x/intl/passport-login/web/login/password?lang=en-US', None, data
=urlencode_postdata({
908 'username': username
,
909 'password': base64
.b64encode(password_hash
).decode('ascii'),
913 }), note
='Logging in', errnote
='Unable to log in')
914 if login_post
.get('code'):
915 if login_post
.get('message'):
916 raise ExtractorError(f
'Unable to log in: {self.IE_NAME} said: {login_post["message"]}', expected
=True)
918 raise ExtractorError('Unable to log in')
921 class BiliIntlIE(BiliIntlBaseIE
):
922 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(play/(?P<season_id>\d+)/(?P<ep_id>\d+)|video/(?P<aid>\d+))'
925 'url': 'https://www.bilibili.tv/en/play/34613/341736',
929 'title': 'E2 - The First Night',
930 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
932 'upload_date': '20201009',
933 'episode': 'Episode 2',
934 'timestamp': 1602259500,
935 'description': 'md5:297b5a17155eb645e14a14b385ab547e',
939 'title': '<Untitled Chapter 1>'
941 'start_time': 76.242,
945 'start_time': 1325.742,
946 'end_time': 1403.903,
952 'url': 'https://www.bilibili.tv/en/play/1033760/11005006',
956 'title': 'E3 - Who?',
957 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
959 'description': 'md5:e1a775e71a35c43f141484715470ad09',
960 'episode': 'Episode 3',
961 'upload_date': '20211219',
962 'timestamp': 1639928700,
966 'title': '<Untitled Chapter 1>'
972 'start_time': 1173.0,
973 'end_time': 1259.535,
978 # Subtitle with empty content
979 'url': 'https://www.bilibili.tv/en/play/1005144/10131790',
983 'title': 'E140 - Two Heartbeats: Kabuto\'s Trap',
984 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
985 'episode_number': 140,
987 'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
989 'url': 'https://www.bilibili.tv/en/video/2041863208',
993 'timestamp': 1670874843,
994 'description': 'Scheduled for April 2023.\nStudio: ufotable',
995 'thumbnail': r
're:https?://pic[-\.]bstarstatic.+/ugc/.+\.jpg$',
996 'upload_date': '20221212',
997 'title': 'Kimetsu no Yaiba Season 3 Official Trailer - Bstation',
1000 # episode id without intro and outro
1001 'url': 'https://www.bilibili.tv/en/play/1048837/11246489',
1005 'title': 'E1 - Operation \'Strix\' <Owl>',
1006 'description': 'md5:b4434eb1a9a97ad2bccb779514b89f17',
1007 'timestamp': 1649516400,
1008 'thumbnail': 'https://pic.bstarstatic.com/ogv/62cb1de23ada17fb70fbe7bdd6ff29c29da02a64.png',
1009 'episode': 'Episode 1',
1010 'episode_number': 1,
1011 'upload_date': '20220409',
1014 'url': 'https://www.biliintl.com/en/play/34613/341736',
1015 'only_matching': True,
1017 # User-generated content (as opposed to a series licensed from a studio)
1018 'url': 'https://bilibili.tv/en/video/2019955076',
1019 'only_matching': True,
1021 # No language in URL
1022 'url': 'https://www.bilibili.tv/video/2019955076',
1023 'only_matching': True,
1025 # Uppercase language in URL
1026 'url': 'https://www.bilibili.tv/EN/video/2019955076',
1027 'only_matching': True,
1030 def _make_url(video_id
, series_id
=None):
1032 return f
'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
1033 return f
'https://www.bilibili.tv/en/video/{video_id}'
1035 def _extract_video_metadata(self
, url
, video_id
, season_id
):
1036 url
, smuggled_data
= unsmuggle_url(url
, {})
1037 if smuggled_data
.get('title'):
1038 return smuggled_data
1040 webpage
= self
._download
_webpage
(url
, video_id
)
1043 self
._search
_json
(r
'window\.__INITIAL_(?:DATA|STATE)__\s*=', webpage
, 'preload state', video_id
, default
={})
1044 or self
._search
_nuxt
_data
(webpage
, video_id
, '__initialState', fatal
=False, traverse
=None))
1045 video_data
= traverse_obj(
1046 initial_data
, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type
=dict) or {}
1048 if season_id
and not video_data
:
1049 # Non-Bstation layout, read through episode list
1050 season_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id
)
1051 video_data
= traverse_obj(season_json
, (
1052 'sections', ..., 'episodes', lambda _
, v
: str(v
['episode_id']) == video_id
1053 ), expected_type
=dict, get_all
=False)
1055 # XXX: webpage metadata may not accurate, it just used to not crash when video_data not found
1057 self
._parse
_video
_metadata
(video_data
), self
._search
_json
_ld
(webpage
, video_id
), {
1058 'title': self
._html
_search
_meta
('og:title', webpage
),
1059 'description': self
._html
_search
_meta
('og:description', webpage
)
    def _real_extract(self, url):
        """Extract a single Bilibili International (bstation) episode or video.

        The URL carries either an episode id (OGV/anime layout) or an aid
        (user-uploaded video); whichever is present becomes the video id.
        NOTE(review): reconstructed from a garbled source — some dropped
        lines (e.g. the `chapters = None` / `if ep_id:` guard and the chapter
        titles) were restored from context; verify against upstream.
        """
        season_id, ep_id, aid = self._match_valid_url(url).group('season_id', 'ep_id', 'aid')
        video_id = ep_id or aid
        chapters = None

        if ep_id:
            # Intro/outro "skip" timestamps only exist for OGV episodes;
            # best-effort fetch (fatal=False), falling back to no chapters.
            intro_ending_json = self._call_api(
                f'/web/v2/ogv/play/episode?episode_id={ep_id}&platform=web',
                video_id, fatal=False) or {}
            if intro_ending_json.get('skip'):
                # FIXME: start/end times seem off by a few seconds even though they match ogv.*.js
                # ref: https://p.bstarstatic.com/fe-static/bstar-web-new/assets/ogv.2b147442.js
                chapters = [{
                    'start_time': float_or_none(traverse_obj(intro_ending_json, ('skip', 'opening_start_time')), 1000),
                    'end_time': float_or_none(traverse_obj(intro_ending_json, ('skip', 'opening_end_time')), 1000),
                    'title': 'Intro'
                }, {
                    'start_time': float_or_none(traverse_obj(intro_ending_json, ('skip', 'ending_start_time')), 1000),
                    'end_time': float_or_none(traverse_obj(intro_ending_json, ('skip', 'ending_end_time')), 1000),
                    'title': 'Outro'
                }]

        return {
            'id': video_id,
            **self._extract_video_metadata(url, video_id, season_id),
            'formats': self._get_formats(ep_id=ep_id, aid=aid),
            'subtitles': self.extract_subtitles(ep_id=ep_id, aid=aid),
            'chapters': chapters
        }
class BiliIntlSeriesIE(BiliIntlBaseIE):
    """Playlist extractor for a whole series/season on bilibili.tv / biliintl.com."""
    IE_NAME = 'biliIntl:series'
    _VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(?:play|media)/(?P<id>\d+)/?(?:[?#]|$)'
    # NOTE(review): _TESTS reconstructed from a garbled source — the dropped
    # structural lines (dict/list delimiters, ids, 'params') were restored
    # from context; verify the literal values against upstream.
    _TESTS = [{
        'url': 'https://www.bilibili.tv/en/play/34613',
        'playlist_mincount': 15,
        'info_dict': {
            'id': '34613',
            'title': 'TONIKAWA: Over the Moon For You',
            'description': 'md5:297b5a17155eb645e14a14b385ab547e',
            'categories': ['Slice of life', 'Comedy', 'Romance'],
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.bilibili.tv/en/media/1048837',
        'info_dict': {
            'id': '1048837',
            'title': 'SPY×FAMILY',
            'description': 'md5:b4434eb1a9a97ad2bccb779514b89f17',
            'categories': ['Adventure', 'Action', 'Comedy'],
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.jpg$',
            'view_count': int,
        },
        'playlist_mincount': 25,
    }, {
        'url': 'https://www.biliintl.com/en/play/34613',
        'only_matching': True,
    }, {
        'url': 'https://www.biliintl.com/EN/play/34613',
        'only_matching': True,
    }]

    def _entries(self, series_id):
        """Yield a url_result for every episode across all sections of the series."""
        series_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id)
        for episode in traverse_obj(series_json, ('sections', ..., 'episodes', ...), expected_type=dict):
            episode_id = str(episode['episode_id'])
            # Smuggle the metadata already present in the episode list so the
            # per-episode extractor does not need to refetch it.
            yield self.url_result(smuggle_url(
                BiliIntlIE._make_url(episode_id, series_id),
                self._parse_video_metadata(episode)
            ), BiliIntlIE, episode_id)

    def _real_extract(self, url):
        """Build a playlist from the season info endpoint plus the episode entries."""
        series_id = self._match_id(url)
        # 'season' may be absent for unknown ids; fall back to empty metadata
        series_info = self._call_api(f'/web/v2/ogv/play/season_info?season_id={series_id}&platform=web', series_id).get('season') or {}
        return self.playlist_result(
            self._entries(series_id), series_id, series_info.get('title'), series_info.get('description'),
            categories=traverse_obj(series_info, ('styles', ..., 'title'), expected_type=str_or_none),
            thumbnail=url_or_none(series_info.get('horizontal_cover')), view_count=parse_count(series_info.get('view')))
class BiliLiveIE(InfoExtractor):
    """Extractor for live rooms on live.bilibili.com (incl. the /blanc/ layout)."""
    _VALID_URL = r'https?://live.bilibili.com/(?:blanc/)?(?P<id>\d+)'

    # NOTE(review): test metadata reconstructed from a garbled source — the
    # dropped lines (ids, delimiters, 'skip') restored from context; verify.
    _TESTS = [{
        'url': 'https://live.bilibili.com/196',
        'info_dict': {
            'id': '33989',
            'description': "周六杂谈回,其他时候随机游戏。 | \n录播:@下播型泛式录播组。 | \n直播通知群(全员禁言):666906670,902092584,59971⑧481 (功能一样,别多加)",
            'ext': 'flv',
            'title': "太空狼人杀联动,不被爆杀就算赢",
            'thumbnail': "https://i0.hdslb.com/bfs/live/new_room_cover/e607bc1529057ef4b332e1026e62cf46984c314d.jpg",
            'timestamp': 1650802769,
        },
        'skip': 'not live anymore',
    }, {
        'url': 'https://live.bilibili.com/196?broadcast_type=0&is_room_feed=1?spm_id_from=333.999.space_home.strengthen_live_card.click',
        'only_matching': True
    }, {
        'url': 'https://live.bilibili.com/blanc/196',
        'only_matching': True
    }]

    # Map of Bilibili quality numbers (qn) to yt-dlp format metadata;
    # format_note keeps the site's original Chinese quality labels.
    _FORMATS = {
        80: {'format_id': 'low', 'format_note': '流畅'},
        150: {'format_id': 'high_res', 'format_note': '高清'},
        250: {'format_id': 'ultra_high_res', 'format_note': '超清'},
        400: {'format_id': 'blue_ray', 'format_note': '蓝光'},
        10000: {'format_id': 'source', 'format_note': '原画'},
        20000: {'format_id': '4K', 'format_note': '4K'},
        30000: {'format_id': 'dolby', 'format_note': '杜比'},
    }

    # Quality-preference function over the qn keys above (later keys rank higher)
    _quality = staticmethod(qualities(list(_FORMATS)))
1181 def _call_api(self
, path
, room_id
, query
):
1182 api_result
= self
._download
_json
(f
'https://api.live.bilibili.com/{path}', room_id
, query
=query
)
1183 if api_result
.get('code') != 0:
1184 raise ExtractorError(api_result
.get('message') or 'Unable to download JSON metadata')
1185 return api_result
.get('data') or {}
1187 def _parse_formats(self
, qn
, fmt
):
1188 for codec
in fmt
.get('codec') or []:
1189 if codec
.get('current_qn') != qn
:
1191 for url_info
in codec
['url_info']:
1193 'url': f
'{url_info["host"]}{codec["base_url"]}{url_info["extra"]}',
1194 'ext': fmt
.get('format_name'),
1195 'vcodec': codec
.get('codec_name'),
1196 'quality': self
._quality
(qn
),
1197 **self
._FORMATS
[qn
],
1200 def _real_extract(self
, url
):
1201 room_id
= self
._match
_id
(url
)
1202 room_data
= self
._call
_api
('room/v1/Room/get_info', room_id
, {'id': room_id}
)
1203 if room_data
.get('live_status') == 0:
1204 raise ExtractorError('Streamer is not live', expected
=True)
1207 for qn
in self
._FORMATS
.keys():
1208 stream_data
= self
._call
_api
('xlive/web-room/v2/index/getRoomPlayInfo', room_id
, {
1218 for fmt
in traverse_obj(stream_data
, ('playurl_info', 'playurl', 'stream', ..., 'format', ...)) or []:
1219 formats
.extend(self
._parse
_formats
(qn
, fmt
))
1223 'title': room_data
.get('title'),
1224 'description': room_data
.get('description'),
1225 'thumbnail': room_data
.get('user_cover'),
1226 'timestamp': stream_data
.get('live_time'),