8 from .common
import InfoExtractor
, SearchInfoExtractor
9 from ..dependencies
import Cryptodome
26 srt_subtitles_timecode
,
35 class BilibiliBaseIE(InfoExtractor
):
36 def extract_formats(self
, play_info
):
38 r
['quality']: traverse_obj(r
, 'new_description', 'display_desc')
39 for r
in traverse_obj(play_info
, ('support_formats', lambda _
, v
: v
['quality']))
42 audios
= traverse_obj(play_info
, ('dash', 'audio', ...))
43 flac_audio
= traverse_obj(play_info
, ('dash', 'flac', 'audio'))
45 audios
.append(flac_audio
)
47 'url': traverse_obj(audio
, 'baseUrl', 'base_url', 'url'),
48 'ext': mimetype2ext(traverse_obj(audio
, 'mimeType', 'mime_type')),
49 'acodec': audio
.get('codecs'),
51 'tbr': float_or_none(audio
.get('bandwidth'), scale
=1000),
52 'filesize': int_or_none(audio
.get('size'))
53 } for audio
in audios
]
56 'url': traverse_obj(video
, 'baseUrl', 'base_url', 'url'),
57 'ext': mimetype2ext(traverse_obj(video
, 'mimeType', 'mime_type')),
58 'fps': float_or_none(traverse_obj(video
, 'frameRate', 'frame_rate')),
59 'width': int_or_none(video
.get('width')),
60 'height': int_or_none(video
.get('height')),
61 'vcodec': video
.get('codecs'),
62 'acodec': 'none' if audios
else None,
63 'tbr': float_or_none(video
.get('bandwidth'), scale
=1000),
64 'filesize': int_or_none(video
.get('size')),
65 'quality': int_or_none(video
.get('id')),
66 'format': format_names
.get(video
.get('id')),
67 } for video
in traverse_obj(play_info
, ('dash', 'video', ...)))
69 missing_formats
= format_names
.keys() - set(traverse_obj(formats
, (..., 'quality')))
71 self
.to_screen(f
'Format(s) {", ".join(format_names[i] for i in missing_formats)} are missing; '
72 f
'you have to login or become premium member to download them. {self._login_hint()}')
76 def json2srt(self
, json_data
):
78 for idx
, line
in enumerate(json_data
.get('body') or []):
79 srt_data
+= (f
'{idx + 1}\n'
80 f
'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n'
81 f
'{line["content"]}\n\n')
84 def _get_subtitles(self
, video_id
, aid
, cid
):
88 'url': f
'https://comment.bilibili.com/{cid}.xml',
92 video_info_json
= self
._download
_json
(f
'https://api.bilibili.com/x/player/v2?aid={aid}&cid={cid}', video_id
)
93 for s
in traverse_obj(video_info_json
, ('data', 'subtitle', 'subtitles', ...)):
94 subtitles
.setdefault(s
['lan'], []).append({
96 'data': self
.json2srt(self
._download
_json
(s
['subtitle_url'], video_id
))
100 def _get_chapters(self
, aid
, cid
):
101 chapters
= aid
and cid
and self
._download
_json
(
102 'https://api.bilibili.com/x/player/v2', aid
, query
={'aid': aid, 'cid': cid}
,
103 note
='Extracting chapters', fatal
=False)
104 return traverse_obj(chapters
, ('data', 'view_points', ..., {
106 'start_time': 'from',
110 def _get_comments(self
, aid
):
111 for idx
in itertools
.count(1):
112 replies
= traverse_obj(
114 f
'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685',
115 aid
, note
=f
'Extracting comments from page {idx}', fatal
=False),
119 for children
in map(self
._get
_all
_children
, replies
):
122 def _get_all_children(self
, reply
):
124 'author': traverse_obj(reply
, ('member', 'uname')),
125 'author_id': traverse_obj(reply
, ('member', 'mid')),
126 'id': reply
.get('rpid'),
127 'text': traverse_obj(reply
, ('content', 'message')),
128 'timestamp': reply
.get('ctime'),
129 'parent': reply
.get('parent') or 'root',
131 for children
in map(self
._get
_all
_children
, traverse_obj(reply
, ('replies', ...))):
135 class BiliBiliIE(BilibiliBaseIE
):
136 _VALID_URL
= r
'https?://www\.bilibili\.com/video/[aAbB][vV](?P<id>[^/?#&]+)'
139 'url': 'https://www.bilibili.com/video/BV13x41117TL',
141 'id': 'BV13x41117TL',
142 'title': '阿滴英文|英文歌分享#6 "Closer',
144 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
145 'uploader_id': '65880958',
147 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
150 'comment_count': int,
151 'upload_date': '20170301',
152 'timestamp': 1488353834,
158 'url': 'http://www.bilibili.com/video/av1074402/',
160 'thumbnail': r
're:^https?://.*\.(jpg|jpeg)$',
163 'uploader_id': '156160',
164 'id': 'BV11x411K7CN',
167 'upload_date': '20140420',
168 'timestamp': 1397983878,
169 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
171 'comment_count': int,
175 'params': {'skip_download': True}
,
178 'url': 'https://www.bilibili.com/video/BV1bK411W797',
180 'id': 'BV1bK411W797',
181 'title': '物语中的人物是如何吐槽自己的OP的'
183 'playlist_count': 18,
186 'id': 'BV1bK411W797_p1',
188 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
190 'timestamp': 1589601697,
191 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
192 'uploader': '打牌还是打桩',
193 'uploader_id': '150259984',
195 'comment_count': int,
196 'upload_date': '20200516',
198 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
203 'note': 'Specific page of Anthology',
204 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1',
206 'id': 'BV1bK411W797_p1',
208 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
210 'timestamp': 1589601697,
211 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
212 'uploader': '打牌还是打桩',
213 'uploader_id': '150259984',
215 'comment_count': int,
216 'upload_date': '20200516',
218 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
222 'note': 'video has subtitles',
223 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh',
225 'id': 'BV12N4y1M7rh',
227 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1',
229 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4',
231 'upload_date': '20220709',
232 'uploader': '小夫Tech',
233 'timestamp': 1657347907,
234 'uploader_id': '1326814124',
235 'comment_count': int,
238 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
239 'subtitles': 'count:2'
241 'params': {'listsubtitles': True}
,
243 'url': 'https://www.bilibili.com/video/av8903802/',
245 'id': 'BV13x41117TL',
247 'title': '阿滴英文|英文歌分享#6 "Closer',
248 'upload_date': '20170301',
249 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
250 'timestamp': 1488353834,
251 'uploader_id': '65880958',
253 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
256 'comment_count': int,
261 'skip_download': True,
264 'note': 'video has chapter',
265 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/',
267 'id': 'BV1vL411G7N7',
269 'title': '如何为你的B站视频添加进度条分段',
270 'timestamp': 1634554558,
271 'upload_date': '20211018',
272 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d',
274 'uploader': '爱喝咖啡的当麻',
276 'uploader_id': '1680903',
277 'chapters': 'count:6',
278 'comment_count': int,
281 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
283 'params': {'skip_download': True}
,
286 def _real_extract(self
, url
):
287 video_id
= self
._match
_id
(url
)
288 webpage
= self
._download
_webpage
(url
, video_id
)
289 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
290 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
292 video_data
= initial_state
['videoData']
293 video_id
, title
= video_data
['bvid'], video_data
.get('title')
295 # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
296 page_list_json
= traverse_obj(
298 'https://api.bilibili.com/x/player/pagelist', video_id
,
299 fatal
=False, query
={'bvid': video_id, 'jsonp': 'jsonp'}
,
300 note
='Extracting videos in anthology'),
301 'data', expected_type
=list) or []
302 is_anthology
= len(page_list_json
) > 1
304 part_id
= int_or_none(parse_qs(url
).get('p', [None])[-1])
305 if is_anthology
and not part_id
and self
._yes
_playlist
(video_id
, video_id
):
306 return self
.playlist_from_matches(
307 page_list_json
, video_id
, title
, ie
=BiliBiliIE
,
308 getter
=lambda entry
: f
'https://www.bilibili.com/video/{video_id}?p={entry["page"]}')
311 part_id
= part_id
or 1
312 title
+= f
' p{part_id:02d} {traverse_obj(page_list_json, (part_id - 1, "part")) or ""}'
314 aid
= video_data
.get('aid')
315 old_video_id
= format_field(aid
, None, f
'%s_part{part_id or 1}')
317 cid
= traverse_obj(video_data
, ('pages', part_id
- 1, 'cid')) if part_id
else video_data
.get('cid')
320 'id': f
'{video_id}{format_field(part_id, None, "_p%d")}',
321 'formats': self
.extract_formats(play_info
),
322 '_old_archive_ids': [make_archive_id(self
, old_video_id
)] if old_video_id
else None,
324 'description': traverse_obj(initial_state
, ('videoData', 'desc')),
325 'view_count': traverse_obj(initial_state
, ('videoData', 'stat', 'view')),
326 'uploader': traverse_obj(initial_state
, ('upData', 'name')),
327 'uploader_id': traverse_obj(initial_state
, ('upData', 'mid')),
328 'like_count': traverse_obj(initial_state
, ('videoData', 'stat', 'like')),
329 'comment_count': traverse_obj(initial_state
, ('videoData', 'stat', 'reply')),
330 'tags': traverse_obj(initial_state
, ('tags', ..., 'tag_name')),
331 'thumbnail': traverse_obj(initial_state
, ('videoData', 'pic')),
332 'timestamp': traverse_obj(initial_state
, ('videoData', 'pubdate')),
333 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
334 'chapters': self
._get
_chapters
(aid
, cid
),
335 'subtitles': self
.extract_subtitles(video_id
, aid
, cid
),
336 '__post_extractor': self
.extract_comments(aid
),
337 'http_headers': {'Referer': url}
,
341 class BiliBiliBangumiIE(BilibiliBaseIE
):
342 _VALID_URL
= r
'(?x)https?://www\.bilibili\.com/bangumi/play/(?P<id>(?:ss|ep)\d+)'
345 'url': 'https://www.bilibili.com/bangumi/play/ss897',
355 'title': '神的记事本:第2话 你与旅行包',
356 'duration': 1428.487,
357 'timestamp': 1310809380,
358 'upload_date': '20110716',
359 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
362 'url': 'https://www.bilibili.com/bangumi/play/ep508406',
363 'only_matching': True,
366 def _real_extract(self
, url
):
367 video_id
= self
._match
_id
(url
)
368 webpage
= self
._download
_webpage
(url
, video_id
)
370 if '您所在的地区无法观看本片' in webpage
:
371 raise GeoRestrictedError('This video is restricted')
372 elif ('开通大会员观看' in webpage
and '__playinfo__' not in webpage
373 or '正在观看预览,大会员免费看全片' in webpage
):
374 self
.raise_login_required('This video is for premium members only')
376 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
377 formats
= self
.extract_formats(play_info
)
378 if (not formats
and '成为大会员抢先看' in webpage
379 and play_info
.get('durl') and not play_info
.get('dash')):
380 self
.raise_login_required('This video is for premium members only')
382 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
384 season_id
= traverse_obj(initial_state
, ('mediaInfo', 'season_id'))
385 season_number
= season_id
and next((
386 idx
+ 1 for idx
, e
in enumerate(
387 traverse_obj(initial_state
, ('mediaInfo', 'seasons', ...)))
388 if e
.get('season_id') == season_id
394 'title': traverse_obj(initial_state
, 'h1Title'),
395 'episode': traverse_obj(initial_state
, ('epInfo', 'long_title')),
396 'episode_number': int_or_none(traverse_obj(initial_state
, ('epInfo', 'title'))),
397 'series': traverse_obj(initial_state
, ('mediaInfo', 'series')),
398 'season': traverse_obj(initial_state
, ('mediaInfo', 'season_title')),
399 'season_id': season_id
,
400 'season_number': season_number
,
401 'thumbnail': traverse_obj(initial_state
, ('epInfo', 'cover')),
402 'timestamp': traverse_obj(initial_state
, ('epInfo', 'pub_time')),
403 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
404 'subtitles': self
.extract_subtitles(
405 video_id
, initial_state
, traverse_obj(initial_state
, ('epInfo', 'cid'))),
406 '__post_extractor': self
.extract_comments(traverse_obj(initial_state
, ('epInfo', 'aid'))),
407 'http_headers': {'Referer': url, **self.geo_verification_headers()}
,
411 class BiliBiliBangumiMediaIE(InfoExtractor
):
412 _VALID_URL
= r
'https?://www\.bilibili\.com/bangumi/media/md(?P<id>\d+)'
414 'url': 'https://www.bilibili.com/bangumi/media/md24097891',
418 'playlist_mincount': 25,
def _real_extract(self, url):
    """Expand a bangumi media page into a playlist of its main-section episodes."""
    media_id = self._match_id(url)
    webpage = self._download_webpage(url, media_id)

    # The season id is only available in the page's serialized state blob.
    initial_state = self._search_json(
        r'window\.__INITIAL_STATE__\s*=', webpage, 'initial_state', media_id)
    episode_list = self._download_json(
        'https://api.bilibili.com/pgc/web/season/section', media_id,
        query={'season_id': initial_state['mediaInfo']['season_id']},
        note='Downloading season info')['result']['main_section']['episodes']

    episode_entries = (
        self.url_result(episode['share_url'], BiliBiliBangumiIE, episode['aid'])
        for episode in episode_list)
    return self.playlist_result(episode_entries, media_id)
class BilibiliSpaceBaseIE(InfoExtractor):
    """Shared paging logic for Bilibili space (user page) extractors."""

    def _extract_playlist(self, fetch_page, get_metadata, get_entries):
        """Build a (metadata, paged_list) pair from page-level callbacks.

        fetch_page(idx) downloads 0-based page *idx*; get_metadata(page)
        extracts at least {'page_count': ..., 'page_size': ...} from a page
        payload; get_entries(page) yields playlist entries for one page.
        Page 0 is fetched eagerly so its payload can supply the metadata;
        all later pages are fetched lazily on demand.
        """
        initial_page = fetch_page(0)
        metadata = get_metadata(initial_page)

        def entries_for(page_idx):
            # Reuse the eagerly fetched first page instead of downloading it twice.
            return get_entries(initial_page if not page_idx else fetch_page(page_idx))

        paged = InAdvancePagedList(
            entries_for, metadata['page_count'], metadata['page_size'])
        return metadata, paged
448 class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE
):
449 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)(?P<video>/video)?/?(?:[?#]|$)'
451 'url': 'https://space.bilibili.com/3985676/video',
455 'playlist_mincount': 178,
458 def _real_extract(self
, url
):
459 playlist_id
, is_video_url
= self
._match
_valid
_url
(url
).group('id', 'video')
461 self
.to_screen('A channel URL was given. Only the channel\'s videos will be downloaded. '
462 'To download audios, add a "/audio" to the URL')
464 def fetch_page(page_idx
):
466 response
= self
._download
_json
('https://api.bilibili.com/x/space/arc/search',
467 playlist_id
, note
=f
'Downloading page {page_idx}',
468 query
={'mid': playlist_id, 'pn': page_idx + 1, 'jsonp': 'jsonp'}
)
469 except ExtractorError
as e
:
470 if isinstance(e
.cause
, urllib
.error
.HTTPError
) and e
.cause
.code
== 412:
471 raise ExtractorError(
472 'Request is blocked by server (412), please add cookies, wait and try later.', expected
=True)
474 if response
['code'] == -401:
475 raise ExtractorError(
476 'Request is blocked by server (401), please add cookies, wait and try later.', expected
=True)
477 return response
['data']
479 def get_metadata(page_data
):
480 page_size
= page_data
['page']['ps']
481 entry_count
= page_data
['page']['count']
483 'page_count': math
.ceil(entry_count
/ page_size
),
484 'page_size': page_size
,
487 def get_entries(page_data
):
488 for entry
in traverse_obj(page_data
, ('list', 'vlist')) or []:
489 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE
, entry
['bvid'])
491 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
492 return self
.playlist_result(paged_list
, playlist_id
)
495 class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE
):
496 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)/audio'
498 'url': 'https://space.bilibili.com/3985676/audio',
502 'playlist_mincount': 1,
505 def _real_extract(self
, url
):
506 playlist_id
= self
._match
_id
(url
)
508 def fetch_page(page_idx
):
509 return self
._download
_json
(
510 'https://api.bilibili.com/audio/music-service/web/song/upper', playlist_id
,
511 note
=f
'Downloading page {page_idx}',
512 query
={'uid': playlist_id, 'pn': page_idx + 1, 'ps': 30, 'order': 1, 'jsonp': 'jsonp'}
)['data']
514 def get_metadata(page_data
):
516 'page_count': page_data
['pageCount'],
517 'page_size': page_data
['pageSize'],
520 def get_entries(page_data
):
521 for entry
in page_data
.get('data', []):
522 yield self
.url_result(f
'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE
, entry
['id'])
524 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
525 return self
.playlist_result(paged_list
, playlist_id
)
528 class BilibiliSpacePlaylistIE(BilibiliSpaceBaseIE
):
529 _VALID_URL
= r
'https?://space.bilibili\.com/(?P<mid>\d+)/channel/collectiondetail\?sid=(?P<sid>\d+)'
531 'url': 'https://space.bilibili.com/2142762/channel/collectiondetail?sid=57445',
533 'id': '2142762_57445',
536 'playlist_mincount': 31,
539 def _real_extract(self
, url
):
540 mid
, sid
= self
._match
_valid
_url
(url
).group('mid', 'sid')
541 playlist_id
= f
'{mid}_{sid}'
543 def fetch_page(page_idx
):
544 return self
._download
_json
(
545 'https://api.bilibili.com/x/polymer/space/seasons_archives_list',
546 playlist_id
, note
=f
'Downloading page {page_idx}',
547 query
={'mid': mid, 'season_id': sid, 'page_num': page_idx + 1, 'page_size': 30}
)['data']
549 def get_metadata(page_data
):
550 page_size
= page_data
['page']['page_size']
551 entry_count
= page_data
['page']['total']
553 'page_count': math
.ceil(entry_count
/ page_size
),
554 'page_size': page_size
,
555 'title': traverse_obj(page_data
, ('meta', 'name'))
558 def get_entries(page_data
):
559 for entry
in page_data
.get('archives', []):
560 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}',
561 BiliBiliIE
, entry
['bvid'])
563 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
564 return self
.playlist_result(paged_list
, playlist_id
, metadata
['title'])
567 class BilibiliCategoryIE(InfoExtractor
):
568 IE_NAME
= 'Bilibili category extractor'
569 _MAX_RESULTS
= 1000000
570 _VALID_URL
= r
'https?://www\.bilibili\.com/v/[a-zA-Z]+\/[a-zA-Z]+'
572 'url': 'https://www.bilibili.com/v/kichiku/mad',
574 'id': 'kichiku: mad',
575 'title': 'kichiku: mad'
577 'playlist_mincount': 45,
583 def _fetch_page(self
, api_url
, num_pages
, query
, page_num
):
584 parsed_json
= self
._download
_json
(
585 api_url
, query
, query
={'Search_key': query, 'pn': page_num}
,
586 note
='Extracting results from page %s of %s' % (page_num
, num_pages
))
588 video_list
= traverse_obj(parsed_json
, ('data', 'archives'), expected_type
=list)
590 raise ExtractorError('Failed to retrieve video list for page %d' % page_num
)
592 for video
in video_list
:
593 yield self
.url_result(
594 'https://www.bilibili.com/video/%s' % video
['bvid'], 'BiliBili', video
['bvid'])
596 def _entries(self
, category
, subcategory
, query
):
597 # map of categories : subcategories : RIDs
601 'manual_vocaloid': 126,
608 if category
not in rid_map
:
609 raise ExtractorError(
610 f
'The category {category} isn\'t supported. Supported categories: {list(rid_map.keys())}')
611 if subcategory
not in rid_map
[category
]:
612 raise ExtractorError(
613 f
'The subcategory {subcategory} isn\'t supported for this category. Supported subcategories: {list(rid_map[category].keys())}')
614 rid_value
= rid_map
[category
][subcategory
]
616 api_url
= 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
617 page_json
= self
._download
_json
(api_url
, query
, query
={'Search_key': query, 'pn': '1'}
)
618 page_data
= traverse_obj(page_json
, ('data', 'page'), expected_type
=dict)
619 count
, size
= int_or_none(page_data
.get('count')), int_or_none(page_data
.get('size'))
620 if count
is None or not size
:
621 raise ExtractorError('Failed to calculate either page count or size')
623 num_pages
= math
.ceil(count
/ size
)
625 return OnDemandPagedList(functools
.partial(
626 self
._fetch
_page
, api_url
, num_pages
, query
), size
)
def _real_extract(self, url):
    """Turn a /v/<category>/<subcategory> URL into a playlist of that category's videos."""
    path_segments = urllib.parse.urlparse(url).path.split('/')
    category, subcategory = path_segments[2:4]
    # The "<category>: <subcategory>" string doubles as playlist id and title.
    query = '%s: %s' % (category, subcategory)

    entries = self._entries(category, subcategory, query)
    return self.playlist_result(entries, query, query)
635 class BiliBiliSearchIE(SearchInfoExtractor
):
636 IE_DESC
= 'Bilibili video search'
637 _MAX_RESULTS
= 100000
638 _SEARCH_KEY
= 'bilisearch'
640 def _search_results(self
, query
):
641 for page_num
in itertools
.count(1):
642 videos
= self
._download
_json
(
643 'https://api.bilibili.com/x/web-interface/search/type', query
,
644 note
=f
'Extracting results from page {page_num}', query
={
651 '__refresh__': 'true',
652 'search_type': 'video',
655 })['data'].get('result')
659 yield self
.url_result(video
['arcurl'], 'BiliBili', str(video
['aid']))
662 class BilibiliAudioBaseIE(InfoExtractor
):
663 def _call_api(self
, path
, sid
, query
=None):
666 return self
._download
_json
(
667 'https://www.bilibili.com/audio/music-service-c/web/' + path
,
668 sid
, query
=query
)['data']
671 class BilibiliAudioIE(BilibiliAudioBaseIE
):
672 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
674 'url': 'https://www.bilibili.com/audio/au1003142',
675 'md5': 'fec4987014ec94ef9e666d4d158ad03b',
679 'title': '【tsukimi】YELLOW / 神山羊',
681 'comment_count': int,
682 'description': 'YELLOW的mp3版!',
689 'thumbnail': r
're:^https?://.+\.jpg',
690 'timestamp': 1564836614,
691 'upload_date': '20190803',
692 'uploader': 'tsukimi-つきみぐー',
697 def _real_extract(self
, url
):
698 au_id
= self
._match
_id
(url
)
700 play_data
= self
._call
_api
('url', au_id
)
702 'url': play_data
['cdns'][0],
703 'filesize': int_or_none(play_data
.get('size')),
707 for a_format
in formats
:
708 a_format
.setdefault('http_headers', {}).update({
712 song
= self
._call
_api
('song/info', au_id
)
713 title
= song
['title']
714 statistic
= song
.get('statistic') or {}
717 lyric
= song
.get('lyric')
729 'artist': song
.get('author'),
730 'comment_count': int_or_none(statistic
.get('comment')),
731 'description': song
.get('intro'),
732 'duration': int_or_none(song
.get('duration')),
733 'subtitles': subtitles
,
734 'thumbnail': song
.get('cover'),
735 'timestamp': int_or_none(song
.get('passtime')),
736 'uploader': song
.get('uname'),
737 'view_count': int_or_none(statistic
.get('play')),
741 class BilibiliAudioAlbumIE(BilibiliAudioBaseIE
):
742 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
744 'url': 'https://www.bilibili.com/audio/am10624',
747 'title': '每日新曲推荐(每日11:00更新)',
748 'description': '每天11:00更新,为你推送最新音乐',
750 'playlist_count': 19,
753 def _real_extract(self
, url
):
754 am_id
= self
._match
_id
(url
)
756 songs
= self
._call
_api
(
757 'song/of-menu', am_id
, {'sid': am_id, 'pn': 1, 'ps': 100}
)['data']
761 sid
= str_or_none(song
.get('id'))
764 entries
.append(self
.url_result(
765 'https://www.bilibili.com/audio/au' + sid
,
766 BilibiliAudioIE
.ie_key(), sid
))
769 album_data
= self
._call
_api
('menu/info', am_id
) or {}
770 album_title
= album_data
.get('title')
772 for entry
in entries
:
773 entry
['album'] = album_title
774 return self
.playlist_result(
775 entries
, am_id
, album_title
, album_data
.get('intro'))
777 return self
.playlist_result(entries
, am_id
)
780 class BiliBiliPlayerIE(InfoExtractor
):
781 _VALID_URL
= r
'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
783 'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
784 'only_matching': True,
def _real_extract(self, url):
    """Redirect a player.bilibili.com embed to the regular video page."""
    video_id = self._match_id(url)
    video_url = 'http://www.bilibili.tv/video/av%s/' % video_id
    return self.url_result(video_url, ie=BiliBiliIE.ie_key(), video_id=video_id)
794 class BiliIntlBaseIE(InfoExtractor
):
795 _API_URL
= 'https://api.bilibili.tv/intl/gateway'
796 _NETRC_MACHINE
= 'biliintl'
798 def _call_api(self
, endpoint
, *args
, **kwargs
):
799 json
= self
._download
_json
(self
._API
_URL
+ endpoint
, *args
, **kwargs
)
801 if json
['code'] in (10004004, 10004005, 10023006):
802 self
.raise_login_required()
803 elif json
['code'] == 10004001:
804 self
.raise_geo_restricted()
806 if json
.get('message') and str(json
['code']) != json
['message']:
807 errmsg
= f
'{kwargs.get("errnote", "Unable to download JSON metadata")}: {self.IE_NAME} said: {json["message"]}'
809 errmsg
= kwargs
.get('errnote', 'Unable to download JSON metadata')
810 if kwargs
.get('fatal'):
811 raise ExtractorError(errmsg
)
813 self
.report_warning(errmsg
)
814 return json
.get('data')
816 def json2srt(self
, json
):
818 f
'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
819 for i
, line
in enumerate(traverse_obj(json
, (
820 'body', lambda _
, l
: l
['content'] and l
['from'] and l
['to']))))
823 def _get_subtitles(self
, *, ep_id
=None, aid
=None):
824 sub_json
= self
._call
_api
(
825 '/web/v2/subtitle', ep_id
or aid
, fatal
=False,
826 note
='Downloading subtitles list', errnote
='Unable to download subtitles list',
834 for sub
in sub_json
.get('subtitles') or []:
835 sub_url
= sub
.get('url')
838 sub_data
= self
._download
_json
(
839 sub_url
, ep_id
or aid
, errnote
='Unable to download subtitles', fatal
=False,
840 note
='Downloading subtitles%s' % f
' for {sub["lang"]}' if sub
.get('lang') else '')
843 subtitles
.setdefault(sub
.get('lang_key', 'en'), []).append({
845 'data': self
.json2srt(sub_data
)
849 def _get_formats(self
, *, ep_id
=None, aid
=None):
850 video_json
= self
._call
_api
(
851 '/web/playurl', ep_id
or aid
, note
='Downloading video formats',
852 errnote
='Unable to download video formats', query
=filter_dict({
857 video_json
= video_json
['playurl']
859 for vid
in video_json
.get('video') or []:
860 video_res
= vid
.get('video_resource') or {}
861 video_info
= vid
.get('stream_info') or {}
862 if not video_res
.get('url'):
865 'url': video_res
['url'],
867 'format_note': video_info
.get('desc_words'),
868 'width': video_res
.get('width'),
869 'height': video_res
.get('height'),
870 'vbr': video_res
.get('bandwidth'),
872 'vcodec': video_res
.get('codecs'),
873 'filesize': video_res
.get('size'),
875 for aud
in video_json
.get('audio_resource') or []:
876 if not aud
.get('url'):
881 'abr': aud
.get('bandwidth'),
882 'acodec': aud
.get('codecs'),
884 'filesize': aud
.get('size'),
889 def _parse_video_metadata(self
, video_data
):
891 'title': video_data
.get('title_display') or video_data
.get('title'),
892 'thumbnail': video_data
.get('cover'),
893 'episode_number': int_or_none(self
._search
_regex
(
894 r
'^E(\d+)(?:$| - )', video_data
.get('title_display') or '', 'episode number', default
=None)),
897 def _perform_login(self
, username
, password
):
898 if not Cryptodome
.RSA
:
899 raise ExtractorError('pycryptodomex not found. Please install', expected
=True)
901 key_data
= self
._download
_json
(
902 'https://passport.bilibili.tv/x/intl/passport-login/web/key?lang=en-US', None,
903 note
='Downloading login key', errnote
='Unable to download login key')['data']
905 public_key
= Cryptodome
.RSA
.importKey(key_data
['key'])
906 password_hash
= Cryptodome
.PKCS1_v1_5
.new(public_key
).encrypt((key_data
['hash'] + password
).encode('utf-8'))
907 login_post
= self
._download
_json
(
908 'https://passport.bilibili.tv/x/intl/passport-login/web/login/password?lang=en-US', None, data
=urlencode_postdata({
909 'username': username
,
910 'password': base64
.b64encode(password_hash
).decode('ascii'),
914 }), note
='Logging in', errnote
='Unable to log in')
915 if login_post
.get('code'):
916 if login_post
.get('message'):
917 raise ExtractorError(f
'Unable to log in: {self.IE_NAME} said: {login_post["message"]}', expected
=True)
919 raise ExtractorError('Unable to log in')
922 class BiliIntlIE(BiliIntlBaseIE
):
923 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(play/(?P<season_id>\d+)/(?P<ep_id>\d+)|video/(?P<aid>\d+))'
926 'url': 'https://www.bilibili.tv/en/play/34613/341736',
930 'title': 'E2 - The First Night',
931 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
933 'upload_date': '20201009',
934 'episode': 'Episode 2',
935 'timestamp': 1602259500,
936 'description': 'md5:297b5a17155eb645e14a14b385ab547e',
940 'title': '<Untitled Chapter 1>'
942 'start_time': 76.242,
946 'start_time': 1325.742,
947 'end_time': 1403.903,
953 'url': 'https://www.bilibili.tv/en/play/1033760/11005006',
957 'title': 'E3 - Who?',
958 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
960 'description': 'md5:e1a775e71a35c43f141484715470ad09',
961 'episode': 'Episode 3',
962 'upload_date': '20211219',
963 'timestamp': 1639928700,
967 'title': '<Untitled Chapter 1>'
973 'start_time': 1173.0,
974 'end_time': 1259.535,
979 # Subtitle with empty content
980 'url': 'https://www.bilibili.tv/en/play/1005144/10131790',
984 'title': 'E140 - Two Heartbeats: Kabuto\'s Trap',
985 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
986 'episode_number': 140,
988 'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
990 'url': 'https://www.bilibili.tv/en/video/2041863208',
994 'timestamp': 1670874843,
995 'description': 'Scheduled for April 2023.\nStudio: ufotable',
996 'thumbnail': r
're:https?://pic[-\.]bstarstatic.+/ugc/.+\.jpg$',
997 'upload_date': '20221212',
998 'title': 'Kimetsu no Yaiba Season 3 Official Trailer - Bstation',
1001 # episode id without intro and outro
1002 'url': 'https://www.bilibili.tv/en/play/1048837/11246489',
1006 'title': 'E1 - Operation \'Strix\' <Owl>',
1007 'description': 'md5:b4434eb1a9a97ad2bccb779514b89f17',
1008 'timestamp': 1649516400,
1009 'thumbnail': 'https://pic.bstarstatic.com/ogv/62cb1de23ada17fb70fbe7bdd6ff29c29da02a64.png',
1010 'episode': 'Episode 1',
1011 'episode_number': 1,
1012 'upload_date': '20220409',
1015 'url': 'https://www.biliintl.com/en/play/34613/341736',
1016 'only_matching': True,
1018 # User-generated content (as opposed to a series licensed from a studio)
1019 'url': 'https://bilibili.tv/en/video/2019955076',
1020 'only_matching': True,
1022 # No language in URL
1023 'url': 'https://www.bilibili.tv/video/2019955076',
1024 'only_matching': True,
1026 # Uppercase language in URL
1027 'url': 'https://www.bilibili.tv/EN/video/2019955076',
1028 'only_matching': True,
1031 def _make_url(video_id
, series_id
=None):
1033 return f
'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
1034 return f
'https://www.bilibili.tv/en/video/{video_id}'
1036 def _extract_video_metadata(self
, url
, video_id
, season_id
):
1037 url
, smuggled_data
= unsmuggle_url(url
, {})
1038 if smuggled_data
.get('title'):
1039 return smuggled_data
1041 webpage
= self
._download
_webpage
(url
, video_id
)
1044 self
._search
_json
(r
'window\.__INITIAL_(?:DATA|STATE)__\s*=', webpage
, 'preload state', video_id
, default
={})
1045 or self
._search
_nuxt
_data
(webpage
, video_id
, '__initialState', fatal
=False, traverse
=None))
1046 video_data
= traverse_obj(
1047 initial_data
, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type
=dict) or {}
1049 if season_id
and not video_data
:
1050 # Non-Bstation layout, read through episode list
1051 season_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id
)
1052 video_data
= traverse_obj(season_json
, (
1053 'sections', ..., 'episodes', lambda _
, v
: str(v
['episode_id']) == video_id
1054 ), expected_type
=dict, get_all
=False)
1056 # XXX: webpage metadata may not be accurate; it is only used to avoid a crash when video_data is not found
1058 self
._parse
_video
_metadata
(video_data
), self
._search
_json
_ld
(webpage
, video_id
), {
1059 'title': self
._html
_search
_meta
('og:title', webpage
),
1060 'description': self
._html
_search
_meta
('og:description', webpage
)
1063 def _real_extract(self
, url
):
1064 season_id
, ep_id
, aid
= self
._match
_valid
_url
(url
).group('season_id', 'ep_id', 'aid')
1065 video_id
= ep_id
or aid
1069 intro_ending_json
= self
._call
_api
(
1070 f
'/web/v2/ogv/play/episode?episode_id={ep_id}&platform=web',
1071 video_id
, fatal
=False) or {}
1072 if intro_ending_json
.get('skip'):
1073 # FIXME: start and end times seem to be off by a few seconds, even though they are correct according to ogv.*.js
1074 # ref: https://p.bstarstatic.com/fe-static/bstar-web-new/assets/ogv.2b147442.js
1076 'start_time': float_or_none(traverse_obj(intro_ending_json
, ('skip', 'opening_start_time')), 1000),
1077 'end_time': float_or_none(traverse_obj(intro_ending_json
, ('skip', 'opening_end_time')), 1000),
1080 'start_time': float_or_none(traverse_obj(intro_ending_json
, ('skip', 'ending_start_time')), 1000),
1081 'end_time': float_or_none(traverse_obj(intro_ending_json
, ('skip', 'ending_end_time')), 1000),
1087 **self
._extract
_video
_metadata
(url
, video_id
, season_id
),
1088 'formats': self
._get
_formats
(ep_id
=ep_id
, aid
=aid
),
1089 'subtitles': self
.extract_subtitles(ep_id
=ep_id
, aid
=aid
),
1090 'chapters': chapters
class BiliIntlSeriesIE(BiliIntlBaseIE):
    """Playlist extractor for bstation (Bilibili International) series pages."""
    IE_NAME = 'biliIntl:series'
    _VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(?:play|media)/(?P<id>\d+)/?(?:[?#]|$)'
    _TESTS = [{
        'url': 'https://www.bilibili.tv/en/play/34613',
        'playlist_mincount': 15,
        'info_dict': {
            'id': '34613',
            'title': 'TONIKAWA: Over the Moon For You',
            'description': 'md5:297b5a17155eb645e14a14b385ab547e',
            'categories': ['Slice of life', 'Comedy', 'Romance'],
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
            'view_count': int,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.bilibili.tv/en/media/1048837',
        'info_dict': {
            'id': '1048837',
            'title': 'SPY×FAMILY',
            'description': 'md5:b4434eb1a9a97ad2bccb779514b89f17',
            'categories': ['Adventure', 'Action', 'Comedy'],
            'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.jpg$',
            'view_count': int,
        },
        'playlist_mincount': 25,
    }, {
        'url': 'https://www.biliintl.com/en/play/34613',
        'only_matching': True,
    }, {
        'url': 'https://www.biliintl.com/EN/play/34613',
        'only_matching': True,
    }]

    def _entries(self, series_id):
        # One API call returns every section of the season; flatten out all
        # episode dicts across sections.
        episodes_json = self._call_api(
            f'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id)
        for ep in traverse_obj(
                episodes_json, ('sections', ..., 'episodes', ...), expected_type=dict):
            ep_id = str(ep['episode_id'])
            # Smuggle the already-fetched metadata so BiliIntlIE can avoid
            # refetching it for each episode.
            episode_url = smuggle_url(
                BiliIntlIE._make_url(ep_id, series_id),
                self._parse_video_metadata(ep))
            yield self.url_result(episode_url, BiliIntlIE, ep_id)

    def _real_extract(self, url):
        series_id = self._match_id(url)
        season = self._call_api(
            f'/web/v2/ogv/play/season_info?season_id={series_id}&platform=web',
            series_id).get('season') or {}
        return self.playlist_result(
            self._entries(series_id), series_id,
            season.get('title'), season.get('description'),
            categories=traverse_obj(season, ('styles', ..., 'title'), expected_type=str_or_none),
            thumbnail=url_or_none(season.get('horizontal_cover')),
            view_count=parse_count(season.get('view')))
class BiliLiveIE(InfoExtractor):
    """Extractor for live streams on live.bilibili.com."""
    # Dots in the host are now escaped; the previous pattern used bare '.',
    # which also matched arbitrary characters (e.g. 'liveXbilibili.com').
    _VALID_URL = r'https?://live\.bilibili\.com/(?:blanc/)?(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://live.bilibili.com/196',
        'info_dict': {
            'id': '33989',
            'description': "周六杂谈回,其他时候随机游戏。 | \n录播:@下播型泛式录播组。 | \n直播通知群(全员禁言):666906670,902092584,59971⑧481 (功能一样,别多加)",
            'ext': 'flv',
            'title': "太空狼人杀联动,不被爆杀就算赢",
            'thumbnail': "https://i0.hdslb.com/bfs/live/new_room_cover/e607bc1529057ef4b332e1026e62cf46984c314d.jpg",
            'timestamp': 1650802769,
        },
        'skip': 'not live anymore',
    }, {
        'url': 'https://live.bilibili.com/196?broadcast_type=0&is_room_feed=1?spm_id_from=333.999.space_home.strengthen_live_card.click',
        'only_matching': True
    }, {
        'url': 'https://live.bilibili.com/blanc/196',
        'only_matching': True
    }]

    # Bilibili quality number (qn) -> format metadata; notes are the
    # user-facing Chinese quality labels used on the site.
    _FORMATS = {
        80: {'format_id': 'low', 'format_note': '流畅'},
        150: {'format_id': 'high_res', 'format_note': '高清'},
        250: {'format_id': 'ultra_high_res', 'format_note': '超清'},
        400: {'format_id': 'blue_ray', 'format_note': '蓝光'},
        10000: {'format_id': 'source', 'format_note': '原画'},
        20000: {'format_id': '4K', 'format_note': '4K'},
        30000: {'format_id': 'dolby', 'format_note': '杜比'},
    }
    # Preference order follows ascending qn (dict preserves insertion order).
    _quality = staticmethod(qualities(list(_FORMATS)))
def _call_api(self, path, room_id, query):
    """Query the Bilibili live API and return its 'data' payload (or {}).

    Raises ExtractorError when the API reports a non-zero status code.
    """
    response = self._download_json(
        f'https://api.live.bilibili.com/{path}', room_id, query=query)
    if response.get('code') == 0:
        return response.get('data') or {}
    raise ExtractorError(response.get('message') or 'Unable to download JSON metadata')
def _parse_formats(self, qn, fmt):
    """Yield yt-dlp format dicts for the codec entries matching quality *qn*.

    Each codec carries a base URL plus several host/extra pairs; one format
    is emitted per host.
    """
    codecs = fmt.get('codec') or []
    for codec in codecs:
        if codec.get('current_qn') != qn:
            # This codec entry describes a different quality level.
            continue
        for url_info in codec['url_info']:
            yield {
                'url': f'{url_info["host"]}{codec["base_url"]}{url_info["extra"]}',
                'ext': fmt.get('format_name'),
                'vcodec': codec.get('codec_name'),
                'quality': self._quality(qn),
                **self._FORMATS[qn],
            }
1201 def _real_extract(self
, url
):
1202 room_id
= self
._match
_id
(url
)
1203 room_data
= self
._call
_api
('room/v1/Room/get_info', room_id
, {'id': room_id}
)
1204 if room_data
.get('live_status') == 0:
1205 raise ExtractorError('Streamer is not live', expected
=True)
1208 for qn
in self
._FORMATS
.keys():
1209 stream_data
= self
._call
_api
('xlive/web-room/v2/index/getRoomPlayInfo', room_id
, {
1219 for fmt
in traverse_obj(stream_data
, ('playurl_info', 'playurl', 'stream', ..., 'format', ...)) or []:
1220 formats
.extend(self
._parse
_formats
(qn
, fmt
))
1224 'title': room_data
.get('title'),
1225 'description': room_data
.get('description'),
1226 'thumbnail': room_data
.get('user_cover'),
1227 'timestamp': stream_data
.get('live_time'),