8 from .common
import InfoExtractor
, SearchInfoExtractor
23 srt_subtitles_timecode
,
31 class BilibiliBaseIE(InfoExtractor
):
32 def extract_formats(self
, play_info
):
34 r
['quality']: traverse_obj(r
, 'new_description', 'display_desc')
35 for r
in traverse_obj(play_info
, ('support_formats', lambda _
, v
: v
['quality']))
38 audios
= traverse_obj(play_info
, ('dash', 'audio', ...))
39 flac_audio
= traverse_obj(play_info
, ('dash', 'flac', 'audio'))
41 audios
.append(flac_audio
)
43 'url': traverse_obj(audio
, 'baseUrl', 'base_url', 'url'),
44 'ext': mimetype2ext(traverse_obj(audio
, 'mimeType', 'mime_type')),
45 'acodec': audio
.get('codecs'),
47 'tbr': float_or_none(audio
.get('bandwidth'), scale
=1000),
48 'filesize': int_or_none(audio
.get('size'))
49 } for audio
in audios
]
52 'url': traverse_obj(video
, 'baseUrl', 'base_url', 'url'),
53 'ext': mimetype2ext(traverse_obj(video
, 'mimeType', 'mime_type')),
54 'fps': float_or_none(traverse_obj(video
, 'frameRate', 'frame_rate')),
55 'width': int_or_none(video
.get('width')),
56 'height': int_or_none(video
.get('height')),
57 'vcodec': video
.get('codecs'),
58 'acodec': 'none' if audios
else None,
59 'tbr': float_or_none(video
.get('bandwidth'), scale
=1000),
60 'filesize': int_or_none(video
.get('size')),
61 'quality': int_or_none(video
.get('id')),
62 'format': format_names
.get(video
.get('id')),
63 } for video
in traverse_obj(play_info
, ('dash', 'video', ...)))
65 missing_formats
= format_names
.keys() - set(traverse_obj(formats
, (..., 'quality')))
67 self
.to_screen(f
'Format(s) {", ".join(format_names[i] for i in missing_formats)} are missing; '
68 'you have to login or become premium member to download them')
70 self
._sort
_formats
(formats
)
73 def json2srt(self
, json_data
):
75 for idx
, line
in enumerate(json_data
.get('body') or []):
76 srt_data
+= (f
'{idx + 1}\n'
77 f
'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n'
78 f
'{line["content"]}\n\n')
81 def _get_subtitles(self
, video_id
, initial_state
, cid
):
85 'url': f
'https://comment.bilibili.com/{cid}.xml',
89 for s
in traverse_obj(initial_state
, ('videoData', 'subtitle', 'list')) or []:
90 subtitles
.setdefault(s
['lan'], []).append({
92 'data': self
.json2srt(self
._download
_json
(s
['subtitle_url'], video_id
))
96 def _get_chapters(self
, aid
, cid
):
97 chapters
= aid
and cid
and self
._download
_json
(
98 'https://api.bilibili.com/x/player/v2', aid
, query
={'aid': aid, 'cid': cid}
,
99 note
='Extracting chapters', fatal
=False)
100 return traverse_obj(chapters
, ('data', 'view_points', ..., {
102 'start_time': 'from',
106 def _get_comments(self
, aid
):
107 for idx
in itertools
.count(1):
108 replies
= traverse_obj(
110 f
'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685',
111 aid
, note
=f
'Extracting comments from page {idx}', fatal
=False),
115 for children
in map(self
._get
_all
_children
, replies
):
118 def _get_all_children(self
, reply
):
120 'author': traverse_obj(reply
, ('member', 'uname')),
121 'author_id': traverse_obj(reply
, ('member', 'mid')),
122 'id': reply
.get('rpid'),
123 'text': traverse_obj(reply
, ('content', 'message')),
124 'timestamp': reply
.get('ctime'),
125 'parent': reply
.get('parent') or 'root',
127 for children
in map(self
._get
_all
_children
, traverse_obj(reply
, ('replies', ...))):
131 class BiliBiliIE(BilibiliBaseIE
):
132 _VALID_URL
= r
'https?://www\.bilibili\.com/video/[aAbB][vV](?P<id>[^/?#&]+)'
135 'url': 'https://www.bilibili.com/video/BV13x41117TL',
137 'id': 'BV13x41117TL',
138 'title': '阿滴英文|英文歌分享#6 "Closer',
140 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
141 'uploader_id': '65880958',
143 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
146 'comment_count': int,
147 'upload_date': '20170301',
148 'timestamp': 1488353834,
154 'url': 'http://www.bilibili.com/video/av1074402/',
156 'thumbnail': r
're:^https?://.*\.(jpg|jpeg)$',
159 'uploader_id': '156160',
160 'id': 'BV11x411K7CN',
163 'upload_date': '20140420',
164 'timestamp': 1397983878,
165 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
167 'comment_count': int,
171 'params': {'skip_download': True}
,
174 'url': 'https://www.bilibili.com/video/BV1bK411W797',
176 'id': 'BV1bK411W797',
177 'title': '物语中的人物是如何吐槽自己的OP的'
179 'playlist_count': 18,
182 'id': 'BV1bK411W797_p1',
184 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
186 'timestamp': 1589601697,
187 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
188 'uploader': '打牌还是打桩',
189 'uploader_id': '150259984',
191 'comment_count': int,
192 'upload_date': '20200516',
194 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
199 'note': 'Specific page of Anthology',
200 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1',
202 'id': 'BV1bK411W797_p1',
204 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
206 'timestamp': 1589601697,
207 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
208 'uploader': '打牌还是打桩',
209 'uploader_id': '150259984',
211 'comment_count': int,
212 'upload_date': '20200516',
214 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
218 'note': 'video has subtitles',
219 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh',
221 'id': 'BV12N4y1M7rh',
223 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1',
225 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4',
227 'upload_date': '20220709',
228 'uploader': '小夫Tech',
229 'timestamp': 1657347907,
230 'uploader_id': '1326814124',
231 'comment_count': int,
234 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
235 'subtitles': 'count:2'
237 'params': {'listsubtitles': True}
,
239 'url': 'https://www.bilibili.com/video/av8903802/',
241 'id': 'BV13x41117TL',
243 'title': '阿滴英文|英文歌分享#6 "Closer',
244 'upload_date': '20170301',
245 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
246 'timestamp': 1488353834,
247 'uploader_id': '65880958',
249 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
252 'comment_count': int,
257 'skip_download': True,
260 'note': 'video has chapter',
261 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/',
263 'id': 'BV1vL411G7N7',
265 'title': '如何为你的B站视频添加进度条分段',
266 'timestamp': 1634554558,
267 'upload_date': '20211018',
268 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d',
270 'uploader': '爱喝咖啡的当麻',
272 'uploader_id': '1680903',
273 'chapters': 'count:6',
274 'comment_count': int,
277 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
279 'params': {'skip_download': True}
,
282 def _real_extract(self
, url
):
283 video_id
= self
._match
_id
(url
)
284 webpage
= self
._download
_webpage
(url
, video_id
)
285 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
286 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
288 video_data
= initial_state
['videoData']
289 video_id
, title
= video_data
['bvid'], video_data
.get('title')
291 # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
292 page_list_json
= traverse_obj(
294 'https://api.bilibili.com/x/player/pagelist', video_id
,
295 fatal
=False, query
={'bvid': video_id, 'jsonp': 'jsonp'}
,
296 note
='Extracting videos in anthology'),
297 'data', expected_type
=list) or []
298 is_anthology
= len(page_list_json
) > 1
300 part_id
= int_or_none(parse_qs(url
).get('p', [None])[-1])
301 if is_anthology
and not part_id
and self
._yes
_playlist
(video_id
, video_id
):
302 return self
.playlist_from_matches(
303 page_list_json
, video_id
, title
, ie
=BiliBiliIE
,
304 getter
=lambda entry
: f
'https://www.bilibili.com/video/{video_id}?p={entry["page"]}')
307 title
+= f
' p{part_id:02d} {traverse_obj(page_list_json, ((part_id or 1) - 1, "part")) or ""}'
309 aid
= video_data
.get('aid')
310 old_video_id
= format_field(aid
, None, f
'%s_part{part_id or 1}')
312 cid
= traverse_obj(video_data
, ('pages', part_id
- 1, 'cid')) if part_id
else video_data
.get('cid')
315 'id': f
'{video_id}{format_field(part_id, None, "_p%d")}',
316 'formats': self
.extract_formats(play_info
),
317 '_old_archive_ids': [make_archive_id(self
, old_video_id
)] if old_video_id
else None,
319 'description': traverse_obj(initial_state
, ('videoData', 'desc')),
320 'view_count': traverse_obj(initial_state
, ('videoData', 'stat', 'view')),
321 'uploader': traverse_obj(initial_state
, ('upData', 'name')),
322 'uploader_id': traverse_obj(initial_state
, ('upData', 'mid')),
323 'like_count': traverse_obj(initial_state
, ('videoData', 'stat', 'like')),
324 'comment_count': traverse_obj(initial_state
, ('videoData', 'stat', 'reply')),
325 'tags': traverse_obj(initial_state
, ('tags', ..., 'tag_name')),
326 'thumbnail': traverse_obj(initial_state
, ('videoData', 'pic')),
327 'timestamp': traverse_obj(initial_state
, ('videoData', 'pubdate')),
328 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
329 'chapters': self
._get
_chapters
(aid
, cid
),
330 'subtitles': self
.extract_subtitles(video_id
, initial_state
, cid
),
331 '__post_extractor': self
.extract_comments(aid
),
332 'http_headers': {'Referer': url}
,
336 class BiliBiliBangumiIE(BilibiliBaseIE
):
337 _VALID_URL
= r
'(?x)https?://www\.bilibili\.com/bangumi/play/(?P<id>(?:ss|ep)\d+)'
340 'url': 'https://www.bilibili.com/bangumi/play/ss897',
350 'title': '神的记事本:第2话 你与旅行包',
351 'duration': 1428.487,
352 'timestamp': 1310809380,
353 'upload_date': '20110716',
354 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
357 'url': 'https://www.bilibili.com/bangumi/play/ep508406',
358 'only_matching': True,
361 def _real_extract(self
, url
):
362 video_id
= self
._match
_id
(url
)
363 webpage
= self
._download
_webpage
(url
, video_id
)
365 if '您所在的地区无法观看本片' in webpage
:
366 raise GeoRestrictedError('This video is restricted')
367 elif ('开通大会员观看' in webpage
and '__playinfo__' not in webpage
368 or '正在观看预览,大会员免费看全片' in webpage
):
369 self
.raise_login_required('This video is for premium members only')
371 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=\s*', webpage
, 'play info', video_id
)['data']
372 formats
= self
.extract_formats(play_info
)
373 if (not formats
and '成为大会员抢先看' in webpage
374 and play_info
.get('durl') and not play_info
.get('dash')):
375 self
.raise_login_required('This video is for premium members only')
377 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
379 season_id
= traverse_obj(initial_state
, ('mediaInfo', 'season_id'))
380 season_number
= season_id
and next((
381 idx
+ 1 for idx
, e
in enumerate(
382 traverse_obj(initial_state
, ('mediaInfo', 'seasons', ...)))
383 if e
.get('season_id') == season_id
389 'title': traverse_obj(initial_state
, 'h1Title'),
390 'episode': traverse_obj(initial_state
, ('epInfo', 'long_title')),
391 'episode_number': int_or_none(traverse_obj(initial_state
, ('epInfo', 'title'))),
392 'series': traverse_obj(initial_state
, ('mediaInfo', 'series')),
393 'season': traverse_obj(initial_state
, ('mediaInfo', 'season_title')),
394 'season_id': season_id
,
395 'season_number': season_number
,
396 'thumbnail': traverse_obj(initial_state
, ('epInfo', 'cover')),
397 'timestamp': traverse_obj(initial_state
, ('epInfo', 'pub_time')),
398 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
399 'subtitles': self
.extract_subtitles(
400 video_id
, initial_state
, traverse_obj(initial_state
, ('epInfo', 'cid'))),
401 '__post_extractor': self
.extract_comments(traverse_obj(initial_state
, ('epInfo', 'aid'))),
402 'http_headers': {'Referer': url, **self.geo_verification_headers()}
,
406 class BiliBiliBangumiMediaIE(InfoExtractor
):
407 _VALID_URL
= r
'https?://www\.bilibili\.com/bangumi/media/md(?P<id>\d+)'
409 'url': 'https://www.bilibili.com/bangumi/media/md24097891',
413 'playlist_mincount': 25,
416 def _real_extract(self
, url
):
417 media_id
= self
._match
_id
(url
)
418 webpage
= self
._download
_webpage
(url
, media_id
)
420 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial_state', media_id
)
421 episode_list
= self
._download
_json
(
422 'https://api.bilibili.com/pgc/web/season/section', media_id
,
423 query
={'season_id': initial_state['mediaInfo']['season_id']}
,
424 note
='Downloading season info')['result']['main_section']['episodes']
426 return self
.playlist_result((
427 self
.url_result(entry
['share_url'], BiliBiliBangumiIE
, entry
['aid'])
428 for entry
in episode_list
), media_id
)
class BilibiliSpaceBaseIE(InfoExtractor):
    """Shared paging machinery for the bilibili "space" (user page) extractors."""

    def _extract_playlist(self, fetch_page, get_metadata, get_entries):
        """Return (metadata, lazily-paged entries) built from three callbacks.

        fetch_page(idx) downloads page *idx* (0-based); get_metadata(page)
        reads at least 'page_count' and 'page_size' from the first page;
        get_entries(page) yields the entries contained in one page.
        """
        initial = fetch_page(0)
        meta = get_metadata(initial)

        def load_page(idx):
            # Page 0 was already downloaded for the metadata probe; reuse it
            # instead of fetching it a second time.
            page = initial if not idx else fetch_page(idx)
            return get_entries(page)

        entries = InAdvancePagedList(load_page, meta['page_count'], meta['page_size'])

        return meta, entries
443 class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE
):
444 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)(?P<video>/video)?/?(?:[?#]|$)'
446 'url': 'https://space.bilibili.com/3985676/video',
450 'playlist_mincount': 178,
453 def _real_extract(self
, url
):
454 playlist_id
, is_video_url
= self
._match
_valid
_url
(url
).group('id', 'video')
456 self
.to_screen('A channel URL was given. Only the channel\'s videos will be downloaded. '
457 'To download audios, add a "/audio" to the URL')
459 def fetch_page(page_idx
):
461 response
= self
._download
_json
('https://api.bilibili.com/x/space/arc/search',
462 playlist_id
, note
=f
'Downloading page {page_idx}',
463 query
={'mid': playlist_id, 'pn': page_idx + 1, 'jsonp': 'jsonp'}
)
464 except ExtractorError
as e
:
465 if isinstance(e
.cause
, urllib
.error
.HTTPError
) and e
.cause
.code
== 412:
466 raise ExtractorError(
467 'Request is blocked by server (412), please add cookies, wait and try later.', expected
=True)
469 if response
['code'] == -401:
470 raise ExtractorError(
471 'Request is blocked by server (401), please add cookies, wait and try later.', expected
=True)
472 return response
['data']
474 def get_metadata(page_data
):
475 page_size
= page_data
['page']['ps']
476 entry_count
= page_data
['page']['count']
478 'page_count': math
.ceil(entry_count
/ page_size
),
479 'page_size': page_size
,
482 def get_entries(page_data
):
483 for entry
in traverse_obj(page_data
, ('list', 'vlist')) or []:
484 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE
, entry
['bvid'])
486 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
487 return self
.playlist_result(paged_list
, playlist_id
)
490 class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE
):
491 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)/audio'
493 'url': 'https://space.bilibili.com/3985676/audio',
497 'playlist_mincount': 1,
500 def _real_extract(self
, url
):
501 playlist_id
= self
._match
_id
(url
)
503 def fetch_page(page_idx
):
504 return self
._download
_json
(
505 'https://api.bilibili.com/audio/music-service/web/song/upper', playlist_id
,
506 note
=f
'Downloading page {page_idx}',
507 query
={'uid': playlist_id, 'pn': page_idx + 1, 'ps': 30, 'order': 1, 'jsonp': 'jsonp'}
)['data']
509 def get_metadata(page_data
):
511 'page_count': page_data
['pageCount'],
512 'page_size': page_data
['pageSize'],
515 def get_entries(page_data
):
516 for entry
in page_data
.get('data', []):
517 yield self
.url_result(f
'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE
, entry
['id'])
519 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
520 return self
.playlist_result(paged_list
, playlist_id
)
523 class BilibiliSpacePlaylistIE(BilibiliSpaceBaseIE
):
524 _VALID_URL
= r
'https?://space.bilibili\.com/(?P<mid>\d+)/channel/collectiondetail\?sid=(?P<sid>\d+)'
526 'url': 'https://space.bilibili.com/2142762/channel/collectiondetail?sid=57445',
528 'id': '2142762_57445',
531 'playlist_mincount': 31,
534 def _real_extract(self
, url
):
535 mid
, sid
= self
._match
_valid
_url
(url
).group('mid', 'sid')
536 playlist_id
= f
'{mid}_{sid}'
538 def fetch_page(page_idx
):
539 return self
._download
_json
(
540 'https://api.bilibili.com/x/polymer/space/seasons_archives_list',
541 playlist_id
, note
=f
'Downloading page {page_idx}',
542 query
={'mid': mid, 'season_id': sid, 'page_num': page_idx + 1, 'page_size': 30}
)['data']
544 def get_metadata(page_data
):
545 page_size
= page_data
['page']['page_size']
546 entry_count
= page_data
['page']['total']
548 'page_count': math
.ceil(entry_count
/ page_size
),
549 'page_size': page_size
,
550 'title': traverse_obj(page_data
, ('meta', 'name'))
553 def get_entries(page_data
):
554 for entry
in page_data
.get('archives', []):
555 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}',
556 BiliBiliIE
, entry
['bvid'])
558 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
559 return self
.playlist_result(paged_list
, playlist_id
, metadata
['title'])
562 class BilibiliCategoryIE(InfoExtractor
):
563 IE_NAME
= 'Bilibili category extractor'
564 _MAX_RESULTS
= 1000000
565 _VALID_URL
= r
'https?://www\.bilibili\.com/v/[a-zA-Z]+\/[a-zA-Z]+'
567 'url': 'https://www.bilibili.com/v/kichiku/mad',
569 'id': 'kichiku: mad',
570 'title': 'kichiku: mad'
572 'playlist_mincount': 45,
578 def _fetch_page(self
, api_url
, num_pages
, query
, page_num
):
579 parsed_json
= self
._download
_json
(
580 api_url
, query
, query
={'Search_key': query, 'pn': page_num}
,
581 note
='Extracting results from page %s of %s' % (page_num
, num_pages
))
583 video_list
= traverse_obj(parsed_json
, ('data', 'archives'), expected_type
=list)
585 raise ExtractorError('Failed to retrieve video list for page %d' % page_num
)
587 for video
in video_list
:
588 yield self
.url_result(
589 'https://www.bilibili.com/video/%s' % video
['bvid'], 'BiliBili', video
['bvid'])
591 def _entries(self
, category
, subcategory
, query
):
592 # map of categories : subcategories : RIDs
596 'manual_vocaloid': 126,
603 if category
not in rid_map
:
604 raise ExtractorError(
605 f
'The category {category} isn\'t supported. Supported categories: {list(rid_map.keys())}')
606 if subcategory
not in rid_map
[category
]:
607 raise ExtractorError(
608 f
'The subcategory {subcategory} isn\'t supported for this category. Supported subcategories: {list(rid_map[category].keys())}')
609 rid_value
= rid_map
[category
][subcategory
]
611 api_url
= 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
612 page_json
= self
._download
_json
(api_url
, query
, query
={'Search_key': query, 'pn': '1'}
)
613 page_data
= traverse_obj(page_json
, ('data', 'page'), expected_type
=dict)
614 count
, size
= int_or_none(page_data
.get('count')), int_or_none(page_data
.get('size'))
615 if count
is None or not size
:
616 raise ExtractorError('Failed to calculate either page count or size')
618 num_pages
= math
.ceil(count
/ size
)
620 return OnDemandPagedList(functools
.partial(
621 self
._fetch
_page
, api_url
, num_pages
, query
), size
)
623 def _real_extract(self
, url
):
624 category
, subcategory
= urllib
.parse
.urlparse(url
).path
.split('/')[2:4]
625 query
= '%s: %s' % (category
, subcategory
)
627 return self
.playlist_result(self
._entries
(category
, subcategory
, query
), query
, query
)
630 class BiliBiliSearchIE(SearchInfoExtractor
):
631 IE_DESC
= 'Bilibili video search'
632 _MAX_RESULTS
= 100000
633 _SEARCH_KEY
= 'bilisearch'
635 def _search_results(self
, query
):
636 for page_num
in itertools
.count(1):
637 videos
= self
._download
_json
(
638 'https://api.bilibili.com/x/web-interface/search/type', query
,
639 note
=f
'Extracting results from page {page_num}', query
={
646 '__refresh__': 'true',
647 'search_type': 'video',
650 })['data'].get('result')
654 yield self
.url_result(video
['arcurl'], 'BiliBili', str(video
['aid']))
657 class BilibiliAudioBaseIE(InfoExtractor
):
658 def _call_api(self
, path
, sid
, query
=None):
661 return self
._download
_json
(
662 'https://www.bilibili.com/audio/music-service-c/web/' + path
,
663 sid
, query
=query
)['data']
666 class BilibiliAudioIE(BilibiliAudioBaseIE
):
667 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
669 'url': 'https://www.bilibili.com/audio/au1003142',
670 'md5': 'fec4987014ec94ef9e666d4d158ad03b',
674 'title': '【tsukimi】YELLOW / 神山羊',
676 'comment_count': int,
677 'description': 'YELLOW的mp3版!',
684 'thumbnail': r
're:^https?://.+\.jpg',
685 'timestamp': 1564836614,
686 'upload_date': '20190803',
687 'uploader': 'tsukimi-つきみぐー',
692 def _real_extract(self
, url
):
693 au_id
= self
._match
_id
(url
)
695 play_data
= self
._call
_api
('url', au_id
)
697 'url': play_data
['cdns'][0],
698 'filesize': int_or_none(play_data
.get('size')),
702 for a_format
in formats
:
703 a_format
.setdefault('http_headers', {}).update({
707 song
= self
._call
_api
('song/info', au_id
)
708 title
= song
['title']
709 statistic
= song
.get('statistic') or {}
712 lyric
= song
.get('lyric')
724 'artist': song
.get('author'),
725 'comment_count': int_or_none(statistic
.get('comment')),
726 'description': song
.get('intro'),
727 'duration': int_or_none(song
.get('duration')),
728 'subtitles': subtitles
,
729 'thumbnail': song
.get('cover'),
730 'timestamp': int_or_none(song
.get('passtime')),
731 'uploader': song
.get('uname'),
732 'view_count': int_or_none(statistic
.get('play')),
736 class BilibiliAudioAlbumIE(BilibiliAudioBaseIE
):
737 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
739 'url': 'https://www.bilibili.com/audio/am10624',
742 'title': '每日新曲推荐(每日11:00更新)',
743 'description': '每天11:00更新,为你推送最新音乐',
745 'playlist_count': 19,
748 def _real_extract(self
, url
):
749 am_id
= self
._match
_id
(url
)
751 songs
= self
._call
_api
(
752 'song/of-menu', am_id
, {'sid': am_id, 'pn': 1, 'ps': 100}
)['data']
756 sid
= str_or_none(song
.get('id'))
759 entries
.append(self
.url_result(
760 'https://www.bilibili.com/audio/au' + sid
,
761 BilibiliAudioIE
.ie_key(), sid
))
764 album_data
= self
._call
_api
('menu/info', am_id
) or {}
765 album_title
= album_data
.get('title')
767 for entry
in entries
:
768 entry
['album'] = album_title
769 return self
.playlist_result(
770 entries
, am_id
, album_title
, album_data
.get('intro'))
772 return self
.playlist_result(entries
, am_id
)
775 class BiliBiliPlayerIE(InfoExtractor
):
776 _VALID_URL
= r
'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
778 'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
779 'only_matching': True,
782 def _real_extract(self
, url
):
783 video_id
= self
._match
_id
(url
)
784 return self
.url_result(
785 'http://www.bilibili.tv/video/av%s/' % video_id
,
786 ie
=BiliBiliIE
.ie_key(), video_id
=video_id
)
789 class BiliIntlBaseIE(InfoExtractor
):
790 _API_URL
= 'https://api.bilibili.tv/intl/gateway'
791 _NETRC_MACHINE
= 'biliintl'
793 def _call_api(self
, endpoint
, *args
, **kwargs
):
794 json
= self
._download
_json
(self
._API
_URL
+ endpoint
, *args
, **kwargs
)
796 if json
['code'] in (10004004, 10004005, 10023006):
797 self
.raise_login_required()
798 elif json
['code'] == 10004001:
799 self
.raise_geo_restricted()
801 if json
.get('message') and str(json
['code']) != json
['message']:
802 errmsg
= f
'{kwargs.get("errnote", "Unable to download JSON metadata")}: {self.IE_NAME} said: {json["message"]}'
804 errmsg
= kwargs
.get('errnote', 'Unable to download JSON metadata')
805 if kwargs
.get('fatal'):
806 raise ExtractorError(errmsg
)
808 self
.report_warning(errmsg
)
809 return json
.get('data')
811 def json2srt(self
, json
):
813 f
'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
814 for i
, line
in enumerate(traverse_obj(json
, (
815 'body', lambda _
, l
: l
['content'] and l
['from'] and l
['to']))))
818 def _get_subtitles(self
, *, ep_id
=None, aid
=None):
819 sub_json
= self
._call
_api
(
820 '/web/v2/subtitle', ep_id
or aid
, fatal
=False,
821 note
='Downloading subtitles list', errnote
='Unable to download subtitles list',
829 for sub
in sub_json
.get('subtitles') or []:
830 sub_url
= sub
.get('url')
833 sub_data
= self
._download
_json
(
834 sub_url
, ep_id
or aid
, errnote
='Unable to download subtitles', fatal
=False,
835 note
='Downloading subtitles%s' % f
' for {sub["lang"]}' if sub
.get('lang') else '')
838 subtitles
.setdefault(sub
.get('lang_key', 'en'), []).append({
840 'data': self
.json2srt(sub_data
)
844 def _get_formats(self
, *, ep_id
=None, aid
=None):
845 video_json
= self
._call
_api
(
846 '/web/playurl', ep_id
or aid
, note
='Downloading video formats',
847 errnote
='Unable to download video formats', query
=filter_dict({
852 video_json
= video_json
['playurl']
854 for vid
in video_json
.get('video') or []:
855 video_res
= vid
.get('video_resource') or {}
856 video_info
= vid
.get('stream_info') or {}
857 if not video_res
.get('url'):
860 'url': video_res
['url'],
862 'format_note': video_info
.get('desc_words'),
863 'width': video_res
.get('width'),
864 'height': video_res
.get('height'),
865 'vbr': video_res
.get('bandwidth'),
867 'vcodec': video_res
.get('codecs'),
868 'filesize': video_res
.get('size'),
870 for aud
in video_json
.get('audio_resource') or []:
871 if not aud
.get('url'):
876 'abr': aud
.get('bandwidth'),
877 'acodec': aud
.get('codecs'),
879 'filesize': aud
.get('size'),
882 self
._sort
_formats
(formats
)
885 def _extract_video_info(self
, video_data
, *, ep_id
=None, aid
=None):
888 'title': video_data
.get('title_display') or video_data
.get('title'),
889 'thumbnail': video_data
.get('cover'),
890 'episode_number': int_or_none(self
._search
_regex
(
891 r
'^E(\d+)(?:$| - )', video_data
.get('title_display') or '', 'episode number', default
=None)),
892 'formats': self
._get
_formats
(ep_id
=ep_id
, aid
=aid
),
893 'subtitles': self
._get
_subtitles
(ep_id
=ep_id
, aid
=aid
),
894 'extractor_key': BiliIntlIE
.ie_key(),
897 def _perform_login(self
, username
, password
):
899 from Cryptodome
.PublicKey
import RSA
900 from Cryptodome
.Cipher
import PKCS1_v1_5
903 from Crypto
.PublicKey
import RSA
904 from Crypto
.Cipher
import PKCS1_v1_5
906 raise ExtractorError('pycryptodomex not found. Please install', expected
=True)
908 key_data
= self
._download
_json
(
909 'https://passport.bilibili.tv/x/intl/passport-login/web/key?lang=en-US', None,
910 note
='Downloading login key', errnote
='Unable to download login key')['data']
912 public_key
= RSA
.importKey(key_data
['key'])
913 password_hash
= PKCS1_v1_5
.new(public_key
).encrypt((key_data
['hash'] + password
).encode('utf-8'))
914 login_post
= self
._download
_json
(
915 'https://passport.bilibili.tv/x/intl/passport-login/web/login/password?lang=en-US', None, data
=urlencode_postdata({
916 'username': username
,
917 'password': base64
.b64encode(password_hash
).decode('ascii'),
921 }), note
='Logging in', errnote
='Unable to log in')
922 if login_post
.get('code'):
923 if login_post
.get('message'):
924 raise ExtractorError(f
'Unable to log in: {self.IE_NAME} said: {login_post["message"]}', expected
=True)
926 raise ExtractorError('Unable to log in')
929 class BiliIntlIE(BiliIntlBaseIE
):
930 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(play/(?P<season_id>\d+)/(?P<ep_id>\d+)|video/(?P<aid>\d+))'
933 'url': 'https://www.bilibili.tv/en/play/34613/341736',
937 'title': 'E2 - The First Night',
938 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
943 'url': 'https://www.bilibili.tv/en/play/1033760/11005006',
947 'title': 'E3 - Who?',
948 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
952 # Subtitle with empty content
953 'url': 'https://www.bilibili.tv/en/play/1005144/10131790',
957 'title': 'E140 - Two Heartbeats: Kabuto\'s Trap',
958 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
959 'episode_number': 140,
961 'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
963 'url': 'https://www.biliintl.com/en/play/34613/341736',
964 'only_matching': True,
966 # User-generated content (as opposed to a series licensed from a studio)
967 'url': 'https://bilibili.tv/en/video/2019955076',
968 'only_matching': True,
971 'url': 'https://www.bilibili.tv/video/2019955076',
972 'only_matching': True,
974 # Uppercase language in URL
975 'url': 'https://www.bilibili.tv/EN/video/2019955076',
976 'only_matching': True,
979 def _real_extract(self
, url
):
980 season_id
, ep_id
, aid
= self
._match
_valid
_url
(url
).group('season_id', 'ep_id', 'aid')
981 video_id
= ep_id
or aid
982 webpage
= self
._download
_webpage
(url
, video_id
)
985 self
._search
_json
(r
'window\.__INITIAL_(?:DATA|STATE)__\s*=', webpage
, 'preload state', video_id
, default
={})
986 or self
._search
_nuxt
_data
(webpage
, video_id
, '__initialState', fatal
=False, traverse
=None))
987 video_data
= traverse_obj(
988 initial_data
, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type
=dict)
990 if season_id
and not video_data
:
991 # Non-Bstation layout, read through episode list
992 season_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id
)
993 video_data
= traverse_obj(season_json
,
994 ('sections', ..., 'episodes', lambda _
, v
: str(v
['episode_id']) == ep_id
),
995 expected_type
=dict, get_all
=False)
996 return self
._extract
_video
_info
(video_data
or {}, ep_id
=ep_id
, aid
=aid
)
999 class BiliIntlSeriesIE(BiliIntlBaseIE
):
1000 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?play/(?P<id>\d+)/?(?:[?#]|$)'
1002 'url': 'https://www.bilibili.tv/en/play/34613',
1003 'playlist_mincount': 15,
1006 'title': 'Fly Me to the Moon',
1007 'description': 'md5:a861ee1c4dc0acfad85f557cc42ac627',
1008 'categories': ['Romance', 'Comedy', 'Slice of life'],
1009 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
1013 'skip_download': True,
1016 'url': 'https://www.biliintl.com/en/play/34613',
1017 'only_matching': True,
1019 'url': 'https://www.biliintl.com/EN/play/34613',
1020 'only_matching': True,
1023 def _entries(self
, series_id
):
1024 series_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id
)
1025 for episode
in traverse_obj(series_json
, ('sections', ..., 'episodes', ...), expected_type
=dict, default
=[]):
1026 episode_id
= str(episode
.get('episode_id'))
1027 yield self
._extract
_video
_info
(episode
, ep_id
=episode_id
)
1029 def _real_extract(self
, url
):
1030 series_id
= self
._match
_id
(url
)
1031 series_info
= self
._call
_api
(f
'/web/v2/ogv/play/season_info?season_id={series_id}&platform=web', series_id
).get('season') or {}
1032 return self
.playlist_result(
1033 self
._entries
(series_id
), series_id
, series_info
.get('title'), series_info
.get('description'),
1034 categories
=traverse_obj(series_info
, ('styles', ..., 'title'), expected_type
=str_or_none
),
1035 thumbnail
=url_or_none(series_info
.get('horizontal_cover')), view_count
=parse_count(series_info
.get('view')))
1038 class BiliLiveIE(InfoExtractor
):
1039 _VALID_URL
= r
'https?://live.bilibili.com/(?P<id>\d+)'
1042 'url': 'https://live.bilibili.com/196',
1045 'description': "周六杂谈回,其他时候随机游戏。 | \n录播:@下播型泛式录播组。 | \n直播通知群(全员禁言):666906670,902092584,59971⑧481 (功能一样,别多加)",
1047 'title': "太空狼人杀联动,不被爆杀就算赢",
1048 'thumbnail': "https://i0.hdslb.com/bfs/live/new_room_cover/e607bc1529057ef4b332e1026e62cf46984c314d.jpg",
1049 'timestamp': 1650802769,
1053 'url': 'https://live.bilibili.com/196?broadcast_type=0&is_room_feed=1?spm_id_from=333.999.space_home.strengthen_live_card.click',
1054 'only_matching': True
1058 80: {'format_id': 'low', 'format_note': '流畅'}
,
1059 150: {'format_id': 'high_res', 'format_note': '高清'}
,
1060 250: {'format_id': 'ultra_high_res', 'format_note': '超清'}
,
1061 400: {'format_id': 'blue_ray', 'format_note': '蓝光'}
,
1062 10000: {'format_id': 'source', 'format_note': '原画'}
,
1063 20000: {'format_id': '4K', 'format_note': '4K'}
,
1064 30000: {'format_id': 'dolby', 'format_note': '杜比'}
,
1067 _quality
= staticmethod(qualities(list(_FORMATS
)))
1069 def _call_api(self
, path
, room_id
, query
):
1070 api_result
= self
._download
_json
(f
'https://api.live.bilibili.com/{path}', room_id
, query
=query
)
1071 if api_result
.get('code') != 0:
1072 raise ExtractorError(api_result
.get('message') or 'Unable to download JSON metadata')
1073 return api_result
.get('data') or {}
1075 def _parse_formats(self
, qn
, fmt
):
1076 for codec
in fmt
.get('codec') or []:
1077 if codec
.get('current_qn') != qn
:
1079 for url_info
in codec
['url_info']:
1081 'url': f
'{url_info["host"]}{codec["base_url"]}{url_info["extra"]}',
1082 'ext': fmt
.get('format_name'),
1083 'vcodec': codec
.get('codec_name'),
1084 'quality': self
._quality
(qn
),
1085 **self
._FORMATS
[qn
],
1088 def _real_extract(self
, url
):
1089 room_id
= self
._match
_id
(url
)
1090 room_data
= self
._call
_api
('room/v1/Room/get_info', room_id
, {'id': room_id}
)
1091 if room_data
.get('live_status') == 0:
1092 raise ExtractorError('Streamer is not live', expected
=True)
1095 for qn
in self
._FORMATS
.keys():
1096 stream_data
= self
._call
_api
('xlive/web-room/v2/index/getRoomPlayInfo', room_id
, {
1106 for fmt
in traverse_obj(stream_data
, ('playurl_info', 'playurl', 'stream', ..., 'format', ...)) or []:
1107 formats
.extend(self
._parse
_formats
(qn
, fmt
))
1108 self
._sort
_formats
(formats
)
1112 'title': room_data
.get('title'),
1113 'description': room_data
.get('description'),
1114 'thumbnail': room_data
.get('user_cover'),
1115 'timestamp': stream_data
.get('live_time'),