8 from .common
import InfoExtractor
, SearchInfoExtractor
23 srt_subtitles_timecode
,
31 class BilibiliBaseIE(InfoExtractor
):
32 def extract_formats(self
, play_info
):
34 r
['quality']: traverse_obj(r
, 'new_description', 'display_desc')
35 for r
in traverse_obj(play_info
, ('support_formats', lambda _
, v
: v
['quality']))
38 audios
= traverse_obj(play_info
, ('dash', 'audio', ...))
39 flac_audio
= traverse_obj(play_info
, ('dash', 'flac', 'audio'))
41 audios
.append(flac_audio
)
43 'url': traverse_obj(audio
, 'baseUrl', 'base_url', 'url'),
44 'ext': mimetype2ext(traverse_obj(audio
, 'mimeType', 'mime_type')),
45 'acodec': audio
.get('codecs'),
47 'tbr': float_or_none(audio
.get('bandwidth'), scale
=1000),
48 'filesize': int_or_none(audio
.get('size'))
49 } for audio
in audios
]
52 'url': traverse_obj(video
, 'baseUrl', 'base_url', 'url'),
53 'ext': mimetype2ext(traverse_obj(video
, 'mimeType', 'mime_type')),
54 'fps': float_or_none(traverse_obj(video
, 'frameRate', 'frame_rate')),
55 'width': int_or_none(video
.get('width')),
56 'height': int_or_none(video
.get('height')),
57 'vcodec': video
.get('codecs'),
58 'acodec': 'none' if audios
else None,
59 'tbr': float_or_none(video
.get('bandwidth'), scale
=1000),
60 'filesize': int_or_none(video
.get('size')),
61 'quality': int_or_none(video
.get('id')),
62 'format': format_names
.get(video
.get('id')),
63 } for video
in traverse_obj(play_info
, ('dash', 'video', ...)))
65 missing_formats
= format_names
.keys() - set(traverse_obj(formats
, (..., 'quality')))
67 self
.to_screen(f
'Format(s) {", ".join(format_names[i] for i in missing_formats)} are missing; '
68 f
'you have to login or become premium member to download them. {self._login_hint()}')
72 def json2srt(self
, json_data
):
74 for idx
, line
in enumerate(json_data
.get('body') or []):
75 srt_data
+= (f
'{idx + 1}\n'
76 f
'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n'
77 f
'{line["content"]}\n\n')
80 def _get_subtitles(self
, video_id
, initial_state
, cid
):
84 'url': f
'https://comment.bilibili.com/{cid}.xml',
88 for s
in traverse_obj(initial_state
, ('videoData', 'subtitle', 'list')) or []:
89 subtitles
.setdefault(s
['lan'], []).append({
91 'data': self
.json2srt(self
._download
_json
(s
['subtitle_url'], video_id
))
95 def _get_chapters(self
, aid
, cid
):
96 chapters
= aid
and cid
and self
._download
_json
(
97 'https://api.bilibili.com/x/player/v2', aid
, query
={'aid': aid, 'cid': cid}
,
98 note
='Extracting chapters', fatal
=False)
99 return traverse_obj(chapters
, ('data', 'view_points', ..., {
101 'start_time': 'from',
105 def _get_comments(self
, aid
):
106 for idx
in itertools
.count(1):
107 replies
= traverse_obj(
109 f
'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685',
110 aid
, note
=f
'Extracting comments from page {idx}', fatal
=False),
114 for children
in map(self
._get
_all
_children
, replies
):
117 def _get_all_children(self
, reply
):
119 'author': traverse_obj(reply
, ('member', 'uname')),
120 'author_id': traverse_obj(reply
, ('member', 'mid')),
121 'id': reply
.get('rpid'),
122 'text': traverse_obj(reply
, ('content', 'message')),
123 'timestamp': reply
.get('ctime'),
124 'parent': reply
.get('parent') or 'root',
126 for children
in map(self
._get
_all
_children
, traverse_obj(reply
, ('replies', ...))):
130 class BiliBiliIE(BilibiliBaseIE
):
131 _VALID_URL
= r
'https?://www\.bilibili\.com/video/[aAbB][vV](?P<id>[^/?#&]+)'
134 'url': 'https://www.bilibili.com/video/BV13x41117TL',
136 'id': 'BV13x41117TL',
137 'title': '阿滴英文|英文歌分享#6 "Closer',
139 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文',
140 'uploader_id': '65880958',
142 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
145 'comment_count': int,
146 'upload_date': '20170301',
147 'timestamp': 1488353834,
153 'url': 'http://www.bilibili.com/video/av1074402/',
155 'thumbnail': r
're:^https?://.*\.(jpg|jpeg)$',
158 'uploader_id': '156160',
159 'id': 'BV11x411K7CN',
162 'upload_date': '20140420',
163 'timestamp': 1397983878,
164 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
166 'comment_count': int,
170 'params': {'skip_download': True}
,
173 'url': 'https://www.bilibili.com/video/BV1bK411W797',
175 'id': 'BV1bK411W797',
176 'title': '物语中的人物是如何吐槽自己的OP的'
178 'playlist_count': 18,
181 'id': 'BV1bK411W797_p1',
183 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
185 'timestamp': 1589601697,
186 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
187 'uploader': '打牌还是打桩',
188 'uploader_id': '150259984',
190 'comment_count': int,
191 'upload_date': '20200516',
193 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
198 'note': 'Specific page of Anthology',
199 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1',
201 'id': 'BV1bK411W797_p1',
203 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川',
205 'timestamp': 1589601697,
206 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
207 'uploader': '打牌还是打桩',
208 'uploader_id': '150259984',
210 'comment_count': int,
211 'upload_date': '20200516',
213 'description': 'md5:e3c401cf7bc363118d1783dd74068a68',
217 'note': 'video has subtitles',
218 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh',
220 'id': 'BV12N4y1M7rh',
222 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1',
224 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4',
226 'upload_date': '20220709',
227 'uploader': '小夫Tech',
228 'timestamp': 1657347907,
229 'uploader_id': '1326814124',
230 'comment_count': int,
233 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
234 'subtitles': 'count:2'
236 'params': {'listsubtitles': True}
,
238 'url': 'https://www.bilibili.com/video/av8903802/',
240 'id': 'BV13x41117TL',
242 'title': '阿滴英文|英文歌分享#6 "Closer',
243 'upload_date': '20170301',
244 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a',
245 'timestamp': 1488353834,
246 'uploader_id': '65880958',
248 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
251 'comment_count': int,
256 'skip_download': True,
259 'note': 'video has chapter',
260 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/',
262 'id': 'BV1vL411G7N7',
264 'title': '如何为你的B站视频添加进度条分段',
265 'timestamp': 1634554558,
266 'upload_date': '20211018',
267 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d',
269 'uploader': '爱喝咖啡的当麻',
271 'uploader_id': '1680903',
272 'chapters': 'count:6',
273 'comment_count': int,
276 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
278 'params': {'skip_download': True}
,
281 def _real_extract(self
, url
):
282 video_id
= self
._match
_id
(url
)
283 webpage
= self
._download
_webpage
(url
, video_id
)
284 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
285 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
287 video_data
= initial_state
['videoData']
288 video_id
, title
= video_data
['bvid'], video_data
.get('title')
290 # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
291 page_list_json
= traverse_obj(
293 'https://api.bilibili.com/x/player/pagelist', video_id
,
294 fatal
=False, query
={'bvid': video_id, 'jsonp': 'jsonp'}
,
295 note
='Extracting videos in anthology'),
296 'data', expected_type
=list) or []
297 is_anthology
= len(page_list_json
) > 1
299 part_id
= int_or_none(parse_qs(url
).get('p', [None])[-1])
300 if is_anthology
and not part_id
and self
._yes
_playlist
(video_id
, video_id
):
301 return self
.playlist_from_matches(
302 page_list_json
, video_id
, title
, ie
=BiliBiliIE
,
303 getter
=lambda entry
: f
'https://www.bilibili.com/video/{video_id}?p={entry["page"]}')
306 title
+= f
' p{part_id:02d} {traverse_obj(page_list_json, ((part_id or 1) - 1, "part")) or ""}'
308 aid
= video_data
.get('aid')
309 old_video_id
= format_field(aid
, None, f
'%s_part{part_id or 1}')
311 cid
= traverse_obj(video_data
, ('pages', part_id
- 1, 'cid')) if part_id
else video_data
.get('cid')
314 'id': f
'{video_id}{format_field(part_id, None, "_p%d")}',
315 'formats': self
.extract_formats(play_info
),
316 '_old_archive_ids': [make_archive_id(self
, old_video_id
)] if old_video_id
else None,
318 'description': traverse_obj(initial_state
, ('videoData', 'desc')),
319 'view_count': traverse_obj(initial_state
, ('videoData', 'stat', 'view')),
320 'uploader': traverse_obj(initial_state
, ('upData', 'name')),
321 'uploader_id': traverse_obj(initial_state
, ('upData', 'mid')),
322 'like_count': traverse_obj(initial_state
, ('videoData', 'stat', 'like')),
323 'comment_count': traverse_obj(initial_state
, ('videoData', 'stat', 'reply')),
324 'tags': traverse_obj(initial_state
, ('tags', ..., 'tag_name')),
325 'thumbnail': traverse_obj(initial_state
, ('videoData', 'pic')),
326 'timestamp': traverse_obj(initial_state
, ('videoData', 'pubdate')),
327 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
328 'chapters': self
._get
_chapters
(aid
, cid
),
329 'subtitles': self
.extract_subtitles(video_id
, initial_state
, cid
),
330 '__post_extractor': self
.extract_comments(aid
),
331 'http_headers': {'Referer': url}
,
335 class BiliBiliBangumiIE(BilibiliBaseIE
):
336 _VALID_URL
= r
'(?x)https?://www\.bilibili\.com/bangumi/play/(?P<id>(?:ss|ep)\d+)'
339 'url': 'https://www.bilibili.com/bangumi/play/ss897',
349 'title': '神的记事本:第2话 你与旅行包',
350 'duration': 1428.487,
351 'timestamp': 1310809380,
352 'upload_date': '20110716',
353 'thumbnail': r
're:^https?://.*\.(jpg|jpeg|png)$',
356 'url': 'https://www.bilibili.com/bangumi/play/ep508406',
357 'only_matching': True,
360 def _real_extract(self
, url
):
361 video_id
= self
._match
_id
(url
)
362 webpage
= self
._download
_webpage
(url
, video_id
)
364 if '您所在的地区无法观看本片' in webpage
:
365 raise GeoRestrictedError('This video is restricted')
366 elif ('开通大会员观看' in webpage
and '__playinfo__' not in webpage
367 or '正在观看预览,大会员免费看全片' in webpage
):
368 self
.raise_login_required('This video is for premium members only')
370 play_info
= self
._search
_json
(r
'window\.__playinfo__\s*=', webpage
, 'play info', video_id
)['data']
371 formats
= self
.extract_formats(play_info
)
372 if (not formats
and '成为大会员抢先看' in webpage
373 and play_info
.get('durl') and not play_info
.get('dash')):
374 self
.raise_login_required('This video is for premium members only')
376 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial state', video_id
)
378 season_id
= traverse_obj(initial_state
, ('mediaInfo', 'season_id'))
379 season_number
= season_id
and next((
380 idx
+ 1 for idx
, e
in enumerate(
381 traverse_obj(initial_state
, ('mediaInfo', 'seasons', ...)))
382 if e
.get('season_id') == season_id
388 'title': traverse_obj(initial_state
, 'h1Title'),
389 'episode': traverse_obj(initial_state
, ('epInfo', 'long_title')),
390 'episode_number': int_or_none(traverse_obj(initial_state
, ('epInfo', 'title'))),
391 'series': traverse_obj(initial_state
, ('mediaInfo', 'series')),
392 'season': traverse_obj(initial_state
, ('mediaInfo', 'season_title')),
393 'season_id': season_id
,
394 'season_number': season_number
,
395 'thumbnail': traverse_obj(initial_state
, ('epInfo', 'cover')),
396 'timestamp': traverse_obj(initial_state
, ('epInfo', 'pub_time')),
397 'duration': float_or_none(play_info
.get('timelength'), scale
=1000),
398 'subtitles': self
.extract_subtitles(
399 video_id
, initial_state
, traverse_obj(initial_state
, ('epInfo', 'cid'))),
400 '__post_extractor': self
.extract_comments(traverse_obj(initial_state
, ('epInfo', 'aid'))),
401 'http_headers': {'Referer': url, **self.geo_verification_headers()}
,
405 class BiliBiliBangumiMediaIE(InfoExtractor
):
406 _VALID_URL
= r
'https?://www\.bilibili\.com/bangumi/media/md(?P<id>\d+)'
408 'url': 'https://www.bilibili.com/bangumi/media/md24097891',
412 'playlist_mincount': 25,
415 def _real_extract(self
, url
):
416 media_id
= self
._match
_id
(url
)
417 webpage
= self
._download
_webpage
(url
, media_id
)
419 initial_state
= self
._search
_json
(r
'window\.__INITIAL_STATE__\s*=', webpage
, 'initial_state', media_id
)
420 episode_list
= self
._download
_json
(
421 'https://api.bilibili.com/pgc/web/season/section', media_id
,
422 query
={'season_id': initial_state['mediaInfo']['season_id']}
,
423 note
='Downloading season info')['result']['main_section']['episodes']
425 return self
.playlist_result((
426 self
.url_result(entry
['share_url'], BiliBiliBangumiIE
, entry
['aid'])
427 for entry
in episode_list
), media_id
)
class BilibiliSpaceBaseIE(InfoExtractor):
    def _extract_playlist(self, fetch_page, get_metadata, get_entries):
        """Build ``(metadata, paged entries)`` from the supplied page callbacks.

        ``fetch_page(idx)`` downloads page ``idx`` (0-based); ``get_metadata``
        reads page-count/page-size from a page; ``get_entries`` yields entries
        from a page.
        """
        initial_page = fetch_page(0)
        meta = get_metadata(initial_page)

        def load_page(page_idx):
            # Reuse the already-downloaded first page instead of fetching it twice.
            return get_entries(initial_page if not page_idx else fetch_page(page_idx))

        return meta, InAdvancePagedList(load_page, meta['page_count'], meta['page_size'])
442 class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE
):
443 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)(?P<video>/video)?/?(?:[?#]|$)'
445 'url': 'https://space.bilibili.com/3985676/video',
449 'playlist_mincount': 178,
452 def _real_extract(self
, url
):
453 playlist_id
, is_video_url
= self
._match
_valid
_url
(url
).group('id', 'video')
455 self
.to_screen('A channel URL was given. Only the channel\'s videos will be downloaded. '
456 'To download audios, add a "/audio" to the URL')
458 def fetch_page(page_idx
):
460 response
= self
._download
_json
('https://api.bilibili.com/x/space/arc/search',
461 playlist_id
, note
=f
'Downloading page {page_idx}',
462 query
={'mid': playlist_id, 'pn': page_idx + 1, 'jsonp': 'jsonp'}
)
463 except ExtractorError
as e
:
464 if isinstance(e
.cause
, urllib
.error
.HTTPError
) and e
.cause
.code
== 412:
465 raise ExtractorError(
466 'Request is blocked by server (412), please add cookies, wait and try later.', expected
=True)
468 if response
['code'] == -401:
469 raise ExtractorError(
470 'Request is blocked by server (401), please add cookies, wait and try later.', expected
=True)
471 return response
['data']
473 def get_metadata(page_data
):
474 page_size
= page_data
['page']['ps']
475 entry_count
= page_data
['page']['count']
477 'page_count': math
.ceil(entry_count
/ page_size
),
478 'page_size': page_size
,
481 def get_entries(page_data
):
482 for entry
in traverse_obj(page_data
, ('list', 'vlist')) or []:
483 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE
, entry
['bvid'])
485 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
486 return self
.playlist_result(paged_list
, playlist_id
)
489 class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE
):
490 _VALID_URL
= r
'https?://space\.bilibili\.com/(?P<id>\d+)/audio'
492 'url': 'https://space.bilibili.com/3985676/audio',
496 'playlist_mincount': 1,
499 def _real_extract(self
, url
):
500 playlist_id
= self
._match
_id
(url
)
502 def fetch_page(page_idx
):
503 return self
._download
_json
(
504 'https://api.bilibili.com/audio/music-service/web/song/upper', playlist_id
,
505 note
=f
'Downloading page {page_idx}',
506 query
={'uid': playlist_id, 'pn': page_idx + 1, 'ps': 30, 'order': 1, 'jsonp': 'jsonp'}
)['data']
508 def get_metadata(page_data
):
510 'page_count': page_data
['pageCount'],
511 'page_size': page_data
['pageSize'],
514 def get_entries(page_data
):
515 for entry
in page_data
.get('data', []):
516 yield self
.url_result(f
'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE
, entry
['id'])
518 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
519 return self
.playlist_result(paged_list
, playlist_id
)
522 class BilibiliSpacePlaylistIE(BilibiliSpaceBaseIE
):
523 _VALID_URL
= r
'https?://space.bilibili\.com/(?P<mid>\d+)/channel/collectiondetail\?sid=(?P<sid>\d+)'
525 'url': 'https://space.bilibili.com/2142762/channel/collectiondetail?sid=57445',
527 'id': '2142762_57445',
530 'playlist_mincount': 31,
533 def _real_extract(self
, url
):
534 mid
, sid
= self
._match
_valid
_url
(url
).group('mid', 'sid')
535 playlist_id
= f
'{mid}_{sid}'
537 def fetch_page(page_idx
):
538 return self
._download
_json
(
539 'https://api.bilibili.com/x/polymer/space/seasons_archives_list',
540 playlist_id
, note
=f
'Downloading page {page_idx}',
541 query
={'mid': mid, 'season_id': sid, 'page_num': page_idx + 1, 'page_size': 30}
)['data']
543 def get_metadata(page_data
):
544 page_size
= page_data
['page']['page_size']
545 entry_count
= page_data
['page']['total']
547 'page_count': math
.ceil(entry_count
/ page_size
),
548 'page_size': page_size
,
549 'title': traverse_obj(page_data
, ('meta', 'name'))
552 def get_entries(page_data
):
553 for entry
in page_data
.get('archives', []):
554 yield self
.url_result(f
'https://www.bilibili.com/video/{entry["bvid"]}',
555 BiliBiliIE
, entry
['bvid'])
557 metadata
, paged_list
= self
._extract
_playlist
(fetch_page
, get_metadata
, get_entries
)
558 return self
.playlist_result(paged_list
, playlist_id
, metadata
['title'])
561 class BilibiliCategoryIE(InfoExtractor
):
562 IE_NAME
= 'Bilibili category extractor'
563 _MAX_RESULTS
= 1000000
564 _VALID_URL
= r
'https?://www\.bilibili\.com/v/[a-zA-Z]+\/[a-zA-Z]+'
566 'url': 'https://www.bilibili.com/v/kichiku/mad',
568 'id': 'kichiku: mad',
569 'title': 'kichiku: mad'
571 'playlist_mincount': 45,
577 def _fetch_page(self
, api_url
, num_pages
, query
, page_num
):
578 parsed_json
= self
._download
_json
(
579 api_url
, query
, query
={'Search_key': query, 'pn': page_num}
,
580 note
='Extracting results from page %s of %s' % (page_num
, num_pages
))
582 video_list
= traverse_obj(parsed_json
, ('data', 'archives'), expected_type
=list)
584 raise ExtractorError('Failed to retrieve video list for page %d' % page_num
)
586 for video
in video_list
:
587 yield self
.url_result(
588 'https://www.bilibili.com/video/%s' % video
['bvid'], 'BiliBili', video
['bvid'])
590 def _entries(self
, category
, subcategory
, query
):
591 # map of categories : subcategories : RIDs
595 'manual_vocaloid': 126,
602 if category
not in rid_map
:
603 raise ExtractorError(
604 f
'The category {category} isn\'t supported. Supported categories: {list(rid_map.keys())}')
605 if subcategory
not in rid_map
[category
]:
606 raise ExtractorError(
607 f
'The subcategory {subcategory} isn\'t supported for this category. Supported subcategories: {list(rid_map[category].keys())}')
608 rid_value
= rid_map
[category
][subcategory
]
610 api_url
= 'https://api.bilibili.com/x/web-interface/newlist?rid=%d&type=1&ps=20&jsonp=jsonp' % rid_value
611 page_json
= self
._download
_json
(api_url
, query
, query
={'Search_key': query, 'pn': '1'}
)
612 page_data
= traverse_obj(page_json
, ('data', 'page'), expected_type
=dict)
613 count
, size
= int_or_none(page_data
.get('count')), int_or_none(page_data
.get('size'))
614 if count
is None or not size
:
615 raise ExtractorError('Failed to calculate either page count or size')
617 num_pages
= math
.ceil(count
/ size
)
619 return OnDemandPagedList(functools
.partial(
620 self
._fetch
_page
, api_url
, num_pages
, query
), size
)
622 def _real_extract(self
, url
):
623 category
, subcategory
= urllib
.parse
.urlparse(url
).path
.split('/')[2:4]
624 query
= '%s: %s' % (category
, subcategory
)
626 return self
.playlist_result(self
._entries
(category
, subcategory
, query
), query
, query
)
629 class BiliBiliSearchIE(SearchInfoExtractor
):
630 IE_DESC
= 'Bilibili video search'
631 _MAX_RESULTS
= 100000
632 _SEARCH_KEY
= 'bilisearch'
634 def _search_results(self
, query
):
635 for page_num
in itertools
.count(1):
636 videos
= self
._download
_json
(
637 'https://api.bilibili.com/x/web-interface/search/type', query
,
638 note
=f
'Extracting results from page {page_num}', query
={
645 '__refresh__': 'true',
646 'search_type': 'video',
649 })['data'].get('result')
653 yield self
.url_result(video
['arcurl'], 'BiliBili', str(video
['aid']))
656 class BilibiliAudioBaseIE(InfoExtractor
):
657 def _call_api(self
, path
, sid
, query
=None):
660 return self
._download
_json
(
661 'https://www.bilibili.com/audio/music-service-c/web/' + path
,
662 sid
, query
=query
)['data']
665 class BilibiliAudioIE(BilibiliAudioBaseIE
):
666 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/au(?P<id>\d+)'
668 'url': 'https://www.bilibili.com/audio/au1003142',
669 'md5': 'fec4987014ec94ef9e666d4d158ad03b',
673 'title': '【tsukimi】YELLOW / 神山羊',
675 'comment_count': int,
676 'description': 'YELLOW的mp3版!',
683 'thumbnail': r
're:^https?://.+\.jpg',
684 'timestamp': 1564836614,
685 'upload_date': '20190803',
686 'uploader': 'tsukimi-つきみぐー',
691 def _real_extract(self
, url
):
692 au_id
= self
._match
_id
(url
)
694 play_data
= self
._call
_api
('url', au_id
)
696 'url': play_data
['cdns'][0],
697 'filesize': int_or_none(play_data
.get('size')),
701 for a_format
in formats
:
702 a_format
.setdefault('http_headers', {}).update({
706 song
= self
._call
_api
('song/info', au_id
)
707 title
= song
['title']
708 statistic
= song
.get('statistic') or {}
711 lyric
= song
.get('lyric')
723 'artist': song
.get('author'),
724 'comment_count': int_or_none(statistic
.get('comment')),
725 'description': song
.get('intro'),
726 'duration': int_or_none(song
.get('duration')),
727 'subtitles': subtitles
,
728 'thumbnail': song
.get('cover'),
729 'timestamp': int_or_none(song
.get('passtime')),
730 'uploader': song
.get('uname'),
731 'view_count': int_or_none(statistic
.get('play')),
735 class BilibiliAudioAlbumIE(BilibiliAudioBaseIE
):
736 _VALID_URL
= r
'https?://(?:www\.)?bilibili\.com/audio/am(?P<id>\d+)'
738 'url': 'https://www.bilibili.com/audio/am10624',
741 'title': '每日新曲推荐(每日11:00更新)',
742 'description': '每天11:00更新,为你推送最新音乐',
744 'playlist_count': 19,
747 def _real_extract(self
, url
):
748 am_id
= self
._match
_id
(url
)
750 songs
= self
._call
_api
(
751 'song/of-menu', am_id
, {'sid': am_id, 'pn': 1, 'ps': 100}
)['data']
755 sid
= str_or_none(song
.get('id'))
758 entries
.append(self
.url_result(
759 'https://www.bilibili.com/audio/au' + sid
,
760 BilibiliAudioIE
.ie_key(), sid
))
763 album_data
= self
._call
_api
('menu/info', am_id
) or {}
764 album_title
= album_data
.get('title')
766 for entry
in entries
:
767 entry
['album'] = album_title
768 return self
.playlist_result(
769 entries
, am_id
, album_title
, album_data
.get('intro'))
771 return self
.playlist_result(entries
, am_id
)
774 class BiliBiliPlayerIE(InfoExtractor
):
775 _VALID_URL
= r
'https?://player\.bilibili\.com/player\.html\?.*?\baid=(?P<id>\d+)'
777 'url': 'http://player.bilibili.com/player.html?aid=92494333&cid=157926707&page=1',
778 'only_matching': True,
781 def _real_extract(self
, url
):
782 video_id
= self
._match
_id
(url
)
783 return self
.url_result(
784 'http://www.bilibili.tv/video/av%s/' % video_id
,
785 ie
=BiliBiliIE
.ie_key(), video_id
=video_id
)
788 class BiliIntlBaseIE(InfoExtractor
):
789 _API_URL
= 'https://api.bilibili.tv/intl/gateway'
790 _NETRC_MACHINE
= 'biliintl'
792 def _call_api(self
, endpoint
, *args
, **kwargs
):
793 json
= self
._download
_json
(self
._API
_URL
+ endpoint
, *args
, **kwargs
)
795 if json
['code'] in (10004004, 10004005, 10023006):
796 self
.raise_login_required()
797 elif json
['code'] == 10004001:
798 self
.raise_geo_restricted()
800 if json
.get('message') and str(json
['code']) != json
['message']:
801 errmsg
= f
'{kwargs.get("errnote", "Unable to download JSON metadata")}: {self.IE_NAME} said: {json["message"]}'
803 errmsg
= kwargs
.get('errnote', 'Unable to download JSON metadata')
804 if kwargs
.get('fatal'):
805 raise ExtractorError(errmsg
)
807 self
.report_warning(errmsg
)
808 return json
.get('data')
810 def json2srt(self
, json
):
812 f
'{i + 1}\n{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n{line["content"]}'
813 for i
, line
in enumerate(traverse_obj(json
, (
814 'body', lambda _
, l
: l
['content'] and l
['from'] and l
['to']))))
817 def _get_subtitles(self
, *, ep_id
=None, aid
=None):
818 sub_json
= self
._call
_api
(
819 '/web/v2/subtitle', ep_id
or aid
, fatal
=False,
820 note
='Downloading subtitles list', errnote
='Unable to download subtitles list',
828 for sub
in sub_json
.get('subtitles') or []:
829 sub_url
= sub
.get('url')
832 sub_data
= self
._download
_json
(
833 sub_url
, ep_id
or aid
, errnote
='Unable to download subtitles', fatal
=False,
834 note
='Downloading subtitles%s' % f
' for {sub["lang"]}' if sub
.get('lang') else '')
837 subtitles
.setdefault(sub
.get('lang_key', 'en'), []).append({
839 'data': self
.json2srt(sub_data
)
843 def _get_formats(self
, *, ep_id
=None, aid
=None):
844 video_json
= self
._call
_api
(
845 '/web/playurl', ep_id
or aid
, note
='Downloading video formats',
846 errnote
='Unable to download video formats', query
=filter_dict({
851 video_json
= video_json
['playurl']
853 for vid
in video_json
.get('video') or []:
854 video_res
= vid
.get('video_resource') or {}
855 video_info
= vid
.get('stream_info') or {}
856 if not video_res
.get('url'):
859 'url': video_res
['url'],
861 'format_note': video_info
.get('desc_words'),
862 'width': video_res
.get('width'),
863 'height': video_res
.get('height'),
864 'vbr': video_res
.get('bandwidth'),
866 'vcodec': video_res
.get('codecs'),
867 'filesize': video_res
.get('size'),
869 for aud
in video_json
.get('audio_resource') or []:
870 if not aud
.get('url'):
875 'abr': aud
.get('bandwidth'),
876 'acodec': aud
.get('codecs'),
878 'filesize': aud
.get('size'),
883 def _extract_video_info(self
, video_data
, *, ep_id
=None, aid
=None):
886 'title': video_data
.get('title_display') or video_data
.get('title'),
887 'thumbnail': video_data
.get('cover'),
888 'episode_number': int_or_none(self
._search
_regex
(
889 r
'^E(\d+)(?:$| - )', video_data
.get('title_display') or '', 'episode number', default
=None)),
890 'formats': self
._get
_formats
(ep_id
=ep_id
, aid
=aid
),
891 'subtitles': self
._get
_subtitles
(ep_id
=ep_id
, aid
=aid
),
892 'extractor_key': BiliIntlIE
.ie_key(),
895 def _perform_login(self
, username
, password
):
897 from Cryptodome
.PublicKey
import RSA
898 from Cryptodome
.Cipher
import PKCS1_v1_5
901 from Crypto
.PublicKey
import RSA
902 from Crypto
.Cipher
import PKCS1_v1_5
904 raise ExtractorError('pycryptodomex not found. Please install', expected
=True)
906 key_data
= self
._download
_json
(
907 'https://passport.bilibili.tv/x/intl/passport-login/web/key?lang=en-US', None,
908 note
='Downloading login key', errnote
='Unable to download login key')['data']
910 public_key
= RSA
.importKey(key_data
['key'])
911 password_hash
= PKCS1_v1_5
.new(public_key
).encrypt((key_data
['hash'] + password
).encode('utf-8'))
912 login_post
= self
._download
_json
(
913 'https://passport.bilibili.tv/x/intl/passport-login/web/login/password?lang=en-US', None, data
=urlencode_postdata({
914 'username': username
,
915 'password': base64
.b64encode(password_hash
).decode('ascii'),
919 }), note
='Logging in', errnote
='Unable to log in')
920 if login_post
.get('code'):
921 if login_post
.get('message'):
922 raise ExtractorError(f
'Unable to log in: {self.IE_NAME} said: {login_post["message"]}', expected
=True)
924 raise ExtractorError('Unable to log in')
927 class BiliIntlIE(BiliIntlBaseIE
):
928 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(play/(?P<season_id>\d+)/(?P<ep_id>\d+)|video/(?P<aid>\d+))'
931 'url': 'https://www.bilibili.tv/en/play/34613/341736',
935 'title': 'E2 - The First Night',
936 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
941 'url': 'https://www.bilibili.tv/en/play/1033760/11005006',
945 'title': 'E3 - Who?',
946 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
950 # Subtitle with empty content
951 'url': 'https://www.bilibili.tv/en/play/1005144/10131790',
955 'title': 'E140 - Two Heartbeats: Kabuto\'s Trap',
956 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
957 'episode_number': 140,
959 'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
961 'url': 'https://www.biliintl.com/en/play/34613/341736',
962 'only_matching': True,
964 # User-generated content (as opposed to a series licensed from a studio)
965 'url': 'https://bilibili.tv/en/video/2019955076',
966 'only_matching': True,
969 'url': 'https://www.bilibili.tv/video/2019955076',
970 'only_matching': True,
972 # Uppercase language in URL
973 'url': 'https://www.bilibili.tv/EN/video/2019955076',
974 'only_matching': True,
977 def _real_extract(self
, url
):
978 season_id
, ep_id
, aid
= self
._match
_valid
_url
(url
).group('season_id', 'ep_id', 'aid')
979 video_id
= ep_id
or aid
980 webpage
= self
._download
_webpage
(url
, video_id
)
983 self
._search
_json
(r
'window\.__INITIAL_(?:DATA|STATE)__\s*=', webpage
, 'preload state', video_id
, default
={})
984 or self
._search
_nuxt
_data
(webpage
, video_id
, '__initialState', fatal
=False, traverse
=None))
985 video_data
= traverse_obj(
986 initial_data
, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type
=dict)
988 if season_id
and not video_data
:
989 # Non-Bstation layout, read through episode list
990 season_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id
)
991 video_data
= traverse_obj(season_json
,
992 ('sections', ..., 'episodes', lambda _
, v
: str(v
['episode_id']) == ep_id
),
993 expected_type
=dict, get_all
=False)
994 return self
._extract
_video
_info
(video_data
or {}, ep_id
=ep_id
, aid
=aid
)
997 class BiliIntlSeriesIE(BiliIntlBaseIE
):
998 _VALID_URL
= r
'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?play/(?P<id>\d+)/?(?:[?#]|$)'
1000 'url': 'https://www.bilibili.tv/en/play/34613',
1001 'playlist_mincount': 15,
1004 'title': 'Fly Me to the Moon',
1005 'description': 'md5:a861ee1c4dc0acfad85f557cc42ac627',
1006 'categories': ['Romance', 'Comedy', 'Slice of life'],
1007 'thumbnail': r
're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
1011 'skip_download': True,
1014 'url': 'https://www.biliintl.com/en/play/34613',
1015 'only_matching': True,
1017 'url': 'https://www.biliintl.com/EN/play/34613',
1018 'only_matching': True,
1021 def _entries(self
, series_id
):
1022 series_json
= self
._call
_api
(f
'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id
)
1023 for episode
in traverse_obj(series_json
, ('sections', ..., 'episodes', ...), expected_type
=dict, default
=[]):
1024 episode_id
= str(episode
.get('episode_id'))
1025 yield self
._extract
_video
_info
(episode
, ep_id
=episode_id
)
1027 def _real_extract(self
, url
):
1028 series_id
= self
._match
_id
(url
)
1029 series_info
= self
._call
_api
(f
'/web/v2/ogv/play/season_info?season_id={series_id}&platform=web', series_id
).get('season') or {}
1030 return self
.playlist_result(
1031 self
._entries
(series_id
), series_id
, series_info
.get('title'), series_info
.get('description'),
1032 categories
=traverse_obj(series_info
, ('styles', ..., 'title'), expected_type
=str_or_none
),
1033 thumbnail
=url_or_none(series_info
.get('horizontal_cover')), view_count
=parse_count(series_info
.get('view')))
1036 class BiliLiveIE(InfoExtractor
):
1037 _VALID_URL
= r
'https?://live.bilibili.com/(?P<id>\d+)'
1040 'url': 'https://live.bilibili.com/196',
1043 'description': "周六杂谈回,其他时候随机游戏。 | \n录播:@下播型泛式录播组。 | \n直播通知群(全员禁言):666906670,902092584,59971⑧481 (功能一样,别多加)",
1045 'title': "太空狼人杀联动,不被爆杀就算赢",
1046 'thumbnail': "https://i0.hdslb.com/bfs/live/new_room_cover/e607bc1529057ef4b332e1026e62cf46984c314d.jpg",
1047 'timestamp': 1650802769,
1051 'url': 'https://live.bilibili.com/196?broadcast_type=0&is_room_feed=1?spm_id_from=333.999.space_home.strengthen_live_card.click',
1052 'only_matching': True
1056 80: {'format_id': 'low', 'format_note': '流畅'}
,
1057 150: {'format_id': 'high_res', 'format_note': '高清'}
,
1058 250: {'format_id': 'ultra_high_res', 'format_note': '超清'}
,
1059 400: {'format_id': 'blue_ray', 'format_note': '蓝光'}
,
1060 10000: {'format_id': 'source', 'format_note': '原画'}
,
1061 20000: {'format_id': '4K', 'format_note': '4K'}
,
1062 30000: {'format_id': 'dolby', 'format_note': '杜比'}
,
1065 _quality
= staticmethod(qualities(list(_FORMATS
)))
1067 def _call_api(self
, path
, room_id
, query
):
1068 api_result
= self
._download
_json
(f
'https://api.live.bilibili.com/{path}', room_id
, query
=query
)
1069 if api_result
.get('code') != 0:
1070 raise ExtractorError(api_result
.get('message') or 'Unable to download JSON metadata')
1071 return api_result
.get('data') or {}
1073 def _parse_formats(self
, qn
, fmt
):
1074 for codec
in fmt
.get('codec') or []:
1075 if codec
.get('current_qn') != qn
:
1077 for url_info
in codec
['url_info']:
1079 'url': f
'{url_info["host"]}{codec["base_url"]}{url_info["extra"]}',
1080 'ext': fmt
.get('format_name'),
1081 'vcodec': codec
.get('codec_name'),
1082 'quality': self
._quality
(qn
),
1083 **self
._FORMATS
[qn
],
1086 def _real_extract(self
, url
):
1087 room_id
= self
._match
_id
(url
)
1088 room_data
= self
._call
_api
('room/v1/Room/get_info', room_id
, {'id': room_id}
)
1089 if room_data
.get('live_status') == 0:
1090 raise ExtractorError('Streamer is not live', expected
=True)
1093 for qn
in self
._FORMATS
.keys():
1094 stream_data
= self
._call
_api
('xlive/web-room/v2/index/getRoomPlayInfo', room_id
, {
1104 for fmt
in traverse_obj(stream_data
, ('playurl_info', 'playurl', 'stream', ..., 'format', ...)) or []:
1105 formats
.extend(self
._parse
_formats
(qn
, fmt
))
1109 'title': room_data
.get('title'),
1110 'description': room_data
.get('description'),
1111 'thumbnail': room_data
.get('user_cover'),
1112 'timestamp': stream_data
.get('live_time'),