+ webpage = self._download_webpage(url, ss_id)
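+ # Pull the playlist title/description from the page's ld+json structured data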
+ metainfo = traverse_obj(
+ self._search_json(r'<script[^>]+type="application/ld\+json"[^>]*>', webpage, 'info', ss_id),
+ ('itemListElement', ..., {
+ 'title': ('name', {str}),
+ 'description': ('description', {str}),
+ }), get_all=False)
+
+ return self.playlist_result(self._get_episodes_from_season(ss_id, url), ss_id, **metainfo)
+
+
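+ # Shared helpers for bilibili paid-course ("cheese") extractors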
+class BilibiliCheeseBaseIE(BilibiliBaseIE):
+ _HEADERS = {'Referer': 'https://www.bilibili.com/'}
+
+ def _extract_episode(self, season_info, ep_id):
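+ # Locate this episode's entry in the season metadata by its numeric ID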
+ episode_info = traverse_obj(season_info, (
+ 'episodes', lambda _, v: v['id'] == int(ep_id)), get_all=False)
+ aid, cid = episode_info['aid'], episode_info['cid']
+
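+ # Fail early with clear messages for unreleased or unpurchased episodes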
+ if traverse_obj(episode_info, 'ep_status') == -1:
+ raise ExtractorError('This course episode is not yet available', expected=True)
+ if not traverse_obj(episode_info, 'playable'):
+ self.raise_login_required('You need to purchase the course to download this episode')
+
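+ # Fetch stream info from the paid-course ("pugv") playurl API; fnval=16 requests DASH formats, fourk=1 permits 4K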
+ play_info = self._download_json(
+ 'https://api.bilibili.com/pugv/player/web/playurl', ep_id,
+ query={'avid': aid, 'cid': cid, 'ep_id': ep_id, 'fnval': 16, 'fourk': 1},
+ headers=self._HEADERS, note='Downloading playinfo')['data']
+
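+ # Build the info dict from per-episode fields plus season-level uploader data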
+ return {
+ 'id': str_or_none(ep_id),
+ 'episode_id': str_or_none(ep_id),
+ 'formats': self.extract_formats(play_info),
+ 'extractor_key': BilibiliCheeseIE.ie_key(),
+ 'extractor': BilibiliCheeseIE.IE_NAME,
+ 'webpage_url': f'https://www.bilibili.com/cheese/play/ep{ep_id}',
+ **traverse_obj(episode_info, {
+ 'episode': ('title', {str}),
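+ # Display title is "<index> - <title>", dropping whichever part is missing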
+ 'title': {lambda v: v and join_nonempty('index', 'title', delim=' - ', from_dict=v)},
+ 'alt_title': ('subtitle', {str}),
+ 'duration': ('duration', {int_or_none}),
+ 'episode_number': ('index', {int_or_none}),
+ 'thumbnail': ('cover', {url_or_none}),
+ 'timestamp': ('release_date', {int_or_none}),
+ 'view_count': ('play', {int_or_none}),
+ }),
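+ # Uploader details come from the season-level metadata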
+ **traverse_obj(season_info, {
+ 'uploader': ('up_info', 'uname', {str}),
+ 'uploader_id': ('up_info', 'mid', {str_or_none}),
+ }),
+ 'subtitles': self.extract_subtitles(ep_id, cid, aid=aid),
+ '__post_extractor': self.extract_comments(aid),
+ 'http_headers': self._HEADERS,
+ }