jfr.im git - yt-dlp.git/blobdiff - yt_dlp/extractor/crunchyroll.py
[extractor, cleanup] Refactor `_download_...` methods
[yt-dlp.git] / yt_dlp / extractor / crunchyroll.py
index 98aa1dd9a4f0a4029c6ab07fc79e7ff2712cb218..1d186bd3886eed576ab9c1007d9baf997a4c65cb 100644 (file)
@@ -1,17 +1,15 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
+import base64
 import re
 import json
 import zlib
 
 import re
 import json
 import zlib
 
+import xml.etree.ElementTree
 from hashlib import sha1
 from math import pow, sqrt, floor
 from .common import InfoExtractor
 from hashlib import sha1
 from math import pow, sqrt, floor
 from .common import InfoExtractor
-from .vrv import VRVIE
+from .vrv import VRVBaseIE
 from ..compat import (
     compat_b64decode,
 from ..compat import (
     compat_b64decode,
-    compat_etree_Element,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
     bytes_to_intlist,
     extract_attributes,
     float_or_none,
     bytes_to_intlist,
     extract_attributes,
     float_or_none,
+    format_field,
     intlist_to_bytes,
     int_or_none,
     intlist_to_bytes,
     int_or_none,
+    join_nonempty,
     lowercase_escape,
     merge_dicts,
     lowercase_escape,
     merge_dicts,
+    qualities,
     remove_end,
     sanitized_Request,
     remove_end,
     sanitized_Request,
-    urlencode_postdata,
+    traverse_obj,
+    try_get,
     xpath_text,
 )
 from ..aes import (
     xpath_text,
 )
 from ..aes import (
@@ -38,8 +40,8 @@
 
 
 class CrunchyrollBaseIE(InfoExtractor):
 
 
 class CrunchyrollBaseIE(InfoExtractor):
-    _LOGIN_URL = 'https://www.crunchyroll.com/login'
-    _LOGIN_FORM = 'login_form'
+    _LOGIN_URL = 'https://www.crunchyroll.com/welcome/login'
+    _API_BASE = 'https://api.crunchyroll.com'
     _NETRC_MACHINE = 'crunchyroll'
 
     def _call_rpc_api(self, method, video_id, note=None, data=None):
     _NETRC_MACHINE = 'crunchyroll'
 
     def _call_rpc_api(self, method, video_id, note=None, data=None):
@@ -52,57 +54,50 @@ def _call_rpc_api(self, method, video_id, note=None, data=None):
                 'Content-Type': 'application/x-www-form-urlencoded',
             })
 
                 'Content-Type': 'application/x-www-form-urlencoded',
             })
 
-    def _login(self):
-        username, password = self._get_login_info()
-        if username is None:
-            return
-
-        login_page = self._download_webpage(
-            self._LOGIN_URL, None, 'Downloading login page')
-
-        def is_logged(webpage):
-            return 'href="/logout"' in webpage
-
-        # Already logged in
-        if is_logged(login_page):
-            return
-
-        login_form_str = self._search_regex(
-            r'(?P<form><form[^>]+?id=(["\'])%s\2[^>]*>)' % self._LOGIN_FORM,
-            login_page, 'login form', group='form')
-
-        post_url = extract_attributes(login_form_str).get('action')
-        if not post_url:
-            post_url = self._LOGIN_URL
-        elif not post_url.startswith('http'):
-            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
-
-        login_form = self._form_hidden_inputs(self._LOGIN_FORM, login_page)
-
-        login_form.update({
-            'login_form[name]': username,
-            'login_form[password]': password,
-        })
-
-        response = self._download_webpage(
-            post_url, None, 'Logging in', 'Wrong login info',
-            data=urlencode_postdata(login_form),
-            headers={'Content-Type': 'application/x-www-form-urlencoded'})
-
-        # Successful login
-        if is_logged(response):
+    def _perform_login(self, username, password):
+        if self._get_cookies(self._LOGIN_URL).get('etp_rt'):
             return
 
             return
 
-        error = self._html_search_regex(
-            '(?s)<ul[^>]+class=["\']messages["\'][^>]*>(.+?)</ul>',
-            response, 'error message', default=None)
-        if error:
-            raise ExtractorError('Unable to login: %s' % error, expected=True)
-
-        raise ExtractorError('Unable to log in')
-
-    def _real_initialize(self):
-        self._login()
+        upsell_response = self._download_json(
+            f'{self._API_BASE}/get_upsell_data.0.json', None, 'Getting session id',
+            query={
+                'sess_id': 1,
+                'device_id': 'whatvalueshouldbeforweb',
+                'device_type': 'com.crunchyroll.static',
+                'access_token': 'giKq5eY27ny3cqz',
+                'referer': self._LOGIN_URL
+            })
+        if upsell_response['code'] != 'ok':
+            raise ExtractorError('Could not get session id')
+        session_id = upsell_response['data']['session_id']
+
+        login_response = self._download_json(
+            f'{self._API_BASE}/login.1.json', None, 'Logging in',
+            data=compat_urllib_parse_urlencode({
+                'account': username,
+                'password': password,
+                'session_id': session_id
+            }).encode('ascii'))
+        if login_response['code'] != 'ok':
+            raise ExtractorError('Login failed. Server message: %s' % login_response['message'], expected=True)
+        if not self._get_cookies(self._LOGIN_URL).get('etp_rt'):
+            raise ExtractorError('Login succeeded but did not set etp_rt cookie')
+
+    # Beta-specific, but needed for redirects
+    def _get_beta_embedded_json(self, webpage, display_id):
+        initial_state = self._parse_json(self._search_regex(
+            r'__INITIAL_STATE__\s*=\s*({.+?})\s*;', webpage, 'initial state'), display_id)
+        app_config = self._parse_json(self._search_regex(
+            r'__APP_CONFIG__\s*=\s*({.+?})\s*;', webpage, 'app config'), display_id)
+        return initial_state, app_config
+
+    def _redirect_to_beta(self, webpage, iekey, video_id):
+        if not self._get_cookies(self._LOGIN_URL).get('etp_rt'):
+            raise ExtractorError('Received a beta page from non-beta url when not logged in.')
+        initial_state, app_config = self._get_beta_embedded_json(webpage, video_id)
+        url = app_config['baseSiteUrl'] + initial_state['router']['locations']['current']['pathname']
+        self.to_screen(f'{video_id}: Redirected to beta site - {url}')
+        return self.url_result(f'{url}', iekey, video_id)
 
     @staticmethod
     def _add_skip_wall(url):
 
     @staticmethod
     def _add_skip_wall(url):
@@ -118,7 +113,7 @@ def _add_skip_wall(url):
             parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
 
             parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
 
 
-class CrunchyrollIE(CrunchyrollBaseIE, VRVIE):
+class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
     IE_NAME = 'crunchyroll'
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|(?:[^/]*/){1,2}[^/?&]*?)(?P<id>[0-9]+))(?:[/?&]|$)'
     _TESTS = [{
     IE_NAME = 'crunchyroll'
     _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:media(?:-|/\?id=)|(?:[^/]*/){1,2}[^/?&]*?)(?P<id>[0-9]+))(?:[/?&]|$)'
     _TESTS = [{
@@ -397,7 +392,7 @@ def _get_subtitles(self, video_id, webpage):
                 'Downloading subtitles for ' + sub_name, data={
                     'subtitle_script_id': sub_id,
                 })
                 'Downloading subtitles for ' + sub_name, data={
                     'subtitle_script_id': sub_id,
                 })
-            if not isinstance(sub_doc, compat_etree_Element):
+            if not isinstance(sub_doc, xml.etree.ElementTree.Element):
                 continue
             sid = sub_doc.get('id')
             iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
                 continue
             sid = sub_doc.get('id')
             iv = xpath_text(sub_doc, 'iv', 'subtitle iv')
@@ -412,7 +407,7 @@ def _get_subtitles(self, video_id, webpage):
         return subtitles
 
     def _real_extract(self, url):
         return subtitles
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
+        mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
 
         if mobj.group('prefix') == 'm':
         video_id = mobj.group('id')
 
         if mobj.group('prefix') == 'm':
@@ -424,6 +419,8 @@ def _real_extract(self, url):
         webpage = self._download_webpage(
             self._add_skip_wall(webpage_url), video_id,
             headers=self.geo_verification_headers())
         webpage = self._download_webpage(
             self._add_skip_wall(webpage_url), video_id,
             headers=self.geo_verification_headers())
+        if re.search(r'<div id="preload-data">', webpage):
+            return self._redirect_to_beta(webpage, CrunchyrollBetaIE.ie_key(), video_id)
         note_m = self._html_search_regex(
             r'<div class="showmedia-trailer-notice">(.+?)</div>',
             webpage, 'trailer-notice', default='')
         note_m = self._html_search_regex(
             r'<div class="showmedia-trailer-notice">(.+?)</div>',
             webpage, 'trailer-notice', default='')
@@ -458,6 +455,18 @@ def _real_extract(self, url):
         video_description = (self._parse_json(self._html_search_regex(
             r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
             webpage, 'description', default='{}'), video_id) or media_metadata).get('description')
         video_description = (self._parse_json(self._html_search_regex(
             r'<script[^>]*>\s*.+?\[media_id=%s\].+?({.+?"description"\s*:.+?})\);' % video_id,
             webpage, 'description', default='{}'), video_id) or media_metadata).get('description')
+
+        thumbnails = []
+        thumbnail_url = (self._parse_json(self._html_search_regex(
+            r'<script type="application\/ld\+json">\n\s*(.+?)<\/script>',
+            webpage, 'thumbnail_url', default='{}'), video_id)).get('image')
+        if thumbnail_url:
+            thumbnails.append({
+                'url': thumbnail_url,
+                'width': 1920,
+                'height': 1080
+            })
+
         if video_description:
             video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
         video_uploader = self._html_search_regex(
         if video_description:
             video_description = lowercase_escape(video_description.replace(r'\r\n', '\n'))
         video_uploader = self._html_search_regex(
@@ -465,19 +474,24 @@ def _real_extract(self, url):
             [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
             webpage, 'video_uploader', default=False)
 
             [r'<a[^>]+href="/publisher/[^"]+"[^>]*>([^<]+)</a>', r'<div>\s*Publisher:\s*<span>\s*(.+?)\s*</span>\s*</div>'],
             webpage, 'video_uploader', default=False)
 
+        requested_languages = self._configuration_arg('language')
+        requested_hardsubs = [('' if val == 'none' else val) for val in self._configuration_arg('hardsub')]
+        language_preference = qualities((requested_languages or [language or ''])[::-1])
+        hardsub_preference = qualities((requested_hardsubs or ['', language or ''])[::-1])
+
         formats = []
         for stream in media.get('streams', []):
         formats = []
         for stream in media.get('streams', []):
-            audio_lang = stream.get('audio_lang')
-            hardsub_lang = stream.get('hardsub_lang')
+            audio_lang = stream.get('audio_lang') or ''
+            hardsub_lang = stream.get('hardsub_lang') or ''
+            if (requested_languages and audio_lang.lower() not in requested_languages
+                    or requested_hardsubs and hardsub_lang.lower() not in requested_hardsubs):
+                continue
             vrv_formats = self._extract_vrv_formats(
                 stream.get('url'), video_id, stream.get('format'),
                 audio_lang, hardsub_lang)
             for f in vrv_formats:
             vrv_formats = self._extract_vrv_formats(
                 stream.get('url'), video_id, stream.get('format'),
                 audio_lang, hardsub_lang)
             for f in vrv_formats:
-                f['language_preference'] = 1 if audio_lang == language else 0
-                f['quality'] = (
-                    1 if not hardsub_lang
-                    else 0 if hardsub_lang == language
-                    else -1)
+                f['language_preference'] = language_preference(audio_lang)
+                f['quality'] = hardsub_preference(hardsub_lang)
             formats.extend(vrv_formats)
         if not formats:
             available_fmts = []
             formats.extend(vrv_formats)
         if not formats:
             available_fmts = []
@@ -508,7 +522,7 @@ def _real_extract(self, url):
                         'video_quality': stream_quality,
                         'current_page': url,
                     })
                         'video_quality': stream_quality,
                         'current_page': url,
                     })
-                if isinstance(streamdata, compat_etree_Element):
+                if isinstance(streamdata, xml.etree.ElementTree.Element):
                     stream_info = streamdata.find('./{default}preload/stream_info')
                     if stream_info is not None:
                         stream_infos.append(stream_info)
                     stream_info = streamdata.find('./{default}preload/stream_info')
                     if stream_info is not None:
                         stream_infos.append(stream_info)
@@ -519,7 +533,7 @@ def _real_extract(self, url):
                         'video_format': stream_format,
                         'video_encode_quality': stream_quality,
                     })
                         'video_format': stream_format,
                         'video_encode_quality': stream_quality,
                     })
-                if isinstance(stream_info, compat_etree_Element):
+                if isinstance(stream_info, xml.etree.ElementTree.Element):
                     stream_infos.append(stream_info)
                 for stream_info in stream_infos:
                     video_encode_id = xpath_text(stream_info, './video_encode_id')
                     stream_infos.append(stream_info)
                 for stream_info in stream_infos:
                     video_encode_id = xpath_text(stream_info, './video_encode_id')
@@ -592,21 +606,25 @@ def _real_extract(self, url):
             r'(?s)<h\d[^>]+\bid=["\']showmedia_about_episode_num[^>]+>(.+?)</h\d',
             webpage, 'series', fatal=False)
 
             r'(?s)<h\d[^>]+\bid=["\']showmedia_about_episode_num[^>]+>(.+?)</h\d',
             webpage, 'series', fatal=False)
 
-        season = episode = episode_number = duration = thumbnail = None
+        season = episode = episode_number = duration = None
 
 
-        if isinstance(metadata, compat_etree_Element):
+        if isinstance(metadata, xml.etree.ElementTree.Element):
             season = xpath_text(metadata, 'series_title')
             episode = xpath_text(metadata, 'episode_title')
             episode_number = int_or_none(xpath_text(metadata, 'episode_number'))
             duration = float_or_none(media_metadata.get('duration'), 1000)
             season = xpath_text(metadata, 'series_title')
             episode = xpath_text(metadata, 'episode_title')
             episode_number = int_or_none(xpath_text(metadata, 'episode_number'))
             duration = float_or_none(media_metadata.get('duration'), 1000)
-            thumbnail = xpath_text(metadata, 'episode_image_url')
 
         if not episode:
             episode = media_metadata.get('title')
         if not episode_number:
             episode_number = int_or_none(media_metadata.get('episode_number'))
 
         if not episode:
             episode = media_metadata.get('title')
         if not episode_number:
             episode_number = int_or_none(media_metadata.get('episode_number'))
-        if not thumbnail:
-            thumbnail = media_metadata.get('thumbnail', {}).get('url')
+        thumbnail_url = try_get(media, lambda x: x['thumbnail']['url'])
+        if thumbnail_url:
+            thumbnails.append({
+                'url': thumbnail_url,
+                'width': 640,
+                'height': 360
+            })
 
         season_number = int_or_none(self._search_regex(
             r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
 
         season_number = int_or_none(self._search_regex(
             r'(?s)<h\d[^>]+id=["\']showmedia_about_episode_num[^>]+>.+?</h\d>\s*<h4>\s*Season (\d+)',
@@ -619,7 +637,7 @@ def _real_extract(self, url):
             'title': video_title,
             'description': video_description,
             'duration': duration,
             'title': video_title,
             'description': video_description,
             'duration': duration,
-            'thumbnail': thumbnail,
+            'thumbnails': thumbnails,
             'uploader': video_uploader,
             'series': series,
             'season': season,
             'uploader': video_uploader,
             'series': series,
             'season': season,
@@ -633,7 +651,7 @@ def _real_extract(self, url):
 
 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
     IE_NAME = 'crunchyroll:playlist'
 
 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
     IE_NAME = 'crunchyroll:playlist'
-    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:\w{1,2}/)?(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'
 
     _TESTS = [{
         'url': 'https://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
 
     _TESTS = [{
         'url': 'https://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
@@ -655,6 +673,9 @@ class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
         # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
         'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
         'only_matching': True,
         # geo-restricted (US), 18+ maturity wall, non-premium will be available since 2015.11.14
         'url': 'http://www.crunchyroll.com/ladies-versus-butlers?skip_wall=1',
         'only_matching': True,
+    }, {
+        'url': 'http://www.crunchyroll.com/fr/ladies-versus-butlers',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
     }]
 
     def _real_extract(self, url):
@@ -664,20 +685,272 @@ def _real_extract(self, url):
             # https:// gives a 403, but http:// does not
             self._add_skip_wall(url).replace('https://', 'http://'), show_id,
             headers=self.geo_verification_headers())
             # https:// gives a 403, but http:// does not
             self._add_skip_wall(url).replace('https://', 'http://'), show_id,
             headers=self.geo_verification_headers())
+        if re.search(r'<div id="preload-data">', webpage):
+            return self._redirect_to_beta(webpage, CrunchyrollBetaShowIE.ie_key(), show_id)
         title = self._html_search_meta('name', webpage, default=None)
 
         title = self._html_search_meta('name', webpage, default=None)
 
-        episode_paths = re.findall(
-            r'(?s)<li id="showview_videos_media_(\d+)"[^>]+>.*?<a href="([^"]+)"',
-            webpage)
-        entries = [
-            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll', ep_id)
-            for ep_id, ep in episode_paths
-        ]
-        entries.reverse()
+        episode_re = r'<li id="showview_videos_media_(\d+)"[^>]+>.*?<a href="([^"]+)"'
+        season_re = r'<a [^>]+season-dropdown[^>]+>([^<]+)'
+        paths = re.findall(f'(?s){episode_re}|{season_re}', webpage)
+
+        entries, current_season = [], None
+        for ep_id, ep, season in paths:
+            if season:
+                current_season = season
+                continue
+            entries.append(self.url_result(
+                f'http://www.crunchyroll.com{ep}', CrunchyrollIE.ie_key(), ep_id, season=current_season))
 
         return {
             '_type': 'playlist',
             'id': show_id,
             'title': title,
 
         return {
             '_type': 'playlist',
             'id': show_id,
             'title': title,
-            'entries': entries,
+            'entries': reversed(entries),
+        }
+
+
+class CrunchyrollBetaBaseIE(CrunchyrollBaseIE):
+    # Shared logic for the beta.crunchyroll.com extractors.
+    # Class-level cache, populated once by _get_params() and shared by all
+    # beta extractor instances: a (api_domain, bucket, params) tuple.
+    params = None
+
+    def _get_params(self, lang):
+        # Return the signed CMS access parameters (api_domain, bucket, params),
+        # authenticating once via the etp_rt cookie and caching the result on
+        # the class so subsequent extractions skip the three network calls.
+        if not CrunchyrollBetaBaseIE.params:
+            # The main page embeds __INITIAL_STATE__ / __APP_CONFIG__ JSON,
+            # which supplies the API domain and the OAuth client id.
+            initial_state, app_config = self._get_beta_embedded_json(self._download_webpage(
+                f'https://beta.crunchyroll.com/{lang}', None, note='Retrieving main page'), None)
+            api_domain = app_config['cxApiParams']['apiDomain']
+            # HTTP Basic credentials are "<client_id>:" (empty secret), base64-encoded.
+            basic_token = str(base64.b64encode(('%s:' % app_config['cxApiParams']['accountAuthClientId']).encode('ascii')), 'ascii')
+            # Exchange the etp_rt session cookie (sent automatically) for a bearer token.
+            auth_response = self._download_json(
+                f'{api_domain}/auth/v1/token', None, note='Authenticating with cookie',
+                headers={
+                    'Authorization': 'Basic ' + basic_token
+                }, data='grant_type=etp_rt_cookie'.encode('ascii'))
+            # index/v2 returns the signed CMS policy; prefer the beta bucket
+            # ('cms_beta') when present, otherwise fall back to 'cms'.
+            policy_response = self._download_json(
+                f'{api_domain}/index/v2', None, note='Retrieving signed policy',
+                headers={
+                    'Authorization': auth_response['token_type'] + ' ' + auth_response['access_token']
+                })
+            cms = traverse_obj(policy_response, 'cms_beta', 'cms')
+            bucket = cms['bucket']
+            # CloudFront-style signed query arguments required on every CMS request.
+            params = {
+                'Policy': cms['policy'],
+                'Signature': cms['signature'],
+                'Key-Pair-Id': cms['key_pair_id']
+            }
+            locale = traverse_obj(initial_state, ('localization', 'locale'))
+            if locale:
+                params['locale'] = locale
+            CrunchyrollBetaBaseIE.params = (api_domain, bucket, params)
+        return CrunchyrollBetaBaseIE.params
+
+    def _redirect_from_beta(self, url, lang, internal_id, display_id, is_episode, iekey):
+        # Anonymous users cannot use the beta API: rebuild the equivalent
+        # www.crunchyroll.com URL from the page's embedded state and hand off
+        # to the non-beta extractor identified by `iekey`.
+        initial_state, app_config = self._get_beta_embedded_json(self._download_webpage(url, display_id), display_id)
+        content_data = initial_state['content']['byId'][internal_id]
+        if is_episode:
+            # external_id appears to be dot-separated with the numeric media id
+            # in the second component — TODO(review) confirm against live data.
+            video_id = content_data['external_id'].split('.')[1]
+            series_id = content_data['episode_metadata']['series_slug_title']
+        else:
+            series_id = content_data['slug_title']
+        # Collapse runs of dashes: slug titles may contain '--' which the
+        # classic site's URLs do not use.
+        series_id = re.sub(r'-{2,}', '-', series_id)
+        url = f'https://www.crunchyroll.com/{lang}{series_id}'
+        if is_episode:
+            url = url + f'/{display_id}-{video_id}'
+        self.to_screen(f'{display_id}: Not logged in. Redirecting to non-beta site - {url}')
+        return self.url_result(url, iekey, display_id)
+
+
+class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
+    # Extractor for single episodes on beta.crunchyroll.com/watch/<id>/<slug>.
+    IE_NAME = 'crunchyroll:beta'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)watch/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _TESTS = [{
+        'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/to-the-future',
+        'info_dict': {
+            'id': '696363',
+            'ext': 'mp4',
+            'timestamp': 1459610100,
+            'description': 'md5:a022fbec4fbb023d43631032c91ed64b',
+            'uploader': 'Toei Animation',
+            'title': 'World Trigger Episode 73 – To the Future',
+            'upload_date': '20160402',
+            'episode_number': 73,
+            'series': 'World Trigger',
+            'average_rating': 4.9,
+            'episode': 'To the Future',
+            'season': 'World Trigger',
+            'thumbnail': 'https://img1.ak.crunchyroll.com/i/spire3-tmb/c870dedca1a83137c2d3d144984155ed1459527119_main.jpg',
+            'season_number': 1,
+        },
+        'params': {'skip_download': 'm3u8'},
+        'expected_warnings': ['Unable to download XML']
+    }, {
+        'url': 'https://beta.crunchyroll.com/watch/GYK53DMPR/wicked-lord-shingan-reborn',
+        'info_dict': {
+            'id': '648781',
+            'ext': 'mp4',
+            'episode_number': 1,
+            'timestamp': 1389173400,
+            'series': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+            'description': 'md5:5579d1a0355cc618558ba23d27067a62',
+            'uploader': 'TBS',
+            'episode': 'Wicked Lord Shingan... Reborn',
+            'average_rating': 4.9,
+            'season': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+            'thumbnail': 'https://img1.ak.crunchyroll.com/i/spire3-tmb/2ba0384e225a5370d5f0ee9496d91ea51389046521_main.jpg',
+            'title': 'Love, Chunibyo & Other Delusions - Heart Throb - Episode 1 – Wicked Lord Shingan... Reborn',
+            'season_number': 2,
+            'upload_date': '20140108',
+        },
+        'params': {'skip_download': 'm3u8'},
+        'expected_warnings': ['Unable to download XML']
+    }, {
+        'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        lang, internal_id, display_id = self._match_valid_url(url).group('lang', 'id', 'display_id')
+
+        # Without the etp_rt login cookie the beta API is unusable; fall back
+        # to the classic-site extractor via the page's embedded state.
+        if not self._get_cookies(url).get('etp_rt'):
+            return self._redirect_from_beta(url, lang, internal_id, display_id, True, CrunchyrollIE.ie_key())
+
+        api_domain, bucket, params = self._get_params(lang)
+
+        episode_response = self._download_json(
+            f'{api_domain}/cms/v2{bucket}/episodes/{internal_id}', display_id,
+            note='Retrieving episode metadata',
+            query=params)
+        if episode_response.get('is_premium_only') and not episode_response.get('playback'):
+            raise ExtractorError('This video is for premium members only.', expected=True)
+        # 'playback' is itself a URL to the per-episode stream manifest JSON.
+        stream_response = self._download_json(
+            episode_response['playback'], display_id,
+            note='Retrieving stream info')
+
+        thumbnails = []
+        for thumbnails_data in traverse_obj(episode_response, ('images', 'thumbnail')):
+            for thumbnail_data in thumbnails_data:
+                thumbnails.append({
+                    'url': thumbnail_data.get('source'),
+                    'width': thumbnail_data.get('width'),
+                    'height': thumbnail_data.get('height'),
+                })
+        subtitles = {}
+        # NOTE(review): stream_response.get('subtitles') may be None, which
+        # would raise AttributeError here — consider appending "or {}".
+        # Also, the loop variable shadows the URL 'lang' from above.
+        for lang, subtitle_data in stream_response.get('subtitles').items():
+            subtitles[lang] = [{
+                'url': subtitle_data.get('url'),
+                'ext': subtitle_data.get('format')
+            }]
+
+        # 'none' means "no hardsubs" and maps to the empty string used by the API.
+        requested_hardsubs = [('' if val == 'none' else val) for val in (self._configuration_arg('hardsub') or ['none'])]
+        hardsub_preference = qualities(requested_hardsubs[::-1])
+        requested_formats = self._configuration_arg('format') or ['adaptive_hls']
+
+        formats = []
+        for stream_type, streams in stream_response.get('streams', {}).items():
+            if stream_type not in requested_formats:
+                continue
+            for stream in streams.values():
+                hardsub_lang = stream.get('hardsub_locale') or ''
+                if hardsub_lang.lower() not in requested_hardsubs:
+                    continue
+                format_id = join_nonempty(
+                    stream_type,
+                    format_field(stream, 'hardsub_locale', 'hardsub-%s'))
+                if not stream.get('url'):
+                    continue
+                # NOTE(review): if stream_type matches neither '*_hls' nor
+                # '*_dash', adaptive_formats below is stale from the previous
+                # iteration (or unbound on the first) — consider a guard.
+                if stream_type.split('_')[-1] == 'hls':
+                    adaptive_formats = self._extract_m3u8_formats(
+                        stream['url'], display_id, 'mp4', m3u8_id=format_id,
+                        note='Downloading %s information' % format_id,
+                        fatal=False)
+                elif stream_type.split('_')[-1] == 'dash':
+                    adaptive_formats = self._extract_mpd_formats(
+                        stream['url'], display_id, mpd_id=format_id,
+                        note='Downloading %s information' % format_id,
+                        fatal=False)
+                for f in adaptive_formats:
+                    # Audio locale applies only to formats that carry audio.
+                    if f.get('acodec') != 'none':
+                        f['language'] = stream_response.get('audio_locale')
+                    f['quality'] = hardsub_preference(hardsub_lang.lower())
+                formats.extend(adaptive_formats)
+        self._sort_formats(formats)
+
+        # NOTE(review): description may be None, making .replace() raise —
+        # the playlist extractor below wraps the same expression in try_get.
+        return {
+            'id': internal_id,
+            'title': '%s Episode %s – %s' % (episode_response.get('season_title'), episode_response.get('episode'), episode_response.get('title')),
+            'description': episode_response.get('description').replace(r'\r\n', '\n'),
+            'duration': float_or_none(episode_response.get('duration_ms'), 1000),
+            'thumbnails': thumbnails,
+            'series': episode_response.get('series_title'),
+            'series_id': episode_response.get('series_id'),
+            'season': episode_response.get('season_title'),
+            'season_id': episode_response.get('season_id'),
+            'season_number': episode_response.get('season_number'),
+            'episode': episode_response.get('title'),
+            'episode_number': episode_response.get('sequence_number'),
+            'subtitles': subtitles,
+            'formats': formats
         }
         }
+
+
+class CrunchyrollBetaShowIE(CrunchyrollBetaBaseIE):
+    # Playlist extractor for series pages on beta.crunchyroll.com/series/....
+    IE_NAME = 'crunchyroll:playlist:beta'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)series/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _TESTS = [{
+        'url': 'https://beta.crunchyroll.com/series/GY19NQ2QR/Girl-Friend-BETA',
+        'info_dict': {
+            'id': 'girl-friend-beta',
+            'title': 'Girl Friend BETA',
+        },
+        'playlist_mincount': 10,
+    }, {
+        'url': 'https://beta.crunchyroll.com/series/GYJQV73V6/love-chunibyo--other-delusions---heart-throb--',
+        'info_dict': {
+            'id': 'love-chunibyo-other-delusions-heart-throb-',
+            'title': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+        },
+        'playlist_mincount': 10,
+    }, {
+        'url': 'https://beta.crunchyroll.com/it/series/GY19NQ2QR/Girl-Friend-BETA',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        lang, internal_id, display_id = self._match_valid_url(url).group('lang', 'id', 'display_id')
+
+        # Not logged in: the beta API is unavailable, so redirect to the
+        # classic-site playlist extractor.
+        if not self._get_cookies(url).get('etp_rt'):
+            return self._redirect_from_beta(url, lang, internal_id, display_id, False, CrunchyrollShowPlaylistIE.ie_key())
+
+        api_domain, bucket, params = self._get_params(lang)
+
+        series_response = self._download_json(
+            f'{api_domain}/cms/v2{bucket}/series/{internal_id}', display_id,
+            note='Retrieving series metadata', query=params)
+
+        seasons_response = self._download_json(
+            f'{api_domain}/cms/v2{bucket}/seasons?series_id={internal_id}', display_id,
+            note='Retrieving season list', query=params)
+
+        def entries():
+            # Lazy generator: one episode-list request per season, each
+            # episode yielded as a URL-type entry for CrunchyrollBetaIE.
+            for season in seasons_response['items']:
+                episodes_response = self._download_json(
+                    f'{api_domain}/cms/v2{bucket}/episodes?season_id={season["id"]}', display_id,
+                    note=f'Retrieving episode list for {season.get("slug_title")}', query=params)
+                for episode in episodes_response['items']:
+                    episode_id = episode['id']
+                    episode_display_id = episode['slug_title']
+                    yield {
+                        '_type': 'url',
+                        'url': f'https://beta.crunchyroll.com/{lang}watch/{episode_id}/{episode_display_id}',
+                        'ie_key': CrunchyrollBetaIE.ie_key(),
+                        'id': episode_id,
+                        'title': '%s Episode %s – %s' % (episode.get('season_title'), episode.get('episode'), episode.get('title')),
+                        # try_get guards against a missing/None description.
+                        'description': try_get(episode, lambda x: x['description'].replace(r'\r\n', '\n')),
+                        'duration': float_or_none(episode.get('duration_ms'), 1000),
+                        'series': episode.get('series_title'),
+                        'series_id': episode.get('series_id'),
+                        'season': episode.get('season_title'),
+                        'season_id': episode.get('season_id'),
+                        'season_number': episode.get('season_number'),
+                        'episode': episode.get('title'),
+                        'episode_number': episode.get('sequence_number')
+                    }
+
+        return self.playlist_result(entries(), internal_id, series_response.get('title'))