jfr.im git - yt-dlp.git/commitdiff
[extractor/nhk] `NhkRadiruLive`: Add extractor (#7332)
author	garret <redacted>
Mon, 19 Jun 2023 13:25:27 +0000 (14:25 +0100)
committer	GitHub <redacted>
Mon, 19 Jun 2023 13:25:27 +0000 (13:25 +0000)
Authored by: garret1317

README.md
yt_dlp/extractor/_extractors.py
yt_dlp/extractor/nhk.py

index ce555c66f02422638b66fc6758c6c1541236f40a..659730410bc67b9f198ea23bd39c56fd9ead74fb 100644 (file)
--- a/README.md
+++ b/README.md
@@ -1850,6 +1850,9 @@ ### wrestleuniverse
 #### twitchstream (Twitch)
 * `client_id`: Client ID value to be sent with GraphQL requests, e.g. `twitchstream:client_id=kimne78kx3ncx6brgo4mv6wki5h1ko`
 
+#### nhkradirulive (NHK らじる★らじる LIVE)
+* `area`: Which regional variation to extract. Valid areas are: `sapporo`, `sendai`, `tokyo`, `nagoya`, `osaka`, `hiroshima`, `matsuyama`, `fukuoka`. Defaults to `tokyo`
+
 **Note**: These options may be changed/removed in the future without concern for backward compatibility
 
 <!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->
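
The `area` argument documented above is passed like any other extractor argument. As a rough illustration (the chosen area and channel URL are example values, not part of this commit), the equivalent of `--extractor-args "nhkradirulive:area=osaka"` through the Python API would look roughly like this; the option shape matches the test params in the extractor diff further down:

import yt_dlp

# Ask the live extractor for the Osaka regional stream instead of the
# default Tokyo one (example value, not taken from the commit).
ydl_opts = {'extractor_args': {'nhkradirulive': {'area': ['osaka']}}}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.nhk.or.jp/radio/player/?ch=fm', download=False)
    print(info['id'])  # expected to be something like 'fm-osaka'
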
index 10e132b4b833071a1cfeb6d1373e608da327b0aa..394f3c29d36f668f40848e40e49dd379ecc98a7d 100644 (file)
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
     NhkForSchoolProgramListIE,
     NhkRadioNewsPageIE,
     NhkRadiruIE,
+    NhkRadiruLiveIE,
 )
 from .nhl import NHLIE
 from .nick import (
index a3efa326a13447fc2bbe10a39ccad4ca4f7b0dc7..fbd6a18f6d8211b213970c81bdb73c86b8ba72ee 100644 (file)
--- a/yt_dlp/extractor/nhk.py
+++ b/yt_dlp/extractor/nhk.py
@@ -2,12 +2,15 @@
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
+    int_or_none,
+    join_nonempty,
     parse_duration,
     traverse_obj,
     unescapeHTML,
     unified_timestamp,
+    url_or_none,
     urljoin,
-    url_or_none
 )
 
 
@@ -492,3 +495,73 @@ class NhkRadioNewsPageIE(InfoExtractor):
 
     def _real_extract(self, url):
         return self.url_result('https://www.nhk.or.jp/radio/ondemand/detail.html?p=F261_01', NhkRadiruIE)
+
+
+class NhkRadiruLiveIE(InfoExtractor):
+    _GEO_COUNTRIES = ['JP']
+    _VALID_URL = r'https?://www\.nhk\.or\.jp/radio/player/\?ch=(?P<id>r[12]|fm)'
+    _TESTS = [{
+        # radio 1, no area specified
+        'url': 'https://www.nhk.or.jp/radio/player/?ch=r1',
+        'info_dict': {
+            'id': 'r1-tokyo',
+            'title': 're:^NHKネットラジオ第1 東京.+$',
+            'ext': 'm4a',
+            'thumbnail': 'https://www.nhk.or.jp/common/img/media/r1-200x200.png',
+            'live_status': 'is_live',
+        },
+    }, {
+        # radio 2, area specified
+        # (the area doesn't actually matter, r2 is national)
+        'url': 'https://www.nhk.or.jp/radio/player/?ch=r2',
+        'params': {'extractor_args': {'nhkradirulive': {'area': ['fukuoka']}}},
+        'info_dict': {
+            'id': 'r2-fukuoka',
+            'title': 're:^NHKネットラジオ第2 福岡.+$',
+            'ext': 'm4a',
+            'thumbnail': 'https://www.nhk.or.jp/common/img/media/r2-200x200.png',
+            'live_status': 'is_live',
+        },
+    }, {
+        # fm, area specified
+        'url': 'https://www.nhk.or.jp/radio/player/?ch=fm',
+        'params': {'extractor_args': {'nhkradirulive': {'area': ['sapporo']}}},
+        'info_dict': {
+            'id': 'fm-sapporo',
+            'title': 're:^NHKネットラジオFM 札幌.+$',
+            'ext': 'm4a',
+            'thumbnail': 'https://www.nhk.or.jp/common/img/media/fm-200x200.png',
+            'live_status': 'is_live',
+        }
+    }]
+
+    _NOA_STATION_IDS = {'r1': 'n1', 'r2': 'n2', 'fm': 'n3'}
+
+    def _real_extract(self, url):
+        station = self._match_id(url)
+        area = self._configuration_arg('area', ['tokyo'])[0]
+
+        config = self._download_xml(
+            'https://www.nhk.or.jp/radio/config/config_web.xml', station, 'Downloading area information')
+        data = config.find(f'.//data//area[.="{area}"]/..')
+
+        if not data:
+            raise ExtractorError('Invalid area. Valid areas are: %s' % ', '.join(
+                [i.text for i in config.findall('.//data//area')]), expected=True)
+
+        noa_info = self._download_json(
+            f'https:{config.find(".//url_program_noa").text}'.format(area=data.find('areakey').text),
+            station, note=f'Downloading {area} station metadata')
+        present_info = traverse_obj(noa_info, ('nowonair_list', self._NOA_STATION_IDS.get(station), 'present'))
+
+        return {
+            'title': ' '.join(traverse_obj(present_info, (('service', 'area',), 'name', {str}))),
+            'id': join_nonempty(station, area),
+            'thumbnails': traverse_obj(present_info, ('service', 'images', ..., {
+                'url': 'url',
+                'width': ('width', {int_or_none}),
+                'height': ('height', {int_or_none}),
+            })),
+            'formats': self._extract_m3u8_formats(data.find(f'{station}hls').text, station),
+            'is_live': True,
+        }
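
To make the area lookup in `_real_extract` easier to follow, here is a self-contained sketch of the same ElementTree queries run against a hypothetical config snippet. The element layout below is inferred from the queries themselves and is only an assumption; the real contents of `config_web.xml` are not shown in this commit.

import xml.etree.ElementTree as ET

# Hypothetical layout, inferred from the XPath-style queries above;
# the real config_web.xml may be structured differently.
sample = '''<config>
  <stream_url>
    <data>
      <area>tokyo</area>
      <areakey>130</areakey>
      <r1hls>https://example.invalid/r1/tokyo/master.m3u8</r1hls>
    </data>
  </stream_url>
  <url_program_noa>//example.invalid/noa/{area}.json</url_program_noa>
</config>'''

config = ET.fromstring(sample)

# Find the <data> element whose <area> child matches the requested area,
# then read its sibling fields, as _real_extract does above.
data = config.find('.//data//area[.="tokyo"]/..')
print(data.find('r1hls').text)        # per-area HLS playlist for Radio 1
print('https:' + config.find('.//url_program_noa').text.format(
    area=data.find('areakey').text))  # per-area now-on-air metadata URL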