-from __future__ import unicode_literals
-
import re
from .common import InfoExtractor
-from ..utils import urljoin
+from ..utils import (
+ parse_duration,
+ traverse_obj,
+ unescapeHTML,
+ unified_timestamp,
+ url_or_none,
+ urljoin,
+)
class NhkBaseIE(InfoExtractor):
- _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'
+ _API_URL_TEMPLATE = 'https://nwapi.nhk.jp/nhkworld/%sod%slist/v7b/%s/%s/%s/all%s.json'
_BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
_TYPE_REGEX = r'/(?P<type>video|audio)/'
def _extract_episode_info(self, url, episode=None):
fetch_episode = episode is None
- lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
- if episode_id.isdigit():
+ lang, m_type, episode_id = NhkVodIE._match_valid_url(url).groups()
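+ # 7-character IDs are rewritten as nnnn-nnn (e.g. 9999a34 -> 9999-a34), the form the ondemand API appears to use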
+ if len(episode_id) == 7:
episode_id = episode_id[:4] + '-' + episode_id[4:]
is_video = m_type == 'video'
class NhkVodIE(NhkBaseIE):
- _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+ # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], e.g. /video/9999a34/ (see test below)
+ _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
# Content available only for a limited period of time. Visit
# https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
_TESTS = [{
}, {
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
'only_matching': True,
+ }, {
+ # video, alphabetic character in ID #29670
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',
+ 'info_dict': {
+ 'id': 'qfjay6cg',
+ 'ext': 'mp4',
+ 'title': 'DESIGN TALKS plus - Fishermen’s Finery',
+ 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',
+ 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$',
+ 'upload_date': '20210615',
+ 'timestamp': 1623722008,
+ }
}]
def _real_extract(self, url):
- lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()
+ lang, m_type, program_id, episode_type = self._match_valid_url(url).groups()
episodes = self._call_api(
program_id, lang, m_type == 'video', False, episode_type == 'clip')
program_title = entries[0].get('series')
return self.playlist_result(entries, program_id, program_title)
+
+
+class NhkForSchoolBangumiIE(InfoExtractor):
+ _VALID_URL = r'https?://www2\.nhk\.or\.jp/school/movie/(?P<type>bangumi|clip)\.cgi\?das_id=(?P<id>[a-zA-Z0-9_-]+)'
+ _TESTS = [{
+ 'url': 'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id=D0005150191_00000',
+ 'info_dict': {
+ 'id': 'D0005150191_00003',
+ 'title': 'にている かな',
+ 'duration': 599.999,
+ 'timestamp': 1396414800,
+ 'upload_date': '20140402',
+ 'ext': 'mp4',
+ 'chapters': 'count:12'
+ },
+ 'params': {
+ # m3u8 download
+ 'skip_download': True,
+ },
+ }]
+
+ def _real_extract(self, url):
+ program_type, video_id = self._match_valid_url(url).groups()
+
+ webpage = self._download_webpage(
+ f'https://www2.nhk.or.jp/school/movie/{program_type}.cgi?das_id={video_id}', video_id)
+
+ # collect all page-level `var name = "value"` assignments
+ base_values = {g.group(1): g.group(2) for g in re.finditer(r'var\s+([a-zA-Z_]+)\s*=\s*"([^"]+?)";', webpage)}
+ # and the programObj/clipObj property values too
+ program_values = {g.group(1): g.group(3) for g in re.finditer(r'(?:program|clip)Obj\.([a-zA-Z_]+)\s*=\s*(["\'])([^"]+?)\2;', webpage)}
+ # extract all chapters
+ chapter_durations = [parse_duration(g.group(1)) for g in re.finditer(r'chapterTime\.push\(\'([0-9:]+?)\'\);', webpage)]
+ chapter_titles = [' '.join([g.group(1) or '', unescapeHTML(g.group(2))]).strip() for g in re.finditer(r'<div class="cpTitle"><span>(scene\s*\d+)?</span>([^<]+?)</div>', webpage)]
+
+ # this mirrors what player_core.js actually does: replace the das_id suffix with the page-supplied version (!)
+ version = base_values.get('r_version') or program_values.get('version')
+ if version:
+ video_id = f'{video_id.split("_")[0]}_{version}'
+
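+ # HLS master playlist on Akamai; the directory is keyed by the first 8 characters of the video ID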
+ formats = self._extract_m3u8_formats(
+ f'https://nhks-vh.akamaihd.net/i/das/{video_id[0:8]}/{video_id}_V_000.f4v/master.m3u8',
+ video_id, ext='mp4', m3u8_id='hls')
+
+ duration = parse_duration(base_values.get('r_duration'))
+
+ chapters = None
+ if chapter_durations and chapter_titles and len(chapter_durations) == len(chapter_titles):
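+ # treat the chapterTime values as start offsets: each chapter ends where the next one starts, the last at the full duration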
+ start_time = chapter_durations
+ end_time = chapter_durations[1:] + [duration]
+ chapters = [{
+ 'start_time': s,
+ 'end_time': e,
+ 'title': t,
+ } for s, e, t in zip(start_time, end_time, chapter_titles)]
+
+ return {
+ 'id': video_id,
+ 'title': program_values.get('name'),
+ 'duration': duration,
+ 'timestamp': unified_timestamp(base_values['r_upload']),
+ 'formats': formats,
+ 'chapters': chapters,
+ }
+
+
+class NhkForSchoolSubjectIE(InfoExtractor):
+ IE_DESC = 'Portal page for each school subject, like Japanese (kokugo, 国語) or math (sansuu/suugaku or 算数・数学)'
+ KNOWN_SUBJECTS = (
+ 'rika', 'syakai', 'kokugo',
+ 'sansuu', 'seikatsu', 'doutoku',
+ 'ongaku', 'taiiku', 'zukou',
+ 'gijutsu', 'katei', 'sougou',
+ 'eigo', 'tokkatsu',
+ 'tokushi', 'sonota',
+ )
+ _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>%s)/?(?:[\?#].*)?$' % '|'.join(re.escape(s) for s in KNOWN_SUBJECTS)
+
+ _TESTS = [{
+ 'url': 'https://www.nhk.or.jp/school/sougou/',
+ 'info_dict': {
+ 'id': 'sougou',
+ 'title': '総合的な学習の時間',
+ },
+ 'playlist_mincount': 16,
+ }, {
+ 'url': 'https://www.nhk.or.jp/school/rika/',
+ 'info_dict': {
+ 'id': 'rika',
+ 'title': '理科',
+ },
+ 'playlist_mincount': 15,
+ }]
+
+ def _real_extract(self, url):
+ subject_id = self._match_id(url)
+ webpage = self._download_webpage(url, subject_id)
+
+ return self.playlist_from_matches(
+ re.finditer(rf'href="((?:https?://www\.nhk\.or\.jp)?/school/{re.escape(subject_id)}/[^/]+/)"', webpage),
+ subject_id,
+ self._html_search_regex(r'(?s)<span\s+class="subjectName">\s*<img\s*[^<]+>\s*([^<]+?)</span>', webpage, 'title', fatal=False),
+ lambda g: urljoin(url, g.group(1)))
+
+
+class NhkForSchoolProgramListIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.nhk\.or\.jp/school/(?P<id>(?:%s)/[a-zA-Z0-9_-]+)' % (
+ '|'.join(re.escape(s) for s in NhkForSchoolSubjectIE.KNOWN_SUBJECTS)
+ )
+ _TESTS = [{
+ 'url': 'https://www.nhk.or.jp/school/sougou/q/',
+ 'info_dict': {
+ 'id': 'sougou/q',
+ 'title': 'Q~こどものための哲学',
+ },
+ 'playlist_mincount': 20,
+ }]
+
+ def _real_extract(self, url):
+ program_id = self._match_id(url)
+
+ webpage = self._download_webpage(f'https://www.nhk.or.jp/school/{program_id}/', program_id)
+
+ title = (self._generic_title('', webpage)
+ or self._html_search_regex(r'<h3>([^<]+?)とは?\s*</h3>', webpage, 'title', fatal=False))
+ title = re.sub(r'\s*\|\s*NHK\s+for\s+School\s*$', '', title) if title else None
+ description = self._html_search_regex(
+ r'(?s)<div\s+class="programDetail\s*">\s*<p>[^<]+</p>',
+ webpage, 'description', fatal=False, group=0)
+
+ bangumi_list = self._download_json(
+ f'https://www.nhk.or.jp/school/{program_id}/meta/program.json', program_id)
+ # the listed das_ids always refer to bangumi (full programmes), never clips
+ bangumis = [
+ self.url_result(f'https://www2.nhk.or.jp/school/movie/bangumi.cgi?das_id={x}')
+ for x in traverse_obj(bangumi_list, ('part', ..., 'part-video-dasid')) or []]
+
+ return self.playlist_result(bangumis, program_id, title, description)
+
+
+class NhkRadiruIE(InfoExtractor):
+ _GEO_COUNTRIES = ['JP']
+ IE_DESC = 'NHK らじる (Radiru/Rajiru)'
+ _VALID_URL = r'https?://www\.nhk\.or\.jp/radio/(?:player/ondemand|ondemand/detail)\.html\?p=(?P<site>[\da-zA-Z]+)_(?P<corner>[\da-zA-Z]+)(?:_(?P<headline>[\da-zA-Z]+))?'
+ _TESTS = [{
+ 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=0449_01_3853544',
+ 'skip': 'Episode expired on 2023-04-16',
+ 'info_dict': {
+ 'channel': 'NHK-FM',
+ 'description': 'md5:94b08bdeadde81a97df4ec882acce3e9',
+ 'ext': 'm4a',
+ 'id': '0449_01_3853544',
+ 'series': 'ジャズ・トゥナイト',
+ 'thumbnail': 'https://www.nhk.or.jp/prog/img/449/g449.jpg',
+ 'timestamp': 1680969600,
+ 'title': 'ジャズ・トゥナイト NEWジャズ特集',
+ 'upload_date': '20230408',
+ 'release_timestamp': 1680962400,
+ 'release_date': '20230408',
+ 'was_live': True,
+ },
+ }, {
+ # playlist, airs every weekday so it should _hopefully_ be okay forever
+ 'url': 'https://www.nhk.or.jp/radio/ondemand/detail.html?p=0458_01',
+ 'info_dict': {
+ 'id': '0458_01',
+ 'title': 'ベストオブクラシック',
+ 'description': '世界中の上質な演奏会をじっくり堪能する本格派クラシック番組。',
+ 'channel': 'NHK-FM',
+ 'thumbnail': 'https://www.nhk.or.jp/prog/img/458/g458.jpg',
+ },
+ 'playlist_mincount': 3,
+ }, {
+ # one with letters in the id
+ 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F300_06_3738470',
+ 'note': 'Expires on 2024-03-31',
+ 'info_dict': {
+ 'id': 'F300_06_3738470',
+ 'ext': 'm4a',
+ 'title': '有島武郎「一房のぶどう」',
+ 'description': '朗読:川野一宇(ラジオ深夜便アンカー)\r\n\r\n(2016年12月8日放送「ラジオ深夜便『アンカー朗読シリーズ』」より)',
+ 'channel': 'NHKラジオ第1、NHK-FM',
+ 'timestamp': 1635757200,
+ 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F300/img/corner/box_109_thumbnail.jpg',
+ 'release_date': '20161207',
+ 'series': 'らじる文庫 by ラジオ深夜便 ',
+ 'release_timestamp': 1481126700,
+ 'upload_date': '20211101',
+ }
+ }, {
+ # news
+ 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F261_01_3855109',
+ 'skip': 'Expires on 2023-04-17',
+ 'info_dict': {
+ 'id': 'F261_01_3855109',
+ 'ext': 'm4a',
+ 'channel': 'NHKラジオ第1',
+ 'timestamp': 1681635900,
+ 'release_date': '20230416',
+ 'series': 'NHKラジオニュース',
+ 'title': '午後6時のNHKニュース',
+ 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F261/img/RADIONEWS_640.jpg',
+ 'upload_date': '20230416',
+ 'release_timestamp': 1681635600,
+ },
+ }]
+
+ def _extract_episode_info(self, headline, programme_id, series_meta):
+ episode_id = f'{programme_id}_{headline["headline_id"]}'
+ episode = traverse_obj(headline, ('file_list', 0, {dict}))
+
+ return {
+ **series_meta,
+ 'id': episode_id,
+ 'formats': self._extract_m3u8_formats(episode.get('file_name'), episode_id, fatal=False),
+ 'container': 'm4a_dash', # force fixup, AAC-only HLS
+ 'was_live': True,
+ 'series': series_meta.get('title'),
+ 'thumbnail': url_or_none(headline.get('headline_image')) or series_meta.get('thumbnail'),
+ **traverse_obj(episode, {
+ 'title': 'file_title',
+ 'description': 'file_title_sub',
+ 'timestamp': ('open_time', {unified_timestamp}),
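+ # aa_vinfo4 appears to hold the broadcast start/end joined by '_'; keep only the start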
+ 'release_timestamp': ('aa_vinfo4', {lambda x: x.split('_')[0]}, {unified_timestamp}),
+ }),
+ }
+
+ def _real_extract(self, url):
+ site_id, corner_id, headline_id = self._match_valid_url(url).group('site', 'corner', 'headline')
+ programme_id = f'{site_id}_{corner_id}'
+
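+ # F261 (NHKラジオニュース) is served from its own consolidated news JSON rather than a per-programme file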
+ if site_id == 'F261':
+ json_url = 'https://www.nhk.or.jp/s-media/news/news-site/list/v1/all.json'
+ else:
+ json_url = f'https://www.nhk.or.jp/radioondemand/json/{site_id}/bangumi_{programme_id}.json'
+
+ meta = self._download_json(json_url, programme_id)['main']
+
+ series_meta = traverse_obj(meta, {
+ 'title': 'program_name',
+ 'channel': 'media_name',
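+ # prefer thumbnail_c over thumbnail_p; with get_all=False the first field present wins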
+ 'thumbnail': (('thumbnail_c', 'thumbnail_p'), {url_or_none}),
+ }, get_all=False)
+
+ if headline_id:
+ return self._extract_episode_info(
+ traverse_obj(meta, (
+ 'detail_list', lambda _, v: v['headline_id'] == headline_id), get_all=False),
+ programme_id, series_meta)
+
+ def entries():
+ for headline in traverse_obj(meta, ('detail_list', ..., {dict})):
+ yield self._extract_episode_info(headline, programme_id, series_meta)
+
+ return self.playlist_result(
+ entries(), programme_id, playlist_description=meta.get('site_detail'), **series_meta)
+
+
+class NhkRadioNewsPageIE(InfoExtractor):
+ _VALID_URL = r'https?://www\.nhk\.or\.jp/radionews/?(?:$|[?#])'
+ _TESTS = [{
+ # airs daily, on-the-hour most hours
+ 'url': 'https://www.nhk.or.jp/radionews/',
+ 'playlist_mincount': 5,
+ 'info_dict': {
+ 'id': 'F261_01',
+ 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F261/img/RADIONEWS_640.jpg',
+ 'description': 'md5:bf2c5b397e44bc7eb26de98d8f15d79d',
+ 'channel': 'NHKラジオ第1',
+ 'title': 'NHKラジオニュース',
+ }
+ }]
+
+ def _real_extract(self, url):
+ return self.url_result('https://www.nhk.or.jp/radio/ondemand/detail.html?p=F261_01', NhkRadiruIE)