-# coding: utf-8
-from __future__ import unicode_literals
-
+import itertools
import re
+from urllib.parse import urlparse, parse_qs
from .common import InfoExtractor
from ..utils import (
+ ExtractorError,
clean_html,
dict_get,
- ExtractorError,
int_or_none,
+ merge_dicts,
parse_duration,
+ traverse_obj,
+ try_call,
try_get,
+ unified_timestamp,
update_url_query,
)
formats.append({
'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
'url': stream_url,
+ 'ext': 'mp4',
'width': int_or_none(encoding_option.get('width')),
'height': int_or_none(encoding_option.get('height')),
'vbr': int_or_none(bitrate.get('video')),
'url': 'https://tv.naver.com/l/52010',
'info_dict': {
'id': '52010',
- 'ext': 'm3u8',
+ 'ext': 'mp4',
'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
'channel_id': 'NTV-ytnnews24-0',
'url': 'https://tv.naver.com/l/51549',
'info_dict': {
'id': '51549',
- 'ext': 'm3u8',
+ 'ext': 'mp4',
'title': '연합뉴스TV - 코로나19 뉴스특보',
'description': 'md5:c655e82091bc21e413f549c0eaccc481',
'channel_id': 'NTV-yonhapnewstv-0',
continue
formats.extend(self._extract_m3u8_formats(
- quality.get('url'), video_id, 'm3u8',
+ quality.get('url'), video_id, 'mp4',
m3u8_id=quality.get('qualityId'), live=True
))
self._sort_formats(formats)
'categories': [meta.get('categoryId')],
'is_live': True
}
+
+
+class NaverNowIE(NaverBaseIE):
+    """Extractor for now.naver.com show pages.
+
+    A bare show URL yields a playlist of all of the show's replay VODs and
+    highlight clips.  The ``shareReplayId`` and ``shareHightlight`` query
+    parameters (the misspelling matches the site's real parameter name, as
+    seen in the test URLs) select a single replay/highlight instead, subject
+    to the usual --no-playlist/--yes-playlist decision.
+    """
+    IE_NAME = 'navernow'
+    _VALID_URL = r'https?://now\.naver\.com/show/(?P<id>[0-9]+)'
+    _PAGE_SIZE = 30  # items requested per API page, for both VOD and highlight listings
+    _API_URL = 'https://apis.naver.com/now_web/nowcms-api-xhmac/cms/v1'
+    _TESTS = [{
+        'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay=',
+        'md5': 'e05854162c21c221481de16b2944a0bc',
+        'info_dict': {
+            'id': '4759-5901',
+            'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
+            'ext': 'mp4',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'timestamp': 1650369600,
+            'upload_date': '20220419',
+            'uploader_id': 'now',
+            'view_count': int,
+        },
+        'params': {
+            'noplaylist': True,
+        }
+    }, {
+        'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
+        'md5': '9f6118e398aa0f22b2152f554ea7851b',
+        'info_dict': {
+            'id': '4759-1078',
+            'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
+            'ext': 'mp4',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'upload_date': '20220504',
+            'timestamp': 1651648042,
+            'uploader_id': 'now',
+            'view_count': int,
+        },
+        'params': {
+            'noplaylist': True,
+        },
+    }, {
+        'url': 'https://now.naver.com/show/4759',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 48
+    }, {
+        'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 48,
+    }, {
+        'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 48,
+    }]
+
+    def _extract_replay(self, show_id, replay_id):
+        # Fetch the replay's CMS metadata plus its 'inKey' (handed to
+        # NaverBaseIE._extract_video_info along with the video id, which
+        # resolves the playable formats), then merge the site-level
+        # metadata with that result.  The returned 'id' is the composite
+        # '<show>-<replay>' form the _TESTS expect (e.g. '4759-5901').
+        vod_info = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}', replay_id)
+        in_key = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
+        return merge_dicts({
+            'id': f'{show_id}-{replay_id}',
+            'title': traverse_obj(vod_info, ('episode', 'title')),
+            'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
+            'thumbnail': vod_info.get('thumbnail_image_url'),
+        }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))
+
+    def _extract_show_replays(self, show_id):
+        # Generator: pages through the show's replay VOD list
+        # _PAGE_SIZE entries at a time, yielding one info dict per replay.
+        page = 0
+        while True:
+            show_vod_info = self._download_json(
+                f'{self._API_URL}/vod-shows/{show_id}', show_id,
+                query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
+                note=f'Downloading JSON vod list for show {show_id} - page {page}'
+            )['response']['result']
+            for v in show_vod_info.get('vod_list') or []:
+                yield self._extract_replay(show_id, v['id'])
+
+            # Stop once the reported total has been covered; try_call
+            # keeps a missing/non-numeric 'count' from raising and simply
+            # falls through to the next page instead.
+            if try_call(lambda: show_vod_info['count'] <= self._PAGE_SIZE * (page + 1)):
+                break
+            page += 1
+
+    def _extract_show_highlights(self, show_id, highlight_id=None):
+        # Generator: pages through the show's highlight clips.  When
+        # highlight_id is given, everything except that one clip is
+        # skipped (used by _extract_highlight for single-video URLs).
+        # Highlights ship their inkey inline ('video_inkey'), so no
+        # second request per clip is needed, unlike _extract_replay.
+        page = 0
+        while True:
+            highlights_videos = self._download_json(
+                f'{self._API_URL}/shows/{show_id}/highlights/videos/', show_id,
+                query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
+                note=f'Downloading JSON highlights for show {show_id} - page {page}')
+
+            for highlight in highlights_videos.get('results') or []:
+                # API ids are ints while the query-string value is a str,
+                # hence the int() coercion before comparing.
+                if highlight_id and highlight.get('id') != int(highlight_id):
+                    continue
+                yield merge_dicts({
+                    'id': f'{show_id}-{highlight["id"]}',
+                    'title': highlight.get('title'),
+                    'timestamp': unified_timestamp(highlight.get('regdate')),
+                    'thumbnail': highlight.get('thumbnail_url'),
+                }, self._extract_video_info(highlight['id'], highlight['video_id'], highlight['video_inkey']))
+
+            if try_call(lambda: highlights_videos['count'] <= self._PAGE_SIZE * (page + 1)):
+                break
+            page += 1
+
+    def _extract_highlight(self, show_id, highlight_id):
+        # Return the single matching highlight, or error out with a clear
+        # message if the filtered generator produces nothing.
+        try:
+            return next(self._extract_show_highlights(show_id, highlight_id))
+        except StopIteration:
+            raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+        qs = parse_qs(urlparse(url).query)
+
+        # Single-video query params take effect only when _yes_playlist
+        # decides against the playlist (e.g. --no-playlist); otherwise
+        # fall through to the full show playlist below.
+        if not self._yes_playlist(show_id, qs.get('shareHightlight')):
+            return self._extract_highlight(show_id, qs['shareHightlight'][0])
+        elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
+            return self._extract_replay(show_id, qs['shareReplayId'][0])
+
+        show_info = self._download_json(
+            f'{self._API_URL}/shows/{show_id}', show_id,
+            note=f'Downloading JSON vod list for show {show_id}')
+
+        # Lazily chain replays then highlights into one playlist.
+        return self.playlist_result(
+            itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
+            show_id, show_info.get('title'))