import itertools
import re
from urllib.parse import urlparse, parse_qs

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    dict_get,
    int_or_none,
    join_nonempty,
    merge_dicts,
    parse_duration,
    traverse_obj,
    try_get,
    unified_timestamp,
    update_url_query,
)


class NaverBaseIE(InfoExtractor):
    _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'

    @staticmethod  # NB: Used in WeverseIE
    def process_subtitles(vod_data, process_url):
        ret = {'subtitles': {}, 'automatic_captions': {}}
        for caption in traverse_obj(vod_data, ('captions', 'list', ...)):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            type_ = 'automatic_captions' if caption.get('type') == 'auto' else 'subtitles'
            lang = caption.get('locale') or join_nonempty('language', 'country', from_dict=caption) or 'und'
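            # Fan-made captions can share a locale; suffix them _fan1, _fan2, ...
            # so each one keeps its own subtitle entry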
            if caption.get('type') == 'fan':
                lang += '_fan%d' % next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in ret[type_])
            ret[type_].setdefault(lang, []).extend({
                'url': sub_url,
                'name': join_nonempty('label', 'fanName', from_dict=caption, delim=' - '),
            } for sub_url in process_url(caption_url))
        return ret

    def _extract_video_info(self, video_id, vid, key):
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
            video_id, query={
                'key': key,
            })
        meta = video_data['meta']
        title = meta['subject']
        formats = []
        get_list = lambda x: try_get(video_data, lambda y: y[x + 's']['list'], list) or []

        def extract_formats(streams, stream_type, query={}):
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
                    'url': stream_url,
                    'ext': 'mp4',
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

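        # Progressive MP4 formats are listed under 'videos'; HLS stream sets
        # without an explicit video list are expanded from their m3u8 manifest below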
        extract_formats(get_list('video'), 'H264')
        for stream_set in video_data.get('streams', []):
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))

        replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)

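        # If the caption URL already carries a subtitle extension, offer both
        # the TTML and the WebVTT variant of it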
        def get_subs(caption_url):
            if re.search(self._CAPTION_EXT_RE, caption_url):
                return [
                    replace_ext(caption_url, 'ttml'),
                    replace_ext(caption_url, 'vtt'),
                ]
            return [caption_url]

        user = meta.get('user', {})

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': try_get(meta, lambda x: x['cover']['source']),
            'view_count': int_or_none(meta.get('count')),
            'uploader_id': user.get('id'),
            'uploader': user.get('name'),
            'uploader_url': user.get('url'),
            **self.process_subtitles(video_data, get_subs),
        }


class NaverIE(NaverBaseIE):
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'timestamp': 1378200754,
            'upload_date': '20130903',
            'uploader': '메가스터디, 합격불변의 법칙',
            'uploader_id': 'megastudy',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '8a38e35354d26a17f73f4e90094febd3',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:eb6aca9d457b922e43860a2a2b1984d3',
            'timestamp': 1432030253,
            'upload_date': '20150519',
            'uploader': '4가지쇼 시즌2',
            'uploader_id': 'wrappinguser29',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        content = self._download_json(
            'https://tv.naver.com/api/json/v/' + video_id,
            video_id, headers=self.geo_verification_headers())
        player_info_json = content.get('playerInfoJson') or {}
        current_clip = player_info_json.get('currentClip') or {}

        vid = current_clip.get('videoId')
        in_key = current_clip.get('inKey')

        if not vid or not in_key:
            player_auth = try_get(player_info_json, lambda x: x['playerOption']['auth'])
            if player_auth == 'notCountry':
                self.raise_geo_restricted(countries=['KR'])
            elif player_auth == 'notLogin':
                self.raise_login_required()
            raise ExtractorError('couldn\'t extract vid and key')
        info = self._extract_video_info(video_id, vid, in_key)
        info.update({
            'description': clean_html(current_clip.get('description')),
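            # firstExposureTime is presumably a millisecond epoch, hence the 1000 scale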
            'timestamp': int_or_none(current_clip.get('firstExposureTime'), 1000),
            'duration': parse_duration(current_clip.get('displayPlayTime')),
            'like_count': int_or_none(current_clip.get('recommendPoint')),
            'age_limit': 19 if current_clip.get('adult') else None,
        })
        return info


class NaverLiveIE(InfoExtractor):
    IE_NAME = 'Naver:live'
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'https://tv.naver.com/l/52010',
        'info_dict': {
            'id': '52010',
            'ext': 'mp4',
            'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
            'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
            'channel_id': 'NTV-ytnnews24-0',
            'start_time': 1597026780000,
        },
    }, {
        'url': 'https://tv.naver.com/l/51549',
        'info_dict': {
            'id': '51549',
            'ext': 'mp4',
            'title': '연합뉴스TV - 코로나19 뉴스특보',
            'description': 'md5:c655e82091bc21e413f549c0eaccc481',
            'channel_id': 'NTV-yonhapnewstv-0',
            'start_time': 1596406380000,
        },
    }, {
        'url': 'https://tv.naver.com/l/54887',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id, 'Downloading Page', 'Unable to download Page')
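        # The page embeds the URL of the secure live playback API in its JS config as sApiF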
        secure_url = self._search_regex(r'sApiF:\s+(?:"|\')([^"\']+)', page, 'secureurl')

        info = self._extract_video_info(video_id, secure_url)
        info.update({
            'description': self._og_search_description(page)
        })

        return info

    def _extract_video_info(self, video_id, url):
        video_data = self._download_json(url, video_id, headers=self.geo_verification_headers())
        meta = video_data.get('meta')
        status = meta.get('status')

        if status == 'CLOSED':
            raise ExtractorError('Stream is offline.', expected=True)
        elif status != 'OPENED':
            raise ExtractorError('Unknown status %s' % status)

        title = meta.get('title')
        stream_list = video_data.get('streams')

        if stream_list is None:
            raise ExtractorError('Could not get stream data.', expected=True)

        formats = []
        for quality in stream_list:
            if not quality.get('url'):
                continue

            prop = quality.get('property')
            if prop.get('abr'):  # This abr doesn't mean Average audio bitrate.
                continue

            formats.extend(self._extract_m3u8_formats(
                quality.get('url'), video_id, 'mp4',
                m3u8_id=quality.get('qualityId'), live=True
            ))

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'channel_id': meta.get('channelId'),
            'channel_url': meta.get('channelUrl'),
            'thumbnail': meta.get('imgUrl'),
            'start_time': meta.get('startTime'),
            'categories': [meta.get('categoryId')],
            'is_live': True
        }


class NaverNowIE(NaverBaseIE):
    IE_NAME = 'navernow'
    _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)'
    _API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4'
    _TESTS = [{
        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=',
        'md5': 'e05854162c21c221481de16b2944a0bc',
        'info_dict': {
            'id': '4759-26331132',
            'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1650369600,
            'upload_date': '20220419',
            'uploader_id': 'now',
            'view_count': int,
            'uploader_url': 'https://now.naver.com/show/4759',
            'uploader': '아이키의 떰즈업',
        },
        'params': {
            'noplaylist': True,
        }
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
        'md5': '9f6118e398aa0f22b2152f554ea7851b',
        'info_dict': {
            'id': '4759-26601461',
            'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20220504',
            'timestamp': 1651648311,
            'uploader_id': 'now',
            'view_count': int,
            'uploader_url': 'https://now.naver.com/show/4759',
            'uploader': '아이키의 떰즈업',
        },
        'params': {
            'noplaylist': True,
        },
    }, {
        'url': 'https://now.naver.com/s/now.4759',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101,
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101,
    }, {
        'url': 'https://now.naver.com/s/now.kihyunplay?shareReplayId=30573291#replay',
        'only_matching': True,
    }]

    def _extract_replay(self, show_id, replay_id):
        vod_info = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}', replay_id)
        in_key = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
        return merge_dicts({
            'id': f'{show_id}-{replay_id}',
            'title': traverse_obj(vod_info, ('episode', 'title')),
            'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
            'thumbnail': vod_info.get('thumbnail_image_url'),
        }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))

    def _extract_show_replays(self, show_id):
        page_size = 15
        page = 1
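        # Page through the VOD list; a page shorter than page_size is the last one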
        while True:
            show_vod_info = self._download_json(
                f'{self._API_URL}/vod-shows/now.{show_id}', show_id,
                query={'page': page, 'page_size': page_size},
                note=f'Downloading JSON vod list for show {show_id} - page {page}'
            )['response']['result']
            for v in show_vod_info.get('vod_list') or []:
                yield self._extract_replay(show_id, v['id'])

            if len(show_vod_info.get('vod_list') or []) < page_size:
                break
            page += 1

    def _extract_show_highlights(self, show_id, highlight_id=None):
        page_size = 10
        page = 1
        while True:
            highlights_videos = self._download_json(
                f'{self._API_URL}/shows/now.{show_id}/highlights/videos/', show_id,
                query={'page': page, 'page_size': page_size},
                note=f'Downloading JSON highlights for show {show_id} - page {page}')

            for highlight in highlights_videos.get('results') or []:
                if highlight_id and highlight.get('clip_no') != int(highlight_id):
                    continue
                yield merge_dicts({
                    'id': f'{show_id}-{highlight["clip_no"]}',
                    'title': highlight.get('title'),
                    'timestamp': unified_timestamp(highlight.get('regdate')),
                    'thumbnail': highlight.get('thumbnail_url'),
                }, self._extract_video_info(highlight['clip_no'], highlight['video_id'], highlight['video_inkey']))

            if len(highlights_videos.get('results') or []) < page_size:
                break
            page += 1

    def _extract_highlight(self, show_id, highlight_id):
        try:
            return next(self._extract_show_highlights(show_id, highlight_id))
        except StopIteration:
            raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')

    def _real_extract(self, url):
        show_id = self._match_id(url)
        qs = parse_qs(urlparse(url).query)

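        # shareHightlight (spelled like this in share URLs) and shareReplayId
        # select a single highlight/replay; otherwise extract the whole show as a playlist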
        if not self._yes_playlist(show_id, qs.get('shareHightlight')):
            return self._extract_highlight(show_id, qs['shareHightlight'][0])
        elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
            return self._extract_replay(show_id, qs['shareReplayId'][0])

        show_info = self._download_json(
            f'{self._API_URL}/shows/now.{show_id}/', show_id,
            note=f'Downloading JSON vod list for show {show_id}')

        return self.playlist_result(
            itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
            show_id, show_info.get('title'))