# yt_dlp/extractor/naver.py
import itertools
import re
from urllib.parse import urlparse, parse_qs

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    dict_get,
    int_or_none,
    merge_dicts,
    parse_duration,
    traverse_obj,
    try_get,
    unified_timestamp,
    update_url_query,
)


class NaverBaseIE(InfoExtractor):
    _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'

    def _extract_video_info(self, video_id, vid, key):
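        # Fetch playback metadata from Naver's rmcnmv VOD API; the per-clip `vid`
        # and one-time `key` (inKey) are supplied by the concrete extractors below.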
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
            video_id, query={
                'key': key,
            })
        meta = video_data['meta']
        title = meta['subject']
        formats = []
        get_list = lambda x: try_get(video_data, lambda y: y[x + 's']['list'], list) or []

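        # Turn a list of stream entries into yt-dlp format dicts; entries that belong
        # to an HLS stream set are marked for the native HLS downloader.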
        def extract_formats(streams, stream_type, query={}):
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
                    'url': stream_url,
                    'ext': 'mp4',
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

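        # Progressive H.264 files are listed at the top level; each 'streams' entry
        # carries its own query parameters ('keys') and either per-quality video
        # lists or a single HLS playlist URL.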
        extract_formats(get_list('video'), 'H264')
        for stream_set in video_data.get('streams', []):
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
        self._sort_formats(formats)

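        # A caption source ending in .ttml/.vtt is offered in both formats; tracks
        # with type == 'auto' go into automatic_captions instead of subtitles.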
        replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)

        def get_subs(caption_url):
            if re.search(self._CAPTION_EXT_RE, caption_url):
                return [{
                    'url': replace_ext(caption_url, 'ttml'),
                }, {
                    'url': replace_ext(caption_url, 'vtt'),
                }]
            else:
                return [{'url': caption_url}]

        automatic_captions = {}
        subtitles = {}
        for caption in get_list('caption'):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
            sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))

        user = meta.get('user', {})

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
            'thumbnail': try_get(meta, lambda x: x['cover']['source']),
            'view_count': int_or_none(meta.get('count')),
            'uploader_id': user.get('id'),
            'uploader': user.get('name'),
            'uploader_url': user.get('url'),
        }


class NaverIE(NaverBaseIE):
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'timestamp': 1378200754,
            'upload_date': '20130903',
            'uploader': '메가스터디, 합격불변의 법칙',
            'uploader_id': 'megastudy',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '8a38e35354d26a17f73f4e90094febd3',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:eb6aca9d457b922e43860a2a2b1984d3',
            'timestamp': 1432030253,
            'upload_date': '20150519',
            'uploader': '4가지쇼 시즌2',
            'uploader_id': 'wrappinguser29',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

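    # The web player's api/json/v endpoint returns playerInfoJson, whose currentClip
    # carries the videoId/inKey pair needed by NaverBaseIE._extract_video_info.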
    def _real_extract(self, url):
        video_id = self._match_id(url)
        content = self._download_json(
            'https://tv.naver.com/api/json/v/' + video_id,
            video_id, headers=self.geo_verification_headers())
        player_info_json = content.get('playerInfoJson') or {}
        current_clip = player_info_json.get('currentClip') or {}

        vid = current_clip.get('videoId')
        in_key = current_clip.get('inKey')

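        # A missing videoId/inKey means playback was refused; playerOption.auth tells
        # whether it is a country block or a login wall.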
        if not vid or not in_key:
            player_auth = try_get(player_info_json, lambda x: x['playerOption']['auth'])
            if player_auth == 'notCountry':
                self.raise_geo_restricted(countries=['KR'])
            elif player_auth == 'notLogin':
                self.raise_login_required()
            raise ExtractorError('couldn\'t extract vid and key')
        info = self._extract_video_info(video_id, vid, in_key)
        info.update({
            'description': clean_html(current_clip.get('description')),
            'timestamp': int_or_none(current_clip.get('firstExposureTime'), 1000),
            'duration': parse_duration(current_clip.get('displayPlayTime')),
            'like_count': int_or_none(current_clip.get('recommendPoint')),
            'age_limit': 19 if current_clip.get('adult') else None,
        })
        return info


class NaverLiveIE(InfoExtractor):
    IE_NAME = 'Naver:live'
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'https://tv.naver.com/l/52010',
        'info_dict': {
            'id': '52010',
            'ext': 'mp4',
            'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
            'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
            'channel_id': 'NTV-ytnnews24-0',
            'start_time': 1597026780000,
        },
    }, {
        'url': 'https://tv.naver.com/l/51549',
        'info_dict': {
            'id': '51549',
            'ext': 'mp4',
            'title': '연합뉴스TV - 코로나19 뉴스특보',
            'description': 'md5:c655e82091bc21e413f549c0eaccc481',
            'channel_id': 'NTV-yonhapnewstv-0',
            'start_time': 1596406380000,
        },
    }, {
        'url': 'https://tv.naver.com/l/54887',
        'only_matching': True,
    }]

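    # The live page embeds a secure playback API URL (sApiF) in its JavaScript; the
    # JSON it returns describes the stream status and the available HLS renditions.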
    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id, 'Downloading Page', 'Unable to download Page')
        secure_url = self._search_regex(r'sApiF:\s+(?:"|\')([^"\']+)', page, 'secureurl')

        info = self._extract_video_info(video_id, secure_url)
        info.update({
            'description': self._og_search_description(page)
        })

        return info

    def _extract_video_info(self, video_id, url):
        video_data = self._download_json(url, video_id, headers=self.geo_verification_headers())
        meta = video_data.get('meta')
        status = meta.get('status')

        if status == 'CLOSED':
            raise ExtractorError('Stream is offline.', expected=True)
        elif status != 'OPENED':
            raise ExtractorError('Unknown status %s' % status)

        title = meta.get('title')
        stream_list = video_data.get('streams')

        if stream_list is None:
            raise ExtractorError('Could not get stream data.', expected=True)

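        # Each 'streams' entry is an HLS rendition. Entries flagged with property.abr
        # are skipped; as the original comment notes, 'abr' here is not the average
        # audio bitrate (presumably it marks an adaptive master playlist that would
        # duplicate the individual renditions).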
        formats = []
        for quality in stream_list:
            if not quality.get('url'):
                continue

            prop = quality.get('property')
            if prop.get('abr'):  # This abr doesn't mean Average audio bitrate.
                continue

            formats.extend(self._extract_m3u8_formats(
                quality.get('url'), video_id, 'mp4',
                m3u8_id=quality.get('qualityId'), live=True
            ))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'channel_id': meta.get('channelId'),
            'channel_url': meta.get('channelUrl'),
            'thumbnail': meta.get('imgUrl'),
            'start_time': meta.get('startTime'),
            'categories': [meta.get('categoryId')],
            'is_live': True
        }


class NaverNowIE(NaverBaseIE):
    IE_NAME = 'navernow'
    _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>[0-9]+)'
    _API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4'
    _TESTS = [{
        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=',
        'md5': 'e05854162c21c221481de16b2944a0bc',
        'info_dict': {
            'id': '4759-26331132',
            'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1650369600,
            'upload_date': '20220419',
            'uploader_id': 'now',
            'view_count': int,
            'uploader_url': 'https://now.naver.com/show/4759',
            'uploader': '아이키의 떰즈업',
        },
        'params': {
            'noplaylist': True,
        }
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
        'md5': '9f6118e398aa0f22b2152f554ea7851b',
        'info_dict': {
            'id': '4759-26601461',
            'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
            'ext': 'mp4',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20220504',
            'timestamp': 1651648311,
            'uploader_id': 'now',
            'view_count': int,
            'uploader_url': 'https://now.naver.com/show/4759',
            'uploader': '아이키의 떰즈업',
        },
        'params': {
            'noplaylist': True,
        },
    }, {
        'url': 'https://now.naver.com/s/now.4759',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101,
    }, {
        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
        'info_dict': {
            'id': '4759',
            'title': '아이키의 떰즈업',
        },
        'playlist_mincount': 101,
    }]

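    # A replay needs two API calls: one for the VOD metadata and one for the inKey
    # that NaverBaseIE._extract_video_info requires.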
    def _extract_replay(self, show_id, replay_id):
        vod_info = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}', replay_id)
        in_key = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
        return merge_dicts({
            'id': f'{show_id}-{replay_id}',
            'title': traverse_obj(vod_info, ('episode', 'title')),
            'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
            'thumbnail': vod_info.get('thumbnail_image_url'),
        }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))

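    # Replays are paginated; pages of page_size entries are fetched until a short
    # page signals the end of the list.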
    def _extract_show_replays(self, show_id):
        page_size = 15
        page = 1
        while True:
            show_vod_info = self._download_json(
                f'{self._API_URL}/vod-shows/now.{show_id}', show_id,
                query={'page': page, 'page_size': page_size},
                note=f'Downloading JSON vod list for show {show_id} - page {page}'
            )['response']['result']
            for v in show_vod_info.get('vod_list') or []:
                yield self._extract_replay(show_id, v['id'])

            if len(show_vod_info.get('vod_list') or []) < page_size:
                break
            page += 1

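    # Highlights use a separate endpoint and page size; when highlight_id is given,
    # the same generator doubles as a lookup for that single clip.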
    def _extract_show_highlights(self, show_id, highlight_id=None):
        page_size = 10
        page = 1
        while True:
            highlights_videos = self._download_json(
                f'{self._API_URL}/shows/now.{show_id}/highlights/videos/', show_id,
                query={'page': page, 'page_size': page_size},
                note=f'Downloading JSON highlights for show {show_id} - page {page}')

            for highlight in highlights_videos.get('results') or []:
                if highlight_id and highlight.get('clip_no') != int(highlight_id):
                    continue
                yield merge_dicts({
                    'id': f'{show_id}-{highlight["clip_no"]}',
                    'title': highlight.get('title'),
                    'timestamp': unified_timestamp(highlight.get('regdate')),
                    'thumbnail': highlight.get('thumbnail_url'),
                }, self._extract_video_info(highlight['clip_no'], highlight['video_id'], highlight['video_inkey']))

            if len(highlights_videos.get('results') or []) < page_size:
                break
            page += 1

    def _extract_highlight(self, show_id, highlight_id):
        try:
            return next(self._extract_show_highlights(show_id, highlight_id))
        except StopIteration:
            raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')

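    # The shareHightlight/shareReplayId query parameters (the site's own spelling)
    # select a single clip; otherwise every replay and highlight of the show is
    # returned as one playlist.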
    def _real_extract(self, url):
        show_id = self._match_id(url)
        qs = parse_qs(urlparse(url).query)

        if not self._yes_playlist(show_id, qs.get('shareHightlight')):
            return self._extract_highlight(show_id, qs['shareHightlight'][0])
        elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
            return self._extract_replay(show_id, qs['shareReplayId'][0])

        show_info = self._download_json(
            f'{self._API_URL}/shows/now.{show_id}/', show_id,
            note=f'Downloading JSON vod list for show {show_id}')

        return self.playlist_result(
            itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
            show_id, show_info.get('title'))