]>
Commit | Line | Data |
---|---|---|
1 | # coding: utf-8 | |
2 | from __future__ import unicode_literals | |
3 | ||
4 | import re | |
5 | ||
6 | from .common import InfoExtractor | |
7 | from ..utils import ( | |
8 | clean_html, | |
9 | dict_get, | |
10 | ExtractorError, | |
11 | int_or_none, | |
12 | parse_duration, | |
13 | try_get, | |
14 | update_url_query, | |
15 | ) | |
16 | ||
17 | ||
class NaverBaseIE(InfoExtractor):
    """Shared helpers for Naver TV extractors.

    Provides ``_extract_video_info``, which queries the rmcnmv playback
    API for a video id/key pair and builds the standard info dict
    (formats, subtitles, automatic captions, uploader metadata).
    """

    # Caption file extensions the API serves; used to offer both
    # .ttml and .vtt variants of the same caption URL.
    _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'

    def _extract_video_info(self, video_id, vid, key):
        """Fetch playback data for `vid` (authorized by `key`).

        Returns an info dict keyed by `video_id`. Raises KeyError if the
        API response lacks the required 'meta'/'subject' fields.
        """
        video_data = self._download_json(
            'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
            video_id, query={
                'key': key,
            })
        meta = video_data['meta']
        title = meta['subject']
        formats = []

        def get_list(name):
            # The API groups entries under '<name>s' -> 'list'.
            return try_get(video_data, lambda x: x[name + 's']['list'], list) or []

        def extract_formats(streams, stream_type, query=None):
            # FIX: the original used a mutable default argument
            # (query={}); use None + fallback to avoid shared state.
            query = query or {}
            for stream in streams:
                stream_url = stream.get('source')
                if not stream_url:
                    continue
                stream_url = update_url_query(stream_url, query)
                encoding_option = stream.get('encodingOption', {})
                bitrate = stream.get('bitrate', {})
                formats.append({
                    'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
                    'url': stream_url,
                    'width': int_or_none(encoding_option.get('width')),
                    'height': int_or_none(encoding_option.get('height')),
                    'vbr': int_or_none(bitrate.get('video')),
                    'abr': int_or_none(bitrate.get('audio')),
                    'filesize': int_or_none(stream.get('size')),
                    # HLS sources are downloaded natively rather than via ffmpeg.
                    'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
                })

        extract_formats(get_list('video'), 'H264')
        for stream_set in video_data.get('streams', []):
            # Each stream set may carry auth key/value pairs that must be
            # appended to every stream URL in the set.
            query = {}
            for param in stream_set.get('keys', []):
                query[param['name']] = param['value']
            stream_type = stream_set.get('type')
            videos = stream_set.get('videos')
            if videos:
                extract_formats(videos, stream_type, query)
            elif stream_type == 'HLS':
                stream_url = stream_set.get('source')
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    update_url_query(stream_url, query), video_id,
                    'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
        self._sort_formats(formats)

        def replace_ext(url, new_ext):
            # Swap a known caption extension for `new_ext`.
            return re.sub(self._CAPTION_EXT_RE, '.' + new_ext, url)

        def get_subs(caption_url):
            # A caption whose URL has a known extension is available in
            # both ttml and vtt; otherwise return the URL untouched.
            if re.search(self._CAPTION_EXT_RE, caption_url):
                return [{
                    'url': replace_ext(caption_url, 'ttml'),
                }, {
                    'url': replace_ext(caption_url, 'vtt'),
                }]
            return [{'url': caption_url}]

        automatic_captions = {}
        subtitles = {}
        for caption in get_list('caption'):
            caption_url = caption.get('source')
            if not caption_url:
                continue
            # 'auto' captions are machine-generated; keep them separate.
            sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
            sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))

        user = meta.get('user', {})

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
            'thumbnail': try_get(meta, lambda x: x['cover']['source']),
            'view_count': int_or_none(meta.get('count')),
            'uploader_id': user.get('id'),
            'uploader': user.get('name'),
            'uploader_url': user.get('url'),
        }
104 | ||
105 | ||
class NaverIE(NaverBaseIE):
    """Extractor for VOD pages on tv.naver.com (and legacy tvcast URLs)."""

    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'http://tv.naver.com/v/81652',
        'info_dict': {
            'id': '81652',
            'ext': 'mp4',
            'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
            'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
            'timestamp': 1378200754,
            'upload_date': '20130903',
            'uploader': '메가스터디, 합격불변의 법칙',
            'uploader_id': 'megastudy',
        },
    }, {
        'url': 'http://tv.naver.com/v/395837',
        'md5': '8a38e35354d26a17f73f4e90094febd3',
        'info_dict': {
            'id': '395837',
            'ext': 'mp4',
            'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
            'description': 'md5:eb6aca9d457b922e43860a2a2b1984d3',
            'timestamp': 1432030253,
            'upload_date': '20150519',
            'uploader': '4가지쇼 시즌2',
            'uploader_id': 'wrappinguser29',
        },
        'skip': 'Georestricted',
    }, {
        'url': 'http://tvcast.naver.com/v/81652',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Resolve the page to its internal video id / playback key, then
        # delegate format extraction to NaverBaseIE.
        video_id = self._match_id(url)
        api_response = self._download_json(
            'https://tv.naver.com/api/json/v/' + video_id,
            video_id, headers=self.geo_verification_headers())
        player_info = api_response.get('playerInfoJson') or {}
        clip = player_info.get('currentClip') or {}

        vid, in_key = clip.get('videoId'), clip.get('inKey')

        if not (vid and in_key):
            # Distinguish the common failure modes before giving up.
            auth = try_get(player_info, lambda x: x['playerOption']['auth'])
            if auth == 'notCountry':
                self.raise_geo_restricted(countries=['KR'])
            elif auth == 'notLogin':
                self.raise_login_required()
            raise ExtractorError('couldn\'t extract vid and key')
        info = self._extract_video_info(video_id, vid, in_key)
        info.update({
            'description': clean_html(clip.get('description')),
            'timestamp': int_or_none(clip.get('firstExposureTime'), 1000),
            'duration': parse_duration(clip.get('displayPlayTime')),
            'like_count': int_or_none(clip.get('recommendPoint')),
            'age_limit': 19 if clip.get('adult') else None,
        })
        return info
167 | ||
168 | ||
class NaverLiveIE(InfoExtractor):
    """Extractor for live streams at tv.naver.com/l/<id>."""

    IE_NAME = 'Naver:live'
    _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)'
    _GEO_BYPASS = False
    _TESTS = [{
        'url': 'https://tv.naver.com/l/52010',
        'info_dict': {
            'id': '52010',
            'ext': 'm3u8',
            'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
            'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
            'channel_id': 'NTV-ytnnews24-0',
            'start_time': 1597026780000,
        },
    }, {
        'url': 'https://tv.naver.com/l/51549',
        'info_dict': {
            'id': '51549',
            'ext': 'm3u8',
            'title': '연합뉴스TV - 코로나19 뉴스특보',
            'description': 'md5:c655e82091bc21e413f549c0eaccc481',
            'channel_id': 'NTV-yonhapnewstv-0',
            'start_time': 1596406380000,
        },
    }, {
        'url': 'https://tv.naver.com/l/54887',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id, 'Downloading Page', 'Unable to download Page')
        # The page embeds the secure playback-API URL in a JS variable.
        secure_url = self._search_regex(r'sApiF:\s+(?:"|\')([^"\']+)', page, 'secureurl')

        info = self._extract_video_info(video_id, secure_url)
        info.update({
            'description': self._og_search_description(page)
        })

        return info

    def _extract_video_info(self, video_id, url):
        """Download live metadata from the secure API `url` and build the info dict.

        Raises ExtractorError when the stream is closed, in an unknown
        state, or carries no stream data.
        """
        video_data = self._download_json(url, video_id, headers=self.geo_verification_headers())
        # FIX: guard against a missing 'meta' object — previously this
        # raised AttributeError instead of a meaningful extractor error.
        meta = video_data.get('meta') or {}
        status = meta.get('status')

        if status == 'CLOSED':
            raise ExtractorError('Stream is offline.', expected=True)
        elif status != 'OPENED':
            raise ExtractorError('Unknown status %s' % status)

        title = meta.get('title')
        stream_list = video_data.get('streams')

        if stream_list is None:
            raise ExtractorError('Could not get stream data.', expected=True)

        formats = []
        for quality in stream_list:
            stream_url = quality.get('url')
            if not stream_url:
                continue

            # FIX: guard against a missing 'property' object (was an
            # unchecked .get() followed by attribute access).
            prop = quality.get('property') or {}
            if prop.get('abr'):  # This abr doesn't mean Average audio bitrate.
                continue

            formats.extend(self._extract_m3u8_formats(
                stream_url, video_id, 'm3u8',
                m3u8_id=quality.get('qualityId'), live=True
            ))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'channel_id': meta.get('channelId'),
            'channel_url': meta.get('channelUrl'),
            'thumbnail': meta.get('imgUrl'),
            'start_time': meta.get('startTime'),
            'categories': [meta.get('categoryId')],
            'is_live': True
        }