# coding: utf-8

import json
import netrc
import re
import socket

from .common import InfoExtractor, SearchInfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_str,

    clean_html,
    get_element_by_id,
    ExtractorError,
    unescapeHTML,
    unified_strdate,
)


class YoutubeIE(InfoExtractor):
    """Information extractor for youtube.com."""

    _VALID_URL = r"""^
                     (
                         (?:https?://)?                                       # http(s):// (optional)
                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         )?                                                   # optional -> youtube.com/xxxx is OK
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    _NETRC_MACHINE = 'youtube'
    # Listed in order of quality
    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
    _video_extensions = {
        '13': '3gp',
        '17': 'mp4',
        '18': 'mp4',
        '22': 'mp4',
        '37': 'mp4',
        '38': 'mp4',
        '43': 'webm',
        '44': 'webm',
        '45': 'webm',
        '46': 'webm',
    }
    _video_dimensions = {
        '5': '240x400',
        '6': '???',
        '13': '???',
        '17': '144x176',
        '18': '360x640',
        '22': '720x1280',
        '34': '360x640',
        '35': '480x854',
        '37': '1080x1920',
        '38': '3072x4096',
        '43': '360x640',
        '44': '480x854',
        '45': '720x1280',
        '46': '1080x1920',
    }
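    # Note: the dimensions above are given as height x width; e.g. itag '22'
    # is the 1280-pixel-wide 720p stream.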
    IE_NAME = u'youtube'

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        if YoutubePlaylistIE.suitable(url):
            return False
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def report_lang(self):
        """Report attempt to set language."""
        self.to_screen(u'Setting language')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self.to_screen(u'%s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_video_subtitles_download(self, video_id):
        """Report attempt to list the available subtitles."""
        self.to_screen(u'%s: Checking available subtitles' % video_id)

    def report_video_subtitles_request(self, video_id, sub_lang, format):
        """Report attempt to download a subtitle track."""
        self.to_screen(u'%s: Downloading video subtitles for %s.%s' % (video_id, sub_lang, format))

    def report_video_subtitles_available(self, video_id, sub_lang_list):
        """Report available subtitles."""
        sub_lang = ",".join(list(sub_lang_list.keys()))
        self.to_screen(u'%s: Available subtitles for video: %s' % (video_id, sub_lang))

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen(u'%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen(u'RTMP download detected')

    def _decrypt_signature(self, s):
        """Turn an encrypted signature 's' into a working one by picking
        characters at fixed, length-dependent positions."""
        if self._downloader.params.get('verbose'):
            self.to_screen('encrypted signature length %d' % len(s))

        if len(s) == 88:
            return s[48] + s[81] + s[80] + s[79] + s[78] + s[77] + s[76] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[82] + s[66] + s[65] + s[64] + s[63] + s[85] + s[61] + s[60] + s[59] + s[58] + s[57] + s[56] + s[55] + s[54] + s[53] + s[52] + s[51] + s[50] + s[49] + s[67] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[40] + s[39] + s[38] + s[37] + s[36] + s[35] + s[34] + s[33] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[26] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[3] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[2] + s[12]
        elif len(s) == 87:
            return s[62] + s[82] + s[81] + s[80] + s[79] + s[78] + s[77] + s[76] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[67] + s[66] + s[65] + s[64] + s[63] + s[83] + s[61] + s[60] + s[59] + s[58] + s[57] + s[56] + s[55] + s[54] + s[53] + s[0] + s[51] + s[50] + s[49] + s[48] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[40] + s[39] + s[38] + s[37] + s[36] + s[35] + s[34] + s[33] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[26] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[12] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[3]
        elif len(s) == 86:
            return s[2] + s[3] + s[4] + s[5] + s[6] + s[7] + s[8] + s[9] + s[10] + s[11] + s[12] + s[13] + s[14] + s[15] + s[16] + s[0] + s[18] + s[19] + s[20] + s[21] + s[22] + s[23] + s[24] + s[25] + s[26] + s[27] + s[28] + s[29] + s[30] + s[31] + s[32] + s[33] + s[34] + s[35] + s[36] + s[37] + s[38] + s[39] + s[40] + s[79] + s[42] + s[43] + s[44] + s[45] + s[46] + s[47] + s[48] + s[49] + s[50] + s[51] + s[52] + s[53] + s[54] + s[55] + s[56] + s[57] + s[58] + s[59] + s[60] + s[61] + s[62] + s[63] + s[64] + s[65] + s[66] + s[67] + s[68] + s[69] + s[70] + s[71] + s[72] + s[73] + s[74] + s[75] + s[76] + s[77] + s[78] + s[82] + s[80] + s[81] + s[41]
        elif len(s) == 85:
            return s[76] + s[82] + s[81] + s[80] + s[79] + s[78] + s[77] + s[83] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[67] + s[66] + s[65] + s[64] + s[63] + s[62] + s[61] + s[0] + s[59] + s[58] + s[57] + s[56] + s[55] + s[54] + s[53] + s[52] + s[51] + s[1] + s[49] + s[48] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[40] + s[39] + s[38] + s[37] + s[36] + s[35] + s[34] + s[33] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[26] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[12] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[3]
        elif len(s) == 84:
            return s[83] + s[82] + s[81] + s[80] + s[79] + s[78] + s[77] + s[76] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[67] + s[66] + s[65] + s[64] + s[63] + s[62] + s[61] + s[60] + s[59] + s[58] + s[57] + s[56] + s[55] + s[54] + s[53] + s[52] + s[51] + s[50] + s[49] + s[48] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[40] + s[39] + s[38] + s[37] + s[2] + s[35] + s[34] + s[33] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[3] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[12] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[26]
        elif len(s) == 83:
            return s[52] + s[81] + s[80] + s[79] + s[78] + s[77] + s[76] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[67] + s[66] + s[65] + s[64] + s[63] + s[62] + s[61] + s[60] + s[59] + s[58] + s[57] + s[56] + s[2] + s[54] + s[53] + s[82] + s[51] + s[50] + s[49] + s[48] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[40] + s[39] + s[38] + s[37] + s[55] + s[35] + s[34] + s[33] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[26] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[12] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[3] + s[36]
        elif len(s) == 82:
            return s[36] + s[79] + s[78] + s[77] + s[76] + s[75] + s[74] + s[73] + s[72] + s[71] + s[70] + s[69] + s[68] + s[81] + s[66] + s[65] + s[64] + s[63] + s[62] + s[61] + s[60] + s[59] + s[58] + s[57] + s[56] + s[55] + s[54] + s[53] + s[52] + s[51] + s[50] + s[49] + s[48] + s[47] + s[46] + s[45] + s[44] + s[43] + s[42] + s[41] + s[33] + s[39] + s[38] + s[37] + s[40] + s[35] + s[0] + s[67] + s[32] + s[31] + s[30] + s[29] + s[28] + s[27] + s[26] + s[25] + s[24] + s[23] + s[22] + s[21] + s[20] + s[19] + s[18] + s[17] + s[16] + s[15] + s[14] + s[13] + s[12] + s[11] + s[10] + s[9] + s[8] + s[7] + s[6] + s[5] + s[4] + s[3] + s[2] + s[1] + s[34]
        else:
            raise ExtractorError(u'Unable to decrypt signature, signature length %d not supported; retrying might work' % len(s))
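
    # A note on the branches above: each one applies a fixed, length-dependent
    # reindexing of the encrypted signature's characters (some characters are
    # dropped). Conceptually (an illustrative sketch, not how the code is
    # actually written):
    #
    #     indices = _SIG_INDICES[len(s)]    # hypothetical lookup table
    #     return ''.join(s[i] for i in indices)
    #
    # The index sequences are reverse-engineered from YouTube's player and
    # break whenever the player changes, hence the 'retrying might work'
    # hint in the error message above.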

    def _get_available_subtitles(self, video_id):
        self.report_video_subtitles_download(video_id)
        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None)
        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
        if not sub_lang_list:
            return (u'video doesn\'t have subtitles', None)
        return sub_lang_list
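
    # For reference, the timedtext 'list' endpoint returns XML along these
    # lines (illustrative only; attribute order and extras vary):
    #   <track id="0" name="" lang_code="en" lang_original="English" ... />
    # which the regex above reduces to a {lang_code: name} dictionary.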

    def _list_available_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        self.report_video_subtitles_available(video_id, sub_lang_list)

    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
        """
        Return a tuple:
        (error_message, sub_lang, sub)
        """
        self.report_video_subtitles_request(video_id, sub_lang, format)
        params = compat_urllib_parse.urlencode({
            'lang': sub_lang,
            'name': sub_name,
            'v': video_id,
            'fmt': format,
        })
        url = 'http://www.youtube.com/api/timedtext?' + params
        try:
            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
        if not sub:
            return (u'Did not fetch video subtitles', None, None)
        return (None, sub_lang, sub)

    def _request_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, so pass it as an
        argument to speed up the process."""
        sub_lang = self._downloader.params.get('subtitleslang') or 'en'
        sub_format = self._downloader.params.get('subtitlesformat')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for "%s"' % sub_lang
        if mobj is None:
            return [(err_msg, None, None)]
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            params = compat_urllib_parse.urlencode({
                'lang': 'en',
                'tlang': sub_lang,
                'fmt': sub_format,
                'ts': timestamp,
                'kind': 'asr',
            })
            subtitles_url = caption_url + '&' + params
            sub = self._download_webpage(subtitles_url, video_id, u'Downloading automatic captions')
            return [(None, sub_lang, sub)]
        except KeyError:
            return [(err_msg, None, None)]

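    # For reference, ytplayer.config is a JSON object embedded in the watch
    # page; the fields used above sit roughly like this (illustrative values):
    #   {"args": {"ttsurl": "https://www.youtube.com/api/timedtext?...",
    #             "timestamp": "1360000000", ...}, ...}
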
    def _extract_subtitle(self, video_id):
        """
        Return a list with a single tuple:
        [(error_message, sub_lang, sub)]
        """
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # there was an error, we couldn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        if self._downloader.params.get('subtitleslang', False):
            sub_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in sub_lang_list:
            sub_lang = 'en'
        else:
            sub_lang = list(sub_lang_list.keys())[0]
        if sub_lang not in sub_lang_list:
            return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]

        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
        return [subtitle]

    def _extract_all_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
        if isinstance(sub_lang_list, tuple):  # there was an error, we couldn't get the available subtitles
            return [(sub_lang_list[0], None, None)]
        subtitles = []
        for sub_lang in sub_lang_list:
            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
            subtitles.append(subtitle)
        return subtitles

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))

    def _real_initialize(self):
        if self._downloader is None:
            return

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use the provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
                return

        # Set language
        request = compat_urllib_request.Request(self._LANG_URL)
        try:
            self.report_lang()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to set language: %s' % compat_str(err))
            return

        # No authentication to be performed
        if username is None:
            return

        request = compat_urllib_request.Request(self._LOGIN_URL)
        try:
            login_page = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to fetch login page: %s' % compat_str(err))
            return

        galx = None
        dsh = None
        match = re.search(re.compile(r'<input.+?name="GALX".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            galx = match.group(1)

        match = re.search(re.compile(r'<input.+?name="dsh".+?value="(.+?)"', re.DOTALL), login_page)
        if match:
            dsh = match.group(1)

        # Log in
        login_form_strs = {
            u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            u'Email': username,
            u'GALX': galx,
            u'Passwd': password,
            u'PersistentCookie': u'yes',
            u'_utf8': u'霱',
            u'bgresponse': u'js_disabled',
            u'checkConnection': u'',
            u'checkedDomains': u'youtube',
            u'dnConn': u'',
            u'dsh': dsh,
            u'pstMsg': u'0',
            u'rmShown': u'1',
            u'secTok': u'',
            u'signIn': u'Sign in',
            u'timeStmp': u'',
            u'service': u'youtube',
            u'uilel': u'3',
            u'hl': u'en_US',
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username or password')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return

        # Confirm age
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
        try:
            self.report_age_confirmation()
            compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))

    def _extract_id(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id

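    # Illustrative behaviour (the ID here is just an example):
    #   _extract_id('https://www.youtube.com/watch?v=BaW_jenozKc') == 'BaW_jenozKc'
    # Group 2 of _VALID_URL captures the ID; group 1 is the optional prefix.
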
    def _real_extract(self, url):
        # Extract the original video URL from a redirect URL (e.g. age
        # verification) via the next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = 'https://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self._extract_id(url)

        # Get video webpage
        self.report_video_webpage_download(video_id)
        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        request = compat_urllib_request.Request(url)
        try:
            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
            video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                              % (video_id, el_type))
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                                        note=False,
                                                        errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
            if 'token' in video_info:
                break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(u'YouTube said: %s' % video_info['reason'][0])
            else:
                raise ExtractorError(u'"token" parameter not in video info for unknown reason')

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError(u'"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            raise ExtractorError(u'Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning(u'unable to extract uploader nickname')

        # title
        if 'title' not in video_info:
            raise ExtractorError(u'Unable to extract video title')
        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])

        # thumbnail image
        if 'thumbnail_url' not in video_info:
            self._downloader.report_warning(u'unable to extract video thumbnail')
            video_thumbnail = ''
        else:  # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = u''

        # subtitles
        video_subtitles = None

        if self._downloader.params.get('writesubtitles', False):
            video_subtitles = self._extract_subtitle(video_id)
            if video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitles[0]
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('writeautomaticsub', False):
            video_subtitles = self._request_automatic_caption(video_id, video_webpage)
            (sub_error, sub_lang, sub) = video_subtitles[0]
            if sub_error:
                self._downloader.report_warning(sub_error)

        if self._downloader.params.get('allsubtitles', False):
            video_subtitles = self._extract_all_subtitles(video_id)
            for video_subtitle in video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitle
                if sub_error:
                    self._downloader.report_warning(sub_error)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id)
            return

        if 'length_seconds' not in video_info:
            self._downloader.report_warning(u'unable to extract video duration')
            video_duration = ''
        else:
            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])

        # Decide which formats to download
        req_format = self._downloader.params.get('format', None)

        try:
            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            info = json.loads(mobj.group(1))
            args = info['args']
            # An easy way to know whether the signatures are encrypted: look
            # for an 's' parameter in url_encoded_fmt_stream_map
            m_s = re.search(r'[&,]s=', args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
            video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
        except ValueError:
            pass

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [(None, video_info['conn'][0])]
        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
            url_map = {}
            for url_data_str in video_info['url_encoded_fmt_stream_map'][0].split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' in url_data and 'url' in url_data:
                    url = url_data['url'][0]
                    if 'sig' in url_data:
                        url += '&signature=' + url_data['sig'][0]
                    elif 's' in url_data:
                        signature = self._decrypt_signature(url_data['s'][0])
                        url += '&signature=' + signature
                    if 'ratebypass' not in url:
                        url += '&ratebypass=yes'
                    url_map[url_data['itag'][0]] = url
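
            # For reference, each comma-separated entry in the stream map is
            # itself a URL-encoded query string, roughly (illustrative):
            #   itag=22&url=https%3A%2F%2F...&sig=...&type=video%2Fmp4...
            # so url_map ends up mapping itag -> playable, signed URL.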

            format_limit = self._downloader.params.get('format_limit', None)
            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
            if format_limit is not None and format_limit in available_formats:
                format_list = available_formats[available_formats.index(format_limit):]
            else:
                format_list = available_formats
            existing_formats = [x for x in format_list if x in url_map]
            if len(existing_formats) == 0:
                raise ExtractorError(u'no known formats available for video')
            if self._downloader.params.get('listformats', None):
                self._print_formats(existing_formats)
                return
            if req_format is None or req_format == 'best':
                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])]  # Best quality
            elif req_format == 'worst':
                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])]  # Worst quality
            elif req_format in ('-1', 'all'):
                video_url_list = [(f, url_map[f]) for f in existing_formats]  # All formats
            else:
                # Specific formats. We pick the first in a slash-delimited sequence.
                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
                req_formats = req_format.split('/')
                video_url_list = None
                for rf in req_formats:
                    if rf in url_map:
                        video_url_list = [(rf, url_map[rf])]
                        break
                if video_url_list is None:
                    raise ExtractorError(u'requested format not available')
        else:
            raise ExtractorError(u'no conn or url_encoded_fmt_stream_map information found in video info')

        results = []
        for format_param, video_real_url in video_url_list:
            # Extension
            video_extension = self._video_extensions.get(format_param, 'flv')

            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
                                              self._video_dimensions.get(format_param, '???'))

            results.append({
                'id': video_id,
                'url': video_real_url,
                'uploader': video_uploader,
                'uploader_id': video_uploader_id,
                'upload_date': upload_date,
                'title': video_title,
                'ext': video_extension,
                'format': video_format,
                'thumbnail': video_thumbnail,
                'description': video_description,
                'player_url': player_url,
                'subtitles': video_subtitles,
                'duration': video_duration,
            })
        return results


class YoutubePlaylistIE(InfoExtractor):
    """Information Extractor for YouTube playlists."""

    _VALID_URL = r"""(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                           |  p/
                        )
                        ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                        .*
                     |
                        ((?:PL|EC|UU)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/playlists/%s?max-results=%i&start-index=%i&v=2&alt=json&safeSearch=none'
    _MAX_RESULTS = 50
    IE_NAME = u'youtube:playlist'

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download playlist videos from API
        playlist_id = mobj.group(1) or mobj.group(2)
        page_num = 1
        videos = []

        while True:
            url = self._TEMPLATE_URL % (playlist_id, self._MAX_RESULTS, self._MAX_RESULTS * (page_num - 1) + 1)
            page = self._download_webpage(url, playlist_id, u'Downloading page #%s' % page_num)

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))

            if 'feed' not in response:
                raise ExtractorError(u'Got a malformed response from YouTube API')
            playlist_title = response['feed']['title']['$t']
            if 'entry' not in response['feed']:
                # Number of videos is a multiple of self._MAX_RESULTS
                break

            for entry in response['feed']['entry']:
                index = entry['yt$position']['$t']
                if 'media$group' in entry and 'media$player' in entry['media$group']:
                    videos.append((index, entry['media$group']['media$player']['url']))

            if len(response['feed']['entry']) < self._MAX_RESULTS:
                break
            page_num += 1

        videos = [v[1] for v in sorted(videos)]

        url_results = [self.url_result(url, 'Youtube') for url in videos]
        return [self.playlist_result(url_results, playlist_id, playlist_title)]


class YoutubeChannelIE(InfoExtractor):
    """Information Extractor for YouTube channels."""

    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'http://www.youtube.com/channel_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        pagenum = 1

        url = self._TEMPLATE_URL % (channel_id, pagenum)
        page = self._download_webpage(url, channel_id,
                                      u'Downloading page #%s' % pagenum)

        # Extract video identifiers
        ids_in_page = self.extract_videos_from_page(page)
        video_ids.extend(ids_in_page)

        # Download any subsequent channel pages using the JSON-based channel_ajax query
        if self._MORE_PAGES_INDICATOR in page:
            while True:
                pagenum = pagenum + 1

                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_webpage(url, channel_id,
                                              u'Downloading page #%s' % pagenum)

                page = json.loads(page)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        urls = ['http://www.youtube.com/watch?v=%s' % id for id in video_ids]
        url_entries = [self.url_result(url, 'Youtube') for url in urls]
        return [self.playlist_result(url_entries, channel_id)]


class YoutubeUserIE(InfoExtractor):
    """Information Extractor for YouTube users."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
    IE_NAME = u'youtube:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using the YouTube Data API. Result size per
        # query is limited (currently to 50 videos), so we need to query
        # page by page until there are no more video ids, which means we
        # have got all of them.

        video_ids = []
        pagenum = 0

        while True:
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(gdata_url, username,
                                          u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(mobj.group(1))

            video_ids.extend(ids_in_page)

            # A little optimization: if the current page is not "full",
            # i.e. does not contain PAGE_SIZE video ids, we can assume that
            # this page is the last one - there are no more ids on further
            # pages - so there is no need to query again.

            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
                break

            pagenum += 1

        urls = ['http://www.youtube.com/watch?v=%s' % video_id for video_id in video_ids]
        url_results = [self.url_result(url, 'Youtube') for url in urls]
        return [self.playlist_result(url_results, playlist_title=username)]


class YoutubeSearchIE(SearchInfoExtractor):
    """Information Extractor for YouTube search queries."""
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'
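    # The search key makes queries like 'ytsearch:some phrase' work; the
    # SearchInfoExtractor base class also accepts a result count in the key,
    # e.g. 'ytsearch5:some phrase' (capped by _MAX_RESULTS above).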

    def report_download_page(self, query, pagenum):
        """Report attempt to download search page with the given number."""
        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query."""

        video_ids = []
        pagenum = 0
        limit = n

        while (50 * pagenum) < limit:
            self.report_download_page(query, pagenum + 1)
            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50 * pagenum) + 1)
            request = compat_urllib_request.Request(result_url)
            try:
                data = compat_urllib_request.urlopen(request).read().decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download API page: %s' % compat_str(err))
            api_response = json.loads(data)['data']

            if 'items' not in api_response:
                raise ExtractorError(u'[youtube] No video results')

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result('http://www.youtube.com/watch?v=%s' % id, 'Youtube') for id in video_ids]
        return self.playlist_result(videos, query)