1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_duration,
38 remove_quotes,
39 remove_start,
40 sanitized_Request,
41 smuggle_url,
42 str_to_int,
43 unescapeHTML,
44 unified_strdate,
45 unsmuggle_url,
46 uppercase_escape,
47 urlencode_postdata,
48 ISO3166Utils,
49 )
50
51
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _NETRC_MACHINE = 'youtube'
57 # If True, an error is raised when no login info is provided
58 _LOGIN_REQUIRED = False
59
60 def _set_language(self):
61 self._set_cookie(
62 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
63 # YouTube sets the expire time to about two months
64 expire_time=time.time() + 2 * 30 * 24 * 3600)
65
66 def _ids_to_results(self, ids):
67 return [
68 self.url_result(vid_id, 'Youtube', video_id=vid_id)
69 for vid_id in ids]
70
71 def _login(self):
72 """
73 Attempt to log in to YouTube.
74 True is returned if successful or skipped.
75 False is returned if login failed.
76
77 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
78 """
79 (username, password) = self._get_login_info()
80 # No authentication to be performed
81 if username is None:
82 if self._LOGIN_REQUIRED:
83 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
84 return True
85
86 login_page = self._download_webpage(
87 self._LOGIN_URL, None,
88 note='Downloading login page',
89 errnote='unable to fetch login page', fatal=False)
90 if login_page is False:
91 return
92
93 galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
94 login_page, 'Login GALX parameter')
95
96 # Log in
97 login_form_strs = {
98 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
99 'Email': username,
100 'GALX': galx,
101 'Passwd': password,
102
103 'PersistentCookie': 'yes',
104 '_utf8': '霱',
105 'bgresponse': 'js_disabled',
106 'checkConnection': '',
107 'checkedDomains': 'youtube',
108 'dnConn': '',
109 'pstMsg': '0',
110 'rmShown': '1',
111 'secTok': '',
112 'signIn': 'Sign in',
113 'timeStmp': '',
114 'service': 'youtube',
115 'uilel': '3',
116 'hl': 'en_US',
117 }
118
119 login_data = urlencode_postdata(login_form_strs)
120
121 req = sanitized_Request(self._LOGIN_URL, login_data)
122 login_results = self._download_webpage(
123 req, None,
124 note='Logging in', errnote='unable to log in', fatal=False)
125 if login_results is False:
126 return False
127
128 error_msg = self._html_search_regex(
129 r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
130 login_results, 'error message', default=None)
131 if error_msg:
132 raise ExtractorError('Unable to login: %s' % error_msg, expected=True)
133
134 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
135 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
136
137 # Two-Factor
138 # TODO add SMS and phone call support - these require making a request and then prompting the user
139
140 if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
141 tfa_code = self._get_tfa_info('2-step verification code')
142
143 if not tfa_code:
144 self._downloader.report_warning(
145 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
146 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
147 return False
148
149 tfa_code = remove_start(tfa_code, 'G-')
150
151 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
152
153 tfa_form_strs.update({
154 'Pin': tfa_code,
155 'TrustDevice': 'on',
156 })
157
158 tfa_data = urlencode_postdata(tfa_form_strs)
159
160 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
161 tfa_results = self._download_webpage(
162 tfa_req, None,
163 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
164
165 if tfa_results is False:
166 return False
167
168 if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
169 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
170 return False
171 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
172 self._downloader.report_warning('unable to log in - did the page structure change?')
173 return False
174 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
175 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
176 return False
177
178 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
179 self._downloader.report_warning('unable to log in: bad username or password')
180 return False
181 return True
182
183 def _real_initialize(self):
184 if self._downloader is None:
185 return
186 self._set_language()
187 if not self._login():
188 return
189
190
191 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
192 # Extract entries from page with "Load more" button
193 def _entries(self, page, playlist_id):
194 more_widget_html = content_html = page
195 for page_num in itertools.count(1):
196 for entry in self._process_page(content_html):
197 yield entry
198
199 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
200 if not mobj:
201 break
202
203 more = self._download_json(
204 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
205 'Downloading page #%s' % page_num,
206 transform_source=uppercase_escape)
207 content_html = more['content_html']
208 if not content_html.strip():
209 # Some webpages show a "Load more" button even though they
210 # don't have any more videos
211 break
212 more_widget_html = more['load_more_widget_html']
213
214
215 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
216 def _process_page(self, content):
217 for video_id, video_title in self.extract_videos_from_page(content):
218 yield self.url_result(video_id, 'Youtube', video_id, video_title)
219
220 def extract_videos_from_page(self, page):
221 ids_in_page = []
222 titles_in_page = []
223 for mobj in re.finditer(self._VIDEO_RE, page):
224 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
225 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
226 continue
227 video_id = mobj.group('id')
228 video_title = unescapeHTML(mobj.group('title'))
229 if video_title:
230 video_title = video_title.strip()
231 try:
232 idx = ids_in_page.index(video_id)
233 if video_title and not titles_in_page[idx]:
234 titles_in_page[idx] = video_title
235 except ValueError:
236 ids_in_page.append(video_id)
237 titles_in_page.append(video_title)
238 return zip(ids_in_page, titles_in_page)
239
240
241 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
242 def _process_page(self, content):
243 for playlist_id in orderedSet(re.findall(
244 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
245 content)):
246 yield self.url_result(
247 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
248
249 def _real_extract(self, url):
250 playlist_id = self._match_id(url)
251 webpage = self._download_webpage(url, playlist_id)
252 title = self._og_search_title(webpage, fatal=False)
253 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
254
255
256 class YoutubeIE(YoutubeBaseInfoExtractor):
257 IE_DESC = 'YouTube.com'
258 _VALID_URL = r"""(?x)^
259 (
260 (?:https?://|//) # http(s):// or protocol-independent URL
261 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
262 (?:www\.)?deturl\.com/www\.youtube\.com/|
263 (?:www\.)?pwnyoutube\.com/|
264 (?:www\.)?yourepeat\.com/|
265 tube\.majestyc\.net/|
266 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
267 (?:.*?\#/)? # handle anchor (#/) redirect urls
268 (?: # the various things that can precede the ID:
269 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
270 |(?: # or the v= param in all its forms
271 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
272 (?:\?|\#!?) # the params delimiter ? or # or #!
273 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
274 v=
275 )
276 ))
277 |(?:
278 youtu\.be| # just youtu.be/xxxx
279 vid\.plus| # or vid.plus/xxxx
280 zwearz\.com/watch| # or zwearz.com/watch/xxxx
281 )/
282 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
283 )
284 )? # all until now is optional -> you can pass the naked ID
285 ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
286 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
287 (?(1).+)? # if we found the ID, everything can follow
288 $"""
289 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
290 _formats = {
291 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
292 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
293 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
294 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
295 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
296 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
297 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
298 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
299 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
300 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
301 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
302 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
303 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
304 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
305 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
306 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
307 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
308 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
309
310
311 # 3D videos
312 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
313 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
314 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
315 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
316 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
317 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
318 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
319
320 # Apple HTTP Live Streaming
321 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
322 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
323 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
324 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
325 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
326 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
327 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
328 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
329
330 # DASH mp4 video
331 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
333 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
334 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
335 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
336 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
337 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
338 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
339 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
340 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
341 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
342
343 # Dash mp4 audio
344 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
345 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
346 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
347 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
348 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
349
350 # Dash webm
351 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
352 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
353 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
354 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
355 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
356 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
357 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
358 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
359 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
360 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
361 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
362 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
363 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
364 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
365 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
366 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
367 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
368 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
369 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
370 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
371 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
372 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
373
374 # Dash webm audio
375 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
376 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
377
378 # Dash webm audio with opus inside
379 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
380 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
381 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
382
383 # RTMP (unnamed)
384 '_rtmp': {'protocol': 'rtmp'},
385 }
386 _SUBTITLE_FORMATS = ('ttml', 'vtt')
387
388 IE_NAME = 'youtube'
389 _TESTS = [
390 {
391 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
392 'info_dict': {
393 'id': 'BaW_jenozKc',
394 'ext': 'mp4',
395 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
396 'uploader': 'Philipp Hagemeister',
397 'uploader_id': 'phihag',
398 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
399 'upload_date': '20121002',
400 'license': 'Standard YouTube License',
401 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
402 'categories': ['Science & Technology'],
403 'tags': ['youtube-dl'],
404 'like_count': int,
405 'dislike_count': int,
406 'start_time': 1,
407 'end_time': 9,
408 }
409 },
410 {
411 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
412 'note': 'Test generic use_cipher_signature video (#897)',
413 'info_dict': {
414 'id': 'UxxajLWwzqY',
415 'ext': 'mp4',
416 'upload_date': '20120506',
417 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
418 'alt_title': 'I Love It (feat. Charli XCX)',
419 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
420 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
421 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
422 'iconic ep', 'iconic', 'love', 'it'],
423 'uploader': 'Icona Pop',
424 'uploader_id': 'IconaPop',
425 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
426 'license': 'Standard YouTube License',
427 'creator': 'Icona Pop',
428 }
429 },
430 {
431 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
432 'note': 'Test VEVO video with age protection (#956)',
433 'info_dict': {
434 'id': '07FYdnEawAQ',
435 'ext': 'mp4',
436 'upload_date': '20130703',
437 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
438 'alt_title': 'Tunnel Vision',
439 'description': 'md5:64249768eec3bc4276236606ea996373',
440 'uploader': 'justintimberlakeVEVO',
441 'uploader_id': 'justintimberlakeVEVO',
442 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
443 'license': 'Standard YouTube License',
444 'creator': 'Justin Timberlake',
445 'age_limit': 18,
446 }
447 },
448 {
449 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
450 'note': 'Embed-only video (#1746)',
451 'info_dict': {
452 'id': 'yZIXLfi8CZQ',
453 'ext': 'mp4',
454 'upload_date': '20120608',
455 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
456 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
457 'uploader': 'SET India',
458 'uploader_id': 'setindia',
459 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
460 'license': 'Standard YouTube License',
461 'age_limit': 18,
462 }
463 },
464 {
465 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
466 'note': 'Use the first video ID in the URL',
467 'info_dict': {
468 'id': 'BaW_jenozKc',
469 'ext': 'mp4',
470 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
471 'uploader': 'Philipp Hagemeister',
472 'uploader_id': 'phihag',
473 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
474 'upload_date': '20121002',
475 'license': 'Standard YouTube License',
476 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
477 'categories': ['Science & Technology'],
478 'tags': ['youtube-dl'],
479 'like_count': int,
480 'dislike_count': int,
481 },
482 'params': {
483 'skip_download': True,
484 },
485 },
486 {
487 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
488 'note': '256k DASH audio (format 141) via DASH manifest',
489 'info_dict': {
490 'id': 'a9LDPn-MO4I',
491 'ext': 'm4a',
492 'upload_date': '20121002',
493 'uploader_id': '8KVIDEO',
494 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
495 'description': '',
496 'uploader': '8KVIDEO',
497 'license': 'Standard YouTube License',
498 'title': 'UHDTV TEST 8K VIDEO.mp4'
499 },
500 'params': {
501 'youtube_include_dash_manifest': True,
502 'format': '141',
503 },
504 },
505 # DASH manifest with encrypted signature
506 {
507 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
508 'info_dict': {
509 'id': 'IB3lcPjvWLA',
510 'ext': 'm4a',
511 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
512 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
513 'uploader': 'AfrojackVEVO',
514 'uploader_id': 'AfrojackVEVO',
515 'upload_date': '20131011',
516 'license': 'Standard YouTube License',
517 },
518 'params': {
519 'youtube_include_dash_manifest': True,
520 'format': '141',
521 },
522 },
523 # JS player signature function name containing $
524 {
525 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
526 'info_dict': {
527 'id': 'nfWlot6h_JM',
528 'ext': 'm4a',
529 'title': 'Taylor Swift - Shake It Off',
530 'alt_title': 'Shake It Off',
531 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
532 'uploader': 'TaylorSwiftVEVO',
533 'uploader_id': 'TaylorSwiftVEVO',
534 'upload_date': '20140818',
535 'license': 'Standard YouTube License',
536 'creator': 'Taylor Swift',
537 },
538 'params': {
539 'youtube_include_dash_manifest': True,
540 'format': '141',
541 },
542 },
543 # Controversy video
544 {
545 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
546 'info_dict': {
547 'id': 'T4XJQO3qol8',
548 'ext': 'mp4',
549 'upload_date': '20100909',
550 'uploader': 'The Amazing Atheist',
551 'uploader_id': 'TheAmazingAtheist',
552 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
553 'license': 'Standard YouTube License',
554 'title': 'Burning Everyone\'s Koran',
555 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
556 }
557 },
558 # Normal age-gate video (No vevo, embed allowed)
559 {
560 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
561 'info_dict': {
562 'id': 'HtVdAasjOgU',
563 'ext': 'mp4',
564 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
565 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
566 'uploader': 'The Witcher',
567 'uploader_id': 'WitcherGame',
568 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
569 'upload_date': '20140605',
570 'license': 'Standard YouTube License',
571 'age_limit': 18,
572 },
573 },
574 # Age-gate video with encrypted signature
575 {
576 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
577 'info_dict': {
578 'id': '6kLq3WMV1nU',
579 'ext': 'mp4',
580 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
581 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
582 'uploader': 'LloydVEVO',
583 'uploader_id': 'LloydVEVO',
584 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
585 'upload_date': '20110629',
586 'license': 'Standard YouTube License',
587 'age_limit': 18,
588 },
589 },
590 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
591 {
592 'url': '__2ABJjxzNo',
593 'info_dict': {
594 'id': '__2ABJjxzNo',
595 'ext': 'mp4',
596 'upload_date': '20100430',
597 'uploader_id': 'deadmau5',
598 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
599 'creator': 'deadmau5',
600 'description': 'md5:12c56784b8032162bb936a5f76d55360',
601 'uploader': 'deadmau5',
602 'license': 'Standard YouTube License',
603 'title': 'Deadmau5 - Some Chords (HD)',
604 'alt_title': 'Some Chords',
605 },
606 'expected_warnings': [
607 'DASH manifest missing',
608 ]
609 },
610 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
611 {
612 'url': 'lqQg6PlCWgI',
613 'info_dict': {
614 'id': 'lqQg6PlCWgI',
615 'ext': 'mp4',
616 'upload_date': '20150827',
617 'uploader_id': 'olympic',
618 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
619 'license': 'Standard YouTube License',
620 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
621 'uploader': 'Olympics',
622 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
623 },
624 'params': {
625 'skip_download': 'requires avconv',
626 }
627 },
628 # Non-square pixels
629 {
630 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
631 'info_dict': {
632 'id': '_b-2C3KPAM0',
633 'ext': 'mp4',
634 'stretched_ratio': 16 / 9.,
635 'upload_date': '20110310',
636 'uploader_id': 'AllenMeow',
637 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
638 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
639 'uploader': '孫艾倫',
640 'license': 'Standard YouTube License',
641 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
642 },
643 },
644 # url_encoded_fmt_stream_map is empty string
645 {
646 'url': 'qEJwOuvDf7I',
647 'info_dict': {
648 'id': 'qEJwOuvDf7I',
649 'ext': 'webm',
650 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
651 'description': '',
652 'upload_date': '20150404',
653 'uploader_id': 'spbelect',
654 'uploader': 'Наблюдатели Петербурга',
655 },
656 'params': {
657 'skip_download': 'requires avconv',
658 },
659 'skip': 'This live event has ended.',
660 },
661 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
662 {
663 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
664 'info_dict': {
665 'id': 'FIl7x6_3R5Y',
666 'ext': 'mp4',
667 'title': 'md5:7b81415841e02ecd4313668cde88737a',
668 'description': 'md5:116377fd2963b81ec4ce64b542173306',
669 'upload_date': '20150625',
670 'uploader_id': 'dorappi2000',
671 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
672 'uploader': 'dorappi2000',
673 'license': 'Standard YouTube License',
674 'formats': 'mincount:33',
675 },
676 },
677 # DASH manifest with segment_list
678 {
679 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
680 'md5': '8ce563a1d667b599d21064e982ab9e31',
681 'info_dict': {
682 'id': 'CsmdDsKjzN8',
683 'ext': 'mp4',
684 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
685 'uploader': 'Airtek',
686 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
687 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
688 'license': 'Standard YouTube License',
689 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
690 },
691 'params': {
692 'youtube_include_dash_manifest': True,
693 'format': '135', # bestvideo
694 }
695 },
696 {
697 # Multifeed videos (multiple cameras), URL is for Main Camera
698 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
699 'info_dict': {
700 'id': 'jqWvoWXjCVs',
701 'title': 'teamPGP: Rocket League Noob Stream',
702 'description': 'md5:dc7872fb300e143831327f1bae3af010',
703 },
704 'playlist': [{
705 'info_dict': {
706 'id': 'jqWvoWXjCVs',
707 'ext': 'mp4',
708 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
709 'description': 'md5:dc7872fb300e143831327f1bae3af010',
710 'upload_date': '20150721',
711 'uploader': 'Beer Games Beer',
712 'uploader_id': 'beergamesbeer',
713 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
714 'license': 'Standard YouTube License',
715 },
716 }, {
717 'info_dict': {
718 'id': '6h8e8xoXJzg',
719 'ext': 'mp4',
720 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
721 'description': 'md5:dc7872fb300e143831327f1bae3af010',
722 'upload_date': '20150721',
723 'uploader': 'Beer Games Beer',
724 'uploader_id': 'beergamesbeer',
725 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
726 'license': 'Standard YouTube License',
727 },
728 }, {
729 'info_dict': {
730 'id': 'PUOgX5z9xZw',
731 'ext': 'mp4',
732 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
733 'description': 'md5:dc7872fb300e143831327f1bae3af010',
734 'upload_date': '20150721',
735 'uploader': 'Beer Games Beer',
736 'uploader_id': 'beergamesbeer',
737 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
738 'license': 'Standard YouTube License',
739 },
740 }, {
741 'info_dict': {
742 'id': 'teuwxikvS5k',
743 'ext': 'mp4',
744 'title': 'teamPGP: Rocket League Noob Stream (zim)',
745 'description': 'md5:dc7872fb300e143831327f1bae3af010',
746 'upload_date': '20150721',
747 'uploader': 'Beer Games Beer',
748 'uploader_id': 'beergamesbeer',
749 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
750 'license': 'Standard YouTube License',
751 },
752 }],
753 'params': {
754 'skip_download': True,
755 },
756 },
757 {
758 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
759 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
760 'info_dict': {
761 'id': 'gVfLd0zydlo',
762 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
763 },
764 'playlist_count': 2,
765 },
766 {
767 'url': 'http://vid.plus/FlRa-iH7PGw',
768 'only_matching': True,
769 },
770 {
771 'url': 'http://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
772 'only_matching': True,
773 },
774 {
775 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
776 # Also tests cut-off URL expansion in video description (see
777 # https://github.com/rg3/youtube-dl/issues/1892,
778 # https://github.com/rg3/youtube-dl/issues/8164)
779 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
780 'info_dict': {
781 'id': 'lsguqyKfVQg',
782 'ext': 'mp4',
783 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
784 'alt_title': 'Dark Walk',
785 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
786 'upload_date': '20151119',
787 'uploader_id': 'IronSoulElf',
788 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
789 'uploader': 'IronSoulElf',
790 'license': 'Standard YouTube License',
791 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
792 },
793 'params': {
794 'skip_download': True,
795 },
796 },
797 {
798 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
799 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
800 'only_matching': True,
801 },
802 {
803 # Video with yt:stretch=17:0
804 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
805 'info_dict': {
806 'id': 'Q39EVAstoRM',
807 'ext': 'mp4',
808 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
809 'description': 'md5:ee18a25c350637c8faff806845bddee9',
810 'upload_date': '20151107',
811 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
812 'uploader': 'CH GAMER DROID',
813 },
814 'params': {
815 'skip_download': True,
816 },
817 },
818 {
819 # Video licensed under Creative Commons
820 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
821 'info_dict': {
822 'id': 'M4gD1WSo5mA',
823 'ext': 'mp4',
824 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
825 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
826 'upload_date': '20150127',
827 'uploader_id': 'BerkmanCenter',
828 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
829 'uploader': 'BerkmanCenter',
830 'license': 'Creative Commons Attribution license (reuse allowed)',
831 },
832 'params': {
833 'skip_download': True,
834 },
835 },
836 {
837 # Channel-like uploader_url
838 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
839 'info_dict': {
840 'id': 'eQcmzGIKrzg',
841 'ext': 'mp4',
842 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
843 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
844 'upload_date': '20151119',
845 'uploader': 'Bernie 2016',
846 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
847 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
848 'license': 'Creative Commons Attribution license (reuse allowed)',
849 },
850 'params': {
851 'skip_download': True,
852 },
853 },
854 {
855 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;v=V36LpHqtcDY',
856 'only_matching': True,
857 }
858 ]
859
860 def __init__(self, *args, **kwargs):
861 super(YoutubeIE, self).__init__(*args, **kwargs)
862 self._player_cache = {}
863
864 def report_video_info_webpage_download(self, video_id):
865 """Report attempt to download video info webpage."""
866 self.to_screen('%s: Downloading video info webpage' % video_id)
867
868 def report_information_extraction(self, video_id):
869 """Report attempt to extract video information."""
870 self.to_screen('%s: Extracting video information' % video_id)
871
872 def report_unavailable_format(self, video_id, format):
873 """Report extracted video URL."""
874 self.to_screen('%s: Format %s not available' % (video_id, format))
875
876 def report_rtmp_download(self):
877 """Indicate the download will use the RTMP protocol."""
878 self.to_screen('RTMP download detected')
879
880 def _signature_cache_id(self, example_sig):
881 """ Return a string representation of a signature """
882 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
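# Illustrative example (hypothetical signature): 'ABCDE.FG.HIJ' -> cache id '5.2.3';
# only the lengths of the dot-separated parts matter, not their contents.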
883
884 def _extract_signature_function(self, video_id, player_url, example_sig):
885 id_m = re.match(
886 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
887 player_url)
888 if not id_m:
889 raise ExtractorError('Cannot identify player %r' % player_url)
890 player_type = id_m.group('ext')
891 player_id = id_m.group('id')
892
893 # Read from filesystem cache
894 func_id = '%s_%s_%s' % (
895 player_type, player_id, self._signature_cache_id(example_sig))
896 assert os.path.basename(func_id) == func_id
897
898 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
899 if cache_spec is not None:
900 return lambda s: ''.join(s[i] for i in cache_spec)
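# Note: cache_spec is a list of character indices into the scrambled signature,
# e.g. [61, 3, 0, 47, ...] (hypothetical values), so the cached decipher step is a
# pure reordering/selection of the signature's characters.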
901
902 download_note = (
903 'Downloading player %s' % player_url
904 if self._downloader.params.get('verbose') else
905 'Downloading %s player %s' % (player_type, player_id)
906 )
907 if player_type == 'js':
908 code = self._download_webpage(
909 player_url, video_id,
910 note=download_note,
911 errnote='Download of %s failed' % player_url)
912 res = self._parse_sig_js(code)
913 elif player_type == 'swf':
914 urlh = self._request_webpage(
915 player_url, video_id,
916 note=download_note,
917 errnote='Download of %s failed' % player_url)
918 code = urlh.read()
919 res = self._parse_sig_swf(code)
920 else:
921 assert False, 'Invalid player type %r' % player_type
922
923 test_string = ''.join(map(compat_chr, range(len(example_sig))))
924 cache_res = res(test_string)
925 cache_spec = [ord(c) for c in cache_res]
926
927 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
928 return res
929
930 def _print_sig_code(self, func, example_sig):
931 def gen_sig_code(idxs):
932 def _genslice(start, end, step):
933 starts = '' if start == 0 else str(start)
934 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
935 steps = '' if step == 1 else (':%d' % step)
936 return 's[%s%s%s]' % (starts, ends, steps)
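# Illustrative (hypothetical index values): _genslice(3, 7, 1) -> 's[3:8]' and
# _genslice(10, 2, -2) -> 's[10:0:-2]'; shown only to document the slice syntax produced.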
937
938 step = None
939 # Quelch pyflakes warnings - start will be set when step is set
940 start = '(Never used)'
941 for i, prev in zip(idxs[1:], idxs[:-1]):
942 if step is not None:
943 if i - prev == step:
944 continue
945 yield _genslice(start, prev, step)
946 step = None
947 continue
948 if i - prev in [-1, 1]:
949 step = i - prev
950 start = prev
951 continue
952 else:
953 yield 's[%d]' % prev
954 if step is None:
955 yield 's[%d]' % i
956 else:
957 yield _genslice(start, i, step)
958
959 test_string = ''.join(map(compat_chr, range(len(example_sig))))
960 cache_res = func(test_string)
961 cache_spec = [ord(c) for c in cache_res]
962 expr_code = ' + '.join(gen_sig_code(cache_spec))
963 signature_id_tuple = '(%s)' % (
964 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
965 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
966 ' return %s\n') % (signature_id_tuple, expr_code)
967 self.to_screen('Extracted signature function:\n' + code)
968
969 def _parse_sig_js(self, jscode):
970 funcname = self._search_regex(
971 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
972 'Initial JS player signature function name')
973
974 jsi = JSInterpreter(jscode)
975 initial_function = jsi.extract_function(funcname)
976 return lambda s: initial_function([s])
977
978 def _parse_sig_swf(self, file_contents):
979 swfi = SWFInterpreter(file_contents)
980 TARGET_CLASSNAME = 'SignatureDecipher'
981 searched_class = swfi.extract_class(TARGET_CLASSNAME)
982 initial_function = swfi.extract_function(searched_class, 'decipher')
983 return lambda s: initial_function([s])
984
985 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
986 """Turn the encrypted s field into a working signature"""
987
988 if player_url is None:
989 raise ExtractorError('Cannot decrypt signature without player_url')
990
991 if player_url.startswith('//'):
992 player_url = 'https:' + player_url
993 try:
994 player_id = (player_url, self._signature_cache_id(s))
995 if player_id not in self._player_cache:
996 func = self._extract_signature_function(
997 video_id, player_url, s
998 )
999 self._player_cache[player_id] = func
1000 func = self._player_cache[player_id]
1001 if self._downloader.params.get('youtube_print_sig_code'):
1002 self._print_sig_code(func, s)
1003 return func(s)
1004 except Exception as e:
1005 tb = traceback.format_exc()
1006 raise ExtractorError(
1007 'Signature extraction failed: ' + tb, cause=e)
1008
1009 def _get_subtitles(self, video_id, webpage):
1010 try:
1011 subs_doc = self._download_xml(
1012 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1013 video_id, note=False)
1014 except ExtractorError as err:
1015 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1016 return {}
1017
1018 sub_lang_list = {}
1019 for track in subs_doc.findall('track'):
1020 lang = track.attrib['lang_code']
1021 if lang in sub_lang_list:
1022 continue
1023 sub_formats = []
1024 for ext in self._SUBTITLE_FORMATS:
1025 params = compat_urllib_parse_urlencode({
1026 'lang': lang,
1027 'v': video_id,
1028 'fmt': ext,
1029 'name': track.attrib['name'].encode('utf-8'),
1030 })
1031 sub_formats.append({
1032 'url': 'https://www.youtube.com/api/timedtext?' + params,
1033 'ext': ext,
1034 })
1035 sub_lang_list[lang] = sub_formats
1036 if not sub_lang_list:
1037 self._downloader.report_warning('video doesn\'t have subtitles')
1038 return {}
1039 return sub_lang_list
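# A resulting entry looks roughly like (illustrative values only):
# {'en': [{'url': 'https://www.youtube.com/api/timedtext?lang=en&v=BaW_jenozKc&fmt=vtt&name=', 'ext': 'vtt'}, ...]}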
1040
1041 def _get_ytplayer_config(self, video_id, webpage):
1042 patterns = (
1043 # User data may contain arbitrary character sequences that may affect
1044 # JSON extraction with regex, e.g. when '};' is contained the second
1045 # regex won't capture the whole JSON. We work around this by trying the
1046 # more specific regex first; proper quoted string handling, to be
1047 # implemented in the future, will eventually replace this workaround (see
1048 # https://github.com/rg3/youtube-dl/issues/7468,
1049 # https://github.com/rg3/youtube-dl/pull/7599)
1050 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1051 r';ytplayer\.config\s*=\s*({.+?});',
1052 )
1053 config = self._search_regex(
1054 patterns, webpage, 'ytplayer.config', default=None)
1055 if config:
1056 return self._parse_json(
1057 uppercase_escape(config), video_id, fatal=False)
1058
1059 def _get_automatic_captions(self, video_id, webpage):
1060 """We need the webpage for getting the captions url, pass it as an
1061 argument to speed up the process."""
1062 self.to_screen('%s: Looking for automatic captions' % video_id)
1063 player_config = self._get_ytplayer_config(video_id, webpage)
1064 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1065 if not player_config:
1066 self._downloader.report_warning(err_msg)
1067 return {}
1068 try:
1069 args = player_config['args']
1070 caption_url = args.get('ttsurl')
1071 if caption_url:
1072 timestamp = args['timestamp']
1073 # We get the available subtitles
1074 list_params = compat_urllib_parse_urlencode({
1075 'type': 'list',
1076 'tlangs': 1,
1077 'asrs': 1,
1078 })
1079 list_url = caption_url + '&' + list_params
1080 caption_list = self._download_xml(list_url, video_id)
1081 original_lang_node = caption_list.find('track')
1082 if original_lang_node is None:
1083 self._downloader.report_warning('Video doesn\'t have automatic captions')
1084 return {}
1085 original_lang = original_lang_node.attrib['lang_code']
1086 caption_kind = original_lang_node.attrib.get('kind', '')
1087
1088 sub_lang_list = {}
1089 for lang_node in caption_list.findall('target'):
1090 sub_lang = lang_node.attrib['lang_code']
1091 sub_formats = []
1092 for ext in self._SUBTITLE_FORMATS:
1093 params = compat_urllib_parse_urlencode({
1094 'lang': original_lang,
1095 'tlang': sub_lang,
1096 'fmt': ext,
1097 'ts': timestamp,
1098 'kind': caption_kind,
1099 })
1100 sub_formats.append({
1101 'url': caption_url + '&' + params,
1102 'ext': ext,
1103 })
1104 sub_lang_list[sub_lang] = sub_formats
1105 return sub_lang_list
1106
1107 # Some videos don't provide ttsurl but rather caption_tracks and
1108 # caption_translation_languages (e.g. 20LmZk1hakA)
1109 caption_tracks = args['caption_tracks']
1110 caption_translation_languages = args['caption_translation_languages']
1111 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1112 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1113 caption_qs = compat_parse_qs(parsed_caption_url.query)
1114
1115 sub_lang_list = {}
1116 for lang in caption_translation_languages.split(','):
1117 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1118 sub_lang = lang_qs.get('lc', [None])[0]
1119 if not sub_lang:
1120 continue
1121 sub_formats = []
1122 for ext in self._SUBTITLE_FORMATS:
1123 caption_qs.update({
1124 'tlang': [sub_lang],
1125 'fmt': [ext],
1126 })
1127 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1128 query=compat_urllib_parse_urlencode(caption_qs, True)))
1129 sub_formats.append({
1130 'url': sub_url,
1131 'ext': ext,
1132 })
1133 sub_lang_list[sub_lang] = sub_formats
1134 return sub_lang_list
1135 # An extractor error can be raised by the download process if there are
1136 # no automatic captions but there are subtitles
1137 except (KeyError, ExtractorError):
1138 self._downloader.report_warning(err_msg)
1139 return {}
1140
1141 def _mark_watched(self, video_id, video_info):
1142 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1143 if not playback_url:
1144 return
1145 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1146 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1147
1148 # The cpn generation algorithm is reverse-engineered from base.js.
1149 # In fact, it works even with a dummy cpn.
1150 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1151 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
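# e.g. a generated cpn might look like 'aZ3-_bQ9xK0pLmN2' (16 characters drawn
# from the 64-character alphabet above; illustrative value)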
1152
1153 qs.update({
1154 'ver': ['2'],
1155 'cpn': [cpn],
1156 })
1157 playback_url = compat_urlparse.urlunparse(
1158 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1159
1160 self._download_webpage(
1161 playback_url, video_id, 'Marking watched',
1162 'Unable to mark watched', fatal=False)
1163
1164 @classmethod
1165 def extract_id(cls, url):
1166 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1167 if mobj is None:
1168 raise ExtractorError('Invalid URL: %s' % url)
1169 video_id = mobj.group(2)
1170 return video_id
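# For example (using the video id from the tests above), both
# extract_id('https://youtu.be/BaW_jenozKc') and
# extract_id('https://www.youtube.com/watch?v=BaW_jenozKc') return 'BaW_jenozKc'.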
1171
1172 def _extract_from_m3u8(self, manifest_url, video_id):
1173 url_map = {}
1174
1175 def _get_urls(_manifest):
1176 lines = _manifest.split('\n')
1177 urls = filter(lambda l: l and not l.startswith('#'),
1178 lines)
1179 return urls
1180 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1181 formats_urls = _get_urls(manifest)
1182 for format_url in formats_urls:
1183 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1184 url_map[itag] = format_url
1185 return url_map
1186
1187 def _extract_annotations(self, video_id):
1188 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1189 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1190
1191 def _real_extract(self, url):
1192 url, smuggled_data = unsmuggle_url(url, {})
1193
1194 proto = (
1195 'http' if self._downloader.params.get('prefer_insecure', False)
1196 else 'https')
1197
1198 start_time = None
1199 end_time = None
1200 parsed_url = compat_urllib_parse_urlparse(url)
1201 for component in [parsed_url.fragment, parsed_url.query]:
1202 query = compat_parse_qs(component)
1203 if start_time is None and 't' in query:
1204 start_time = parse_duration(query['t'][0])
1205 if start_time is None and 'start' in query:
1206 start_time = parse_duration(query['start'][0])
1207 if end_time is None and 'end' in query:
1208 end_time = parse_duration(query['end'][0])
1209
1210 # Extract the original video URL from a redirect URL (e.g. age verification) using the next_url parameter
1211 mobj = re.search(self._NEXT_URL_RE, url)
1212 if mobj:
1213 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1214 video_id = self.extract_id(url)
1215
1216 # Get video webpage
1217 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1218 video_webpage = self._download_webpage(url, video_id)
1219
1220 # Attempt to extract SWF player URL
1221 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1222 if mobj is not None:
1223 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1224 else:
1225 player_url = None
1226
1227 dash_mpds = []
1228
1229 def add_dash_mpd(video_info):
1230 dash_mpd = video_info.get('dashmpd')
1231 if dash_mpd and dash_mpd[0] not in dash_mpds:
1232 dash_mpds.append(dash_mpd[0])
1233
1234 # Get video info
1235 embed_webpage = None
1236 is_live = None
1237 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1238 age_gate = True
1239 # We simulate access to the video from www.youtube.com/v/{video_id};
1240 # this can be viewed without logging in to YouTube
1241 url = proto + '://www.youtube.com/embed/%s' % video_id
1242 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1243 data = compat_urllib_parse_urlencode({
1244 'video_id': video_id,
1245 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1246 'sts': self._search_regex(
1247 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1248 })
1249 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1250 video_info_webpage = self._download_webpage(
1251 video_info_url, video_id,
1252 note='Refetching age-gated info webpage',
1253 errnote='unable to download video info webpage')
1254 video_info = compat_parse_qs(video_info_webpage)
1255 add_dash_mpd(video_info)
1256 else:
1257 age_gate = False
1258 video_info = None
1259 # Try looking directly into the video webpage
1260 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1261 if ytplayer_config:
1262 args = ytplayer_config['args']
1263 if args.get('url_encoded_fmt_stream_map'):
1264 # Convert to the same format returned by compat_parse_qs
1265 video_info = dict((k, [v]) for k, v in args.items())
1266 add_dash_mpd(video_info)
1267 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1268 is_live = True
1269 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1270 # We also try looking in get_video_info since it may contain a different dashmpd
1271 # URL that points to a DASH manifest with a possibly different itag set (some itags
1272 # are missing from the DASH manifest pointed to by the webpage's dashmpd, others from
1273 # the DASH manifest pointed to by get_video_info's dashmpd).
1274 # The general idea is to take a union of the itags of both DASH manifests (for an example
1275 # of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1276 self.report_video_info_webpage_download(video_id)
1277 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1278 video_info_url = (
1279 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1280 % (proto, video_id, el_type))
1281 video_info_webpage = self._download_webpage(
1282 video_info_url,
1283 video_id, note=False,
1284 errnote='unable to download video info webpage')
1285 get_video_info = compat_parse_qs(video_info_webpage)
1286 if get_video_info.get('use_cipher_signature') != ['True']:
1287 add_dash_mpd(get_video_info)
1288 if not video_info:
1289 video_info = get_video_info
1290 if 'token' in get_video_info:
1291 # Different get_video_info requests may report different results, e.g.
1292 # some may report video unavailability, but some may serve it without
1293 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1294 # the original webpage as well as el=info and el=embedded get_video_info
1295 # requests report video unavailability due to geo restriction while
1296 # el=detailpage succeeds and returns valid data). This is probably
1297 # due to YouTube measures against IP ranges of hosting providers.
1298 # We work around this by preferring the first successful video_info containing
1299 # the token, if no such video_info has been found yet.
1300 if 'token' not in video_info:
1301 video_info = get_video_info
1302 break
1303 if 'token' not in video_info:
1304 if 'reason' in video_info:
1305 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1306 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1307 if regions_allowed:
1308 raise ExtractorError('YouTube said: This video is available in %s only' % (
1309 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1310 expected=True)
1311 raise ExtractorError(
1312 'YouTube said: %s' % video_info['reason'][0],
1313 expected=True, video_id=video_id)
1314 else:
1315 raise ExtractorError(
1316 '"token" parameter not in video info for unknown reason',
1317 video_id=video_id)
1318
1319 # title
1320 if 'title' in video_info:
1321 video_title = video_info['title'][0]
1322 else:
1323 self._downloader.report_warning('Unable to extract video title')
1324 video_title = '_'
1325
1326 # description
1327 video_description = get_element_by_id("eow-description", video_webpage)
1328 if video_description:
1329 video_description = re.sub(r'''(?x)
1330 <a\s+
1331 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1332 (?:title|href)="([^"]+)"\s+
1333 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1334 class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
1335 [^<]+\.{3}\s*
1336 </a>
1337 ''', r'\1', video_description)
1338 video_description = clean_html(video_description)
1339 else:
1340 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1341 if fd_mobj:
1342 video_description = unescapeHTML(fd_mobj.group(1))
1343 else:
1344 video_description = ''
1345
1346 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1347 if not self._downloader.params.get('noplaylist'):
1348 entries = []
1349 feed_ids = []
1350 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1351 for feed in multifeed_metadata_list.split(','):
1352 # Unquoting should take place before splitting on comma (,) since textual
1353 # fields may contain commas as well (see
1354 # https://github.com/rg3/youtube-dl/issues/8536)
1355 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1356 entries.append({
1357 '_type': 'url_transparent',
1358 'ie_key': 'Youtube',
1359 'url': smuggle_url(
1360 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1361 {'force_singlefeed': True}),
1362 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1363 })
1364 feed_ids.append(feed_data['id'][0])
1365 self.to_screen(
1366 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1367 % (', '.join(feed_ids), video_id))
1368 return self.playlist_result(entries, video_id, video_title, video_description)
1369 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1370
1371 if 'view_count' in video_info:
1372 view_count = int(video_info['view_count'][0])
1373 else:
1374 view_count = None
1375
1376 # Check for "rental" videos
1377 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1378 raise ExtractorError('"rental" videos not supported')
1379
1380 # Start extracting information
1381 self.report_information_extraction(video_id)
1382
1383 # uploader
1384 if 'author' not in video_info:
1385 raise ExtractorError('Unable to extract uploader name')
1386 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1387
1388 # uploader_id
1389 video_uploader_id = None
1390 video_uploader_url = None
1391 mobj = re.search(
1392 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1393 video_webpage)
1394 if mobj is not None:
1395 video_uploader_id = mobj.group('uploader_id')
1396 video_uploader_url = mobj.group('uploader_url')
1397 else:
1398 self._downloader.report_warning('unable to extract uploader nickname')
1399
1400 # thumbnail image
1401 # We first try to get a high-quality image:
1402 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1403 video_webpage, re.DOTALL)
1404 if m_thumb is not None:
1405 video_thumbnail = m_thumb.group(1)
1406 elif 'thumbnail_url' not in video_info:
1407 self._downloader.report_warning('unable to extract video thumbnail')
1408 video_thumbnail = None
1409 else: # don't panic if we can't find it
1410 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1411
1412 # upload date
1413 upload_date = self._html_search_meta(
1414 'datePublished', video_webpage, 'upload date', default=None)
1415 if not upload_date:
1416 upload_date = self._search_regex(
1417 [r'(?s)id="eow-date.*?>(.*?)</span>',
1418 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1419 video_webpage, 'upload date', default=None)
1420 if upload_date:
1421 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1422 upload_date = unified_strdate(upload_date)
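# Illustrative sketch (hypothetical date string): the normalization above turns
# 'Dec 4, 2012' into a form unified_strdate can parse, e.g.
#   >>> ' '.join(re.sub(r'[/,-]', r' ', 'Dec 4, 2012').split())
#   'Dec 4 2012'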
1423
1424 video_license = self._html_search_regex(
1425 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1426 video_webpage, 'license', default=None)
1427
1428 m_music = re.search(
1429 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1430 video_webpage)
1431 if m_music:
1432 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1433 video_creator = clean_html(m_music.group('creator'))
1434 else:
1435 video_alt_title = video_creator = None
1436
1437 m_cat_container = self._search_regex(
1438 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1439 video_webpage, 'categories', default=None)
1440 if m_cat_container:
1441 category = self._html_search_regex(
1442 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1443 default=None)
1444 video_categories = None if category is None else [category]
1445 else:
1446 video_categories = None
1447
1448 video_tags = [
1449 unescapeHTML(m.group('content'))
1450 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1451
1452 def _extract_count(count_name):
1453 return str_to_int(self._search_regex(
1454 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1455 % re.escape(count_name),
1456 video_webpage, count_name, default=None))
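# Illustrative sketch (hypothetical count markup): str_to_int drops thousands
# separators from the captured text, e.g. str_to_int('1,234,567') == 1234567.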
1457
1458 like_count = _extract_count('like')
1459 dislike_count = _extract_count('dislike')
1460
1461 # subtitles
1462 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1463 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1464
1465 if 'length_seconds' not in video_info:
1466 self._downloader.report_warning('unable to extract video duration')
1467 video_duration = None
1468 else:
1469 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1470
1471 # annotations
1472 video_annotations = None
1473 if self._downloader.params.get('writeannotations', False):
1474 video_annotations = self._extract_annotations(video_id)
1475
1476 def _map_to_format_list(urlmap):
1477 formats = []
1478 for itag, video_real_url in urlmap.items():
1479 dct = {
1480 'format_id': itag,
1481 'url': video_real_url,
1482 'player_url': player_url,
1483 }
1484 if itag in self._formats:
1485 dct.update(self._formats[itag])
1486 formats.append(dct)
1487 return formats
1488
1489 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1490 self.report_rtmp_download()
1491 formats = [{
1492 'format_id': '_rtmp',
1493 'protocol': 'rtmp',
1494 'url': video_info['conn'][0],
1495 'player_url': player_url,
1496 }]
1497 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1498 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1499 if 'rtmpe%3Dyes' in encoded_url_map:
1500 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1501 formats_spec = {}
1502 fmt_list = video_info.get('fmt_list', [''])[0]
1503 if fmt_list:
1504 for fmt in fmt_list.split(','):
1505 spec = fmt.split('/')
1506 if len(spec) > 1:
1507 width_height = spec[1].split('x')
1508 if len(width_height) == 2:
1509 formats_spec[spec[0]] = {
1510 'resolution': spec[1],
1511 'width': int_or_none(width_height[0]),
1512 'height': int_or_none(width_height[1]),
1513 }
1514 formats = []
1515 for url_data_str in encoded_url_map.split(','):
1516 url_data = compat_parse_qs(url_data_str)
1517 if 'itag' not in url_data or 'url' not in url_data:
1518 continue
1519 format_id = url_data['itag'][0]
1520 url = url_data['url'][0]
1521
1522 if 'sig' in url_data:
1523 url += '&signature=' + url_data['sig'][0]
1524 elif 's' in url_data:
1525 encrypted_sig = url_data['s'][0]
1526 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1527
1528 jsplayer_url_json = self._search_regex(
1529 ASSETS_RE,
1530 embed_webpage if age_gate else video_webpage,
1531 'JS player URL (1)', default=None)
1532 if not jsplayer_url_json and not age_gate:
1533 # We need the embed webpage after all
1534 if embed_webpage is None:
1535 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1536 embed_webpage = self._download_webpage(
1537 embed_url, video_id, 'Downloading embed webpage')
1538 jsplayer_url_json = self._search_regex(
1539 ASSETS_RE, embed_webpage, 'JS player URL')
1540
1541 player_url = json.loads(jsplayer_url_json)
1542 if player_url is None:
1543 player_url_json = self._search_regex(
1544 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1545 video_webpage, 'age gate player URL')
1546 player_url = json.loads(player_url_json)
1547
1548 if self._downloader.params.get('verbose'):
1549 if player_url is None:
1550 player_version = 'unknown'
1551 player_desc = 'unknown'
1552 else:
1553 if player_url.endswith('swf'):
1554 player_version = self._search_regex(
1555 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1556 'flash player', fatal=False)
1557 player_desc = 'flash player %s' % player_version
1558 else:
1559 player_version = self._search_regex(
1560 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1561 player_url,
1562 'html5 player', fatal=False)
1563 player_desc = 'html5 player %s' % player_version
1564
1565 parts_sizes = self._signature_cache_id(encrypted_sig)
1566 self.to_screen('{%s} signature length %s, %s' %
1567 (format_id, parts_sizes, player_desc))
1568
1569 signature = self._decrypt_signature(
1570 encrypted_sig, video_id, player_url, age_gate)
1571 url += '&signature=' + signature
1572 if 'ratebypass' not in url:
1573 url += '&ratebypass=yes'
1574
1575 dct = {
1576 'format_id': format_id,
1577 'url': url,
1578 'player_url': player_url,
1579 }
1580 if format_id in self._formats:
1581 dct.update(self._formats[format_id])
1582 if format_id in formats_spec:
1583 dct.update(formats_spec[format_id])
1584
1585 # Some itags are not included in the DASH manifest, so the corresponding formats
1586 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1587 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
1588 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1589 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
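# Illustrative sketch (hypothetical 'size' value): '1280x720' yields
#   >>> m = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', '1280x720')
#   >>> int(m.group('width')), int(m.group('height'))
#   (1280, 720)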
1590
1591 more_fields = {
1592 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1593 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1594 'width': width,
1595 'height': height,
1596 'fps': int_or_none(url_data.get('fps', [None])[0]),
1597 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1598 }
1599 for key, value in more_fields.items():
1600 if value:
1601 dct[key] = value
1602 type_ = url_data.get('type', [None])[0]
1603 if type_:
1604 type_split = type_.split(';')
1605 kind_ext = type_split[0].split('/')
1606 if len(kind_ext) == 2:
1607 kind, _ = kind_ext
1608 dct['ext'] = mimetype2ext(type_split[0])
1609 if kind in ('audio', 'video'):
1610 codecs = None
1611 for mobj in re.finditer(
1612 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1613 if mobj.group('key') == 'codecs':
1614 codecs = mobj.group('val')
1615 break
1616 if codecs:
1617 codecs = codecs.split(',')
1618 if len(codecs) == 2:
1619 acodec, vcodec = codecs[1], codecs[0]
1620 else:
1621 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1622 dct.update({
1623 'acodec': acodec,
1624 'vcodec': vcodec,
1625 })
1626 formats.append(dct)
1627 elif video_info.get('hlsvp'):
1628 manifest_url = video_info['hlsvp'][0]
1629 url_map = self._extract_from_m3u8(manifest_url, video_id)
1630 formats = _map_to_format_list(url_map)
1631 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1632 for a_format in formats:
1633 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1634 else:
1635 unavailable_message = self._html_search_regex(
1636 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1637 video_webpage, 'unavailable message', default=None)
1638 if unavailable_message:
1639 raise ExtractorError(unavailable_message, expected=True)
1640 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1641
1642 # Look for the DASH manifest
1643 if self._downloader.params.get('youtube_include_dash_manifest', True):
1644 dash_mpd_fatal = True
1645 for mpd_url in dash_mpds:
1646 dash_formats = {}
1647 try:
1648 def decrypt_sig(mobj):
1649 s = mobj.group(1)
1650 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1651 return '/signature/%s' % dec_s
1652
1653 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1654
1655 for df in self._extract_mpd_formats(
1656 mpd_url, video_id, fatal=dash_mpd_fatal,
1657 formats_dict=self._formats):
1658 # Do not overwrite a DASH format found in a previous DASH manifest
1659 if df['format_id'] not in dash_formats:
1660 dash_formats[df['format_id']] = df
1661 # Additional DASH manifests may result in HTTP Error 403, so
1662 # allow them to fail without a bug report message if at least one
1663 # DASH manifest has already succeeded. This is a temporary workaround
1664 # to reduce the burst of bug reports until we figure out the reason
1665 # and whether it can be fixed at all.
1666 dash_mpd_fatal = False
1667 except (ExtractorError, KeyError) as e:
1668 self.report_warning(
1669 'Skipping DASH manifest: %r' % e, video_id)
1670 if dash_formats:
1671 # Remove the formats we found through non-DASH; they
1672 # contain less info and can be wrong because we use
1673 # fixed values (for example the resolution). See
1674 # https://github.com/rg3/youtube-dl/issues/5774 for an
1675 # example.
1676 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1677 formats.extend(dash_formats.values())
1678
1679 # Check for malformed aspect ratio
1680 stretched_m = re.search(
1681 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1682 video_webpage)
1683 if stretched_m:
1684 w = float(stretched_m.group('w'))
1685 h = float(stretched_m.group('h'))
1686 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1687 # We will only process correct ratios.
1688 if w > 0 and h > 0:
1689 ratio = w / h
1690 for f in formats:
1691 if f.get('vcodec') != 'none':
1692 f['stretched_ratio'] = ratio
1693
1694 self._sort_formats(formats)
1695
1696 self.mark_watched(video_id, video_info)
1697
1698 return {
1699 'id': video_id,
1700 'uploader': video_uploader,
1701 'uploader_id': video_uploader_id,
1702 'uploader_url': video_uploader_url,
1703 'upload_date': upload_date,
1704 'license': video_license,
1705 'creator': video_creator,
1706 'title': video_title,
1707 'alt_title': video_alt_title,
1708 'thumbnail': video_thumbnail,
1709 'description': video_description,
1710 'categories': video_categories,
1711 'tags': video_tags,
1712 'subtitles': video_subtitles,
1713 'automatic_captions': automatic_captions,
1714 'duration': video_duration,
1715 'age_limit': 18 if age_gate else 0,
1716 'annotations': video_annotations,
1717 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1718 'view_count': view_count,
1719 'like_count': like_count,
1720 'dislike_count': dislike_count,
1721 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1722 'formats': formats,
1723 'is_live': is_live,
1724 'start_time': start_time,
1725 'end_time': end_time,
1726 }
1727
1728
1729 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1730 IE_DESC = 'YouTube.com playlists'
1731 _VALID_URL = r"""(?x)(?:
1732 (?:https?://)?
1733 (?:\w+\.)?
1734 youtube\.com/
1735 (?:
1736 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1737 \? (?:.*?[&;])*? (?:p|a|list)=
1738 | p/
1739 )
1740 (
1741 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1742 # Top tracks; these can also include dots
1743 |(?:MC)[\w\.]*
1744 )
1745 .*
1746 |
1747 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1748 )"""
1749 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1750 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1751 IE_NAME = 'youtube:playlist'
1752 _TESTS = [{
1753 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1754 'info_dict': {
1755 'title': 'ytdl test PL',
1756 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1757 },
1758 'playlist_count': 3,
1759 }, {
1760 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1761 'info_dict': {
1762 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1763 'title': 'YDL_Empty_List',
1764 },
1765 'playlist_count': 0,
1766 }, {
1767 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1768 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1769 'info_dict': {
1770 'title': '29C3: Not my department',
1771 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1772 },
1773 'playlist_count': 95,
1774 }, {
1775 'note': 'issue #673',
1776 'url': 'PLBB231211A4F62143',
1777 'info_dict': {
1778 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1779 'id': 'PLBB231211A4F62143',
1780 },
1781 'playlist_mincount': 26,
1782 }, {
1783 'note': 'Large playlist',
1784 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1785 'info_dict': {
1786 'title': 'Uploads from Cauchemar',
1787 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1788 },
1789 'playlist_mincount': 799,
1790 }, {
1791 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1792 'info_dict': {
1793 'title': 'YDL_safe_search',
1794 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1795 },
1796 'playlist_count': 2,
1797 }, {
1798 'note': 'embedded',
1799 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1800 'playlist_count': 4,
1801 'info_dict': {
1802 'title': 'JODA15',
1803 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1804 }
1805 }, {
1806 'note': 'Embedded SWF player',
1807 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1808 'playlist_count': 4,
1809 'info_dict': {
1810 'title': 'JODA7',
1811 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1812 }
1813 }, {
1814 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1815 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1816 'info_dict': {
1817 'title': 'Uploads from Interstellar Movie',
1818 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1819 },
1820 'playlist_mincount': 21,
1821 }]
1822
1823 def _real_initialize(self):
1824 self._login()
1825
1826 def _extract_mix(self, playlist_id):
1827 # Mixes are generated from a single video;
1828 # the id of the playlist is just 'RD' + video_id
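# Illustrative sketch: for a mix seeded from the hypothetical video id 'dQw4w9WgXcQ',
# the playlist id is 'RDdQw4w9WgXcQ', so slicing off the last 11 characters
# recovers the seed video id:
#   >>> 'RDdQw4w9WgXcQ'[-11:]
#   'dQw4w9WgXcQ'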
1829 ids = []
1830 last_id = playlist_id[-11:]
1831 for n in itertools.count(1):
1832 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
1833 webpage = self._download_webpage(
1834 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
1835 new_ids = orderedSet(re.findall(
1836 r'''(?xs)data-video-username=".*?".*?
1837 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1838 webpage))
1839 # Fetch new pages until all the videos repeat; it seems that
1840 # there are always 51 unique videos.
1841 new_ids = [_id for _id in new_ids if _id not in ids]
1842 if not new_ids:
1843 break
1844 ids.extend(new_ids)
1845 last_id = ids[-1]
1846
1847 url_results = self._ids_to_results(ids)
1848
1849 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1850 title_span = (
1851 search_title('playlist-title') or
1852 search_title('title long-title') or
1853 search_title('title'))
1854 title = clean_html(title_span)
1855
1856 return self.playlist_result(url_results, playlist_id, title)
1857
1858 def _extract_playlist(self, playlist_id):
1859 url = self._TEMPLATE_URL % playlist_id
1860 page = self._download_webpage(url, playlist_id)
1861
1862 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1863 match = match.strip()
1864 # Check if the playlist exists or is private
1865 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1866 raise ExtractorError(
1867 'The playlist doesn\'t exist or is private, use --username or '
1868 '--netrc to access it.',
1869 expected=True)
1870 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1871 raise ExtractorError(
1872 'Invalid parameters. Maybe URL is incorrect.',
1873 expected=True)
1874 elif re.match(r'[^<]*Choose your language[^<]*', match):
1875 continue
1876 else:
1877 self.report_warning('Youtube gives an alert message: ' + match)
1878
1879 playlist_title = self._html_search_regex(
1880 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1881 page, 'title')
1882
1883 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1884
1885 def _check_download_just_video(self, url, playlist_id):
1886 # Check if it's a video-specific URL
1887 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1888 if 'v' in query_dict:
1889 video_id = query_dict['v'][0]
1890 if self._downloader.params.get('noplaylist'):
1891 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1892 return self.url_result(video_id, 'Youtube', video_id=video_id)
1893 else:
1894 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1895
1896 def _real_extract(self, url):
1897 # Extract playlist id
1898 mobj = re.match(self._VALID_URL, url)
1899 if mobj is None:
1900 raise ExtractorError('Invalid URL: %s' % url)
1901 playlist_id = mobj.group(1) or mobj.group(2)
1902
1903 video = self._check_download_just_video(url, playlist_id)
1904 if video:
1905 return video
1906
1907 if playlist_id.startswith(('RD', 'UL', 'PU')):
1908 # Mixes require a custom extraction process
1909 return self._extract_mix(playlist_id)
1910
1911 return self._extract_playlist(playlist_id)
1912
1913
1914 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
1915 IE_DESC = 'YouTube.com channels'
1916 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
1917 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
1918 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
1919 IE_NAME = 'youtube:channel'
1920 _TESTS = [{
1921 'note': 'paginated channel',
1922 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
1923 'playlist_mincount': 91,
1924 'info_dict': {
1925 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
1926 'title': 'Uploads from lex will',
1927 }
1928 }, {
1929 'note': 'Age restricted channel',
1930 # from https://www.youtube.com/user/DeusExOfficial
1931 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
1932 'playlist_mincount': 64,
1933 'info_dict': {
1934 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
1935 'title': 'Uploads from Deus Ex',
1936 },
1937 }]
1938
1939 @classmethod
1940 def suitable(cls, url):
1941 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
1942 else super(YoutubeChannelIE, cls).suitable(url))
1943
1944 def _real_extract(self, url):
1945 channel_id = self._match_id(url)
1946
1947 url = self._TEMPLATE_URL % channel_id
1948
1949 # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
1950 # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL;
1951 # otherwise fall back on channel-by-page extraction.
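# Illustrative sketch (channel id taken from the tests above): a 'UC...' channel id
# maps to its uploads playlist id by swapping the prefix to 'UU', e.g.
#   >>> 'UU' + 'UCKfVa3S1e4PHvxWcwyMMg8w'[2:]
#   'UUKfVa3S1e4PHvxWcwyMMg8w'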
1952 channel_page = self._download_webpage(
1953 url + '?view=57', channel_id,
1954 'Downloading channel page', fatal=False)
1955 if channel_page is False:
1956 channel_playlist_id = False
1957 else:
1958 channel_playlist_id = self._html_search_meta(
1959 'channelId', channel_page, 'channel id', default=None)
1960 if not channel_playlist_id:
1961 channel_playlist_id = self._search_regex(
1962 r'data-(?:channel-external-|yt)id="([^"]+)"',
1963 channel_page, 'channel id', default=None)
1964 if channel_playlist_id and channel_playlist_id.startswith('UC'):
1965 playlist_id = 'UU' + channel_playlist_id[2:]
1966 return self.url_result(
1967 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
1968
1969 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
1970 autogenerated = re.search(r'''(?x)
1971 class="[^"]*?(?:
1972 channel-header-autogenerated-label|
1973 yt-channel-title-autogenerated
1974 )[^"]*"''', channel_page) is not None
1975
1976 if autogenerated:
1977 # The videos are contained in a single page;
1978 # the ajax pages can't be used because they are empty
1979 entries = [
1980 self.url_result(
1981 video_id, 'Youtube', video_id=video_id,
1982 video_title=video_title)
1983 for video_id, video_title in self.extract_videos_from_page(channel_page)]
1984 return self.playlist_result(entries, channel_id)
1985
1986 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
1987
1988
1989 class YoutubeUserIE(YoutubeChannelIE):
1990 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
1991 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/|c/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
1992 _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
1993 IE_NAME = 'youtube:user'
1994
1995 _TESTS = [{
1996 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
1997 'playlist_mincount': 320,
1998 'info_dict': {
1999 'title': 'TheLinuxFoundation',
2000 }
2001 }, {
2002 'url': 'ytuser:phihag',
2003 'only_matching': True,
2004 }, {
2005 'url': 'https://www.youtube.com/c/gametrailers',
2006 'only_matching': True,
2007 }]
2008
2009 @classmethod
2010 def suitable(cls, url):
2011 # Don't return True if the url can be extracted with another youtube
2012 # extractor; the regex is too permissive and it would match.
2013 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2014 if any(ie.suitable(url) for ie in other_yt_ies):
2015 return False
2016 else:
2017 return super(YoutubeUserIE, cls).suitable(url)
2018
2019
2020 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2021 IE_DESC = 'YouTube.com live streams'
2022 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
2023 IE_NAME = 'youtube:live'
2024
2025 _TESTS = [{
2026 'url': 'http://www.youtube.com/user/TheYoungTurks/live',
2027 'info_dict': {
2028 'id': 'a48o2S1cPoo',
2029 'ext': 'mp4',
2030 'title': 'The Young Turks - Live Main Show',
2031 'uploader': 'The Young Turks',
2032 'uploader_id': 'TheYoungTurks',
2033 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2034 'upload_date': '20150715',
2035 'license': 'Standard YouTube License',
2036 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2037 'categories': ['News & Politics'],
2038 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2039 'like_count': int,
2040 'dislike_count': int,
2041 },
2042 'params': {
2043 'skip_download': True,
2044 },
2045 }, {
2046 'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2047 'only_matching': True,
2048 }]
2049
2050 def _real_extract(self, url):
2051 mobj = re.match(self._VALID_URL, url)
2052 channel_id = mobj.group('id')
2053 base_url = mobj.group('base_url')
2054 webpage = self._download_webpage(url, channel_id, fatal=False)
2055 if webpage:
2056 page_type = self._og_search_property(
2057 'type', webpage, 'page type', default=None)
2058 video_id = self._html_search_meta(
2059 'videoId', webpage, 'video id', default=None)
2060 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2061 return self.url_result(video_id, YoutubeIE.ie_key())
2062 return self.url_result(base_url)
2063
2064
2065 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2066 IE_DESC = 'YouTube.com user/channel playlists'
2067 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2068 IE_NAME = 'youtube:playlists'
2069
2070 _TESTS = [{
2071 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
2072 'playlist_mincount': 4,
2073 'info_dict': {
2074 'id': 'ThirstForScience',
2075 'title': 'Thirst for Science',
2076 },
2077 }, {
2078 # with "Load more" button
2079 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2080 'playlist_mincount': 70,
2081 'info_dict': {
2082 'id': 'igorkle1',
2083 'title': 'Игорь Клейнер',
2084 },
2085 }, {
2086 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2087 'playlist_mincount': 17,
2088 'info_dict': {
2089 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2090 'title': 'Chem Player',
2091 },
2092 }]
2093
2094
2095 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2096 IE_DESC = 'YouTube.com searches'
2097 # there doesn't appear to be a real limit; for example, if you search for
2098 # 'python' you get more than 8,000,000 results
2099 _MAX_RESULTS = float('inf')
2100 IE_NAME = 'youtube:search'
2101 _SEARCH_KEY = 'ytsearch'
2102 _EXTRA_QUERY_ARGS = {}
2103 _TESTS = []
2104
2105 def _get_n_results(self, query, n):
2106 """Get a specified number of results for a query"""
2107
2108 videos = []
2109 limit = n
2110
2111 for pagenum in itertools.count(1):
2112 url_query = {
2113 'search_query': query.encode('utf-8'),
2114 'page': pagenum,
2115 'spf': 'navigate',
2116 }
2117 url_query.update(self._EXTRA_QUERY_ARGS)
2118 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2119 data = self._download_json(
2120 result_url, video_id='query "%s"' % query,
2121 note='Downloading page %s' % pagenum,
2122 errnote='Unable to download API page')
2123 html_content = data[1]['body']['content']
2124
2125 if 'class="search-message' in html_content:
2126 raise ExtractorError(
2127 '[youtube] No video results', expected=True)
2128
2129 new_videos = self._ids_to_results(orderedSet(re.findall(
2130 r'href="/watch\?v=(.{11})', html_content)))
2131 videos += new_videos
2132 if not new_videos or len(videos) > limit:
2133 break
2134
2135 if len(videos) > n:
2136 videos = videos[:n]
2137 return self.playlist_result(videos, query)
2138
2139
2140 class YoutubeSearchDateIE(YoutubeSearchIE):
2141 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2142 _SEARCH_KEY = 'ytsearchdate'
2143 IE_DESC = 'YouTube.com searches, newest videos first'
2144 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2145
2146
2147 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2148 IE_DESC = 'YouTube.com search URLs'
2149 IE_NAME = 'youtube:search_url'
2150 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2151 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2152 _TESTS = [{
2153 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2154 'playlist_mincount': 5,
2155 'info_dict': {
2156 'title': 'youtube-dl test video',
2157 }
2158 }, {
2159 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2160 'only_matching': True,
2161 }]
2162
2163 def _real_extract(self, url):
2164 mobj = re.match(self._VALID_URL, url)
2165 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2166 webpage = self._download_webpage(url, query)
2167 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2168
2169
2170 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2171 IE_DESC = 'YouTube.com (multi-season) shows'
2172 _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
2173 IE_NAME = 'youtube:show'
2174 _TESTS = [{
2175 'url': 'https://www.youtube.com/show/airdisasters',
2176 'playlist_mincount': 5,
2177 'info_dict': {
2178 'id': 'airdisasters',
2179 'title': 'Air Disasters',
2180 }
2181 }]
2182
2183 def _real_extract(self, url):
2184 playlist_id = self._match_id(url)
2185 return super(YoutubeShowIE, self)._real_extract(
2186 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2187
2188
2189 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2190 """
2191 Base class for feed extractors
2192 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2193 """
2194 _LOGIN_REQUIRED = True
2195
2196 @property
2197 def IE_NAME(self):
2198 return 'youtube:%s' % self._FEED_NAME
2199
2200 def _real_initialize(self):
2201 self._login()
2202
2203 def _real_extract(self, url):
2204 page = self._download_webpage(
2205 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2206
2207 # The extraction process is the same as for playlists, but the regex
2208 # for the video ids doesn't contain an index
2209 ids = []
2210 more_widget_html = content_html = page
2211 for page_num in itertools.count(1):
2212 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2213
2214 # The 'recommended' feed has an infinite 'load more' and each new portion returns
2215 # the same videos in a (sometimes) slightly different order, so we check
2216 # for uniqueness and break when a portion has no new videos
2217 new_ids = [video_id for video_id in orderedSet(matches) if video_id not in ids]
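# Illustrative sketch (hypothetical ids): only ids not seen before are kept, e.g.
#   >>> seen = ['a1b2c3d4e5f']
#   >>> [v for v in ['a1b2c3d4e5f', 'ZYXWVUTSRQP'] if v not in seen]
#   ['ZYXWVUTSRQP']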
2218 if not new_ids:
2219 break
2220
2221 ids.extend(new_ids)
2222
2223 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2224 if not mobj:
2225 break
2226
2227 more = self._download_json(
2228 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2229 'Downloading page #%s' % page_num,
2230 transform_source=uppercase_escape)
2231 content_html = more['content_html']
2232 more_widget_html = more['load_more_widget_html']
2233
2234 return self.playlist_result(
2235 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2236
2237
2238 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2239 IE_NAME = 'youtube:watchlater'
2240 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2241 _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2242
2243 _TESTS = [{
2244 'url': 'https://www.youtube.com/playlist?list=WL',
2245 'only_matching': True,
2246 }, {
2247 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2248 'only_matching': True,
2249 }]
2250
2251 def _real_extract(self, url):
2252 video = self._check_download_just_video(url, 'WL')
2253 if video:
2254 return video
2255 return self._extract_playlist('WL')
2256
2257
2258 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2259 IE_NAME = 'youtube:favorites'
2260 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2261 _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2262 _LOGIN_REQUIRED = True
2263
2264 def _real_extract(self, url):
2265 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2266 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2267 return self.url_result(playlist_id, 'YoutubePlaylist')
2268
2269
2270 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2271 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2272 _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2273 _FEED_NAME = 'recommended'
2274 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2275
2276
2277 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2278 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2279 _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2280 _FEED_NAME = 'subscriptions'
2281 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2282
2283
2284 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2285 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2286 _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
2287 _FEED_NAME = 'history'
2288 _PLAYLIST_TITLE = 'Youtube History'
2289
2290
2291 class YoutubeTruncatedURLIE(InfoExtractor):
2292 IE_NAME = 'youtube:truncated_url'
2293 IE_DESC = False # Do not list
2294 _VALID_URL = r'''(?x)
2295 (?:https?://)?
2296 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2297 (?:watch\?(?:
2298 feature=[a-z_]+|
2299 annotation_id=annotation_[^&]+|
2300 x-yt-cl=[0-9]+|
2301 hl=[^&]*|
2302 t=[0-9]+
2303 )?
2304 |
2305 attribution_link\?a=[^&]+
2306 )
2307 $
2308 '''
2309
2310 _TESTS = [{
2311 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
2312 'only_matching': True,
2313 }, {
2314 'url': 'http://www.youtube.com/watch?',
2315 'only_matching': True,
2316 }, {
2317 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2318 'only_matching': True,
2319 }, {
2320 'url': 'https://www.youtube.com/watch?feature=foo',
2321 'only_matching': True,
2322 }, {
2323 'url': 'https://www.youtube.com/watch?hl=en-GB',
2324 'only_matching': True,
2325 }, {
2326 'url': 'https://www.youtube.com/watch?t=2372',
2327 'only_matching': True,
2328 }]
2329
2330 def _real_extract(self, url):
2331 raise ExtractorError(
2332 'Did you forget to quote the URL? Remember that & is a meta '
2333 'character in most shells, so you want to put the URL in quotes, '
2334 'like youtube-dl '
2335 '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2336 ' or simply youtube-dl BaW_jenozKc .',
2337 expected=True)
2338
2339
2340 class YoutubeTruncatedIDIE(InfoExtractor):
2341 IE_NAME = 'youtube:truncated_id'
2342 IE_DESC = False # Do not list
2343 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2344
2345 _TESTS = [{
2346 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2347 'only_matching': True,
2348 }]
2349
2350 def _real_extract(self, url):
2351 video_id = self._match_id(url)
2352 raise ExtractorError(
2353 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2354 expected=True)