# coding: utf-8

from __future__ import unicode_literals


import itertools
import json
import os.path
import re
import traceback

from .common import InfoExtractor, SearchInfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..utils import (
    compat_chr,
    compat_parse_qs,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_str,

    clean_html,
    get_element_by_id,
    get_element_by_attribute,
    ExtractorError,
    int_or_none,
    PagedList,
    unescapeHTML,
    unified_strdate,
    orderedSet,
    uppercase_escape,
)


class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        return bool(self._download_webpage(
            self._LANG_URL, None,
            note='Setting language', errnote='unable to set language',
            fatal=False))

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            return

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,

            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')

        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
            tfa_code = self._get_tfa_info()

            if tfa_code is None:
                self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
                self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            # Unlike the first login form, secTok and timeStmp are both required for the TFA form

            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get secTok - did the page structure change?')
                return False
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
                return False
            timeStmp = match.group(1)

            tfa_form_strs = {
                'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                'smsToken': '',
                'smsUserPin': tfa_code,
                'smsVerifyPin': 'Verify',

                'PersistentCookie': 'yes',
                'checkConnection': '',
                'checkedDomains': 'youtube',
                'pstMsg': '1',
                'secTok': secTok,
                'timeStmp': timeStmp,
                'service': 'youtube',
                'hl': 'en_US',
            }
            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
            tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')

            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _confirm_age(self):
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        req = compat_urllib_request.Request(self._AGE_URL,
                                            compat_urllib_parse.urlencode(age_form).encode('ascii'))

        self._download_webpage(
            req, None,
            note='Confirming age', errnote='Unable to confirm age')
        return True

    def _real_initialize(self):
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()


class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
    IE_DESC = 'YouTube.com'
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |youtu\.be/                                          # just youtu.be/xxxx
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here it is! the YouTube video ID
                     (?!.*?&list=)                                            # combined list/video URLs are handled by the playlist IE
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
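    # Known itags and their properties. Keys are itag strings as they appear
    # in the stream maps; the attributes are merged into the matching format
    # dict by _map_to_format_list() below.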
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},

        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }

    IE_NAME = 'youtube'
    _TESTS = [
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'like_count': int,
                'dislike_count': int,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'id': 'UxxajLWwzqY',
                'ext': 'mp4',
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'id': '07FYdnEawAQ',
                'ext': 'mp4',
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'id': 'yZIXLfi8CZQ',
                'ext': 'mp4',
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia'
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'id': 'a9LDPn-MO4I',
                'ext': 'm4a',
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'description': '',
                'uploader': '8KVIDEO',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # DASH manifest with encrypted signature
        {
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'id': 'IB3lcPjvWLA',
                'ext': 'm4a',
                'title': 'Afrojack - The Spark ft. Spree Wilson',
                'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
    ]

    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        self._player_cache = {}

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen('%s: Downloading video info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen('%s: Extracting video information' % video_id)

    def report_unavailable_format(self, video_id, format):
        """Report that the requested format is not available."""
        self.to_screen('%s: Format %s not available' % (video_id, format))

    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')

    def _signature_cache_id(self, example_sig):
        """ Return a string representation of a signature """
        return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))

    def _extract_signature_function(self, video_id, player_url, example_sig):
        id_m = re.match(
            r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            return lambda s: ''.join(s[i] for i in cache_spec)

        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note='Downloading %s player %s' % (player_type, player_id),
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note='Downloading %s player %s' % (player_type, player_id),
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        if cache_spec is None:
            test_string = ''.join(map(compat_chr, range(len(example_sig))))
            cache_res = res(test_string)
            cache_spec = [ord(c) for c in cache_res]

        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res

    def _print_sig_code(self, func, example_sig):
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
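                # Emits a Python slice for a run of indices, e.g. the
                # descending run 3, 2, 1 (start=3, end=1, step=-1)
                # collapses to 's[3:0:-1]'.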
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            start = '(Never used)'  # Quell pyflakes warnings - start will be
                                    # set as soon as step is set
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)

    def _parse_sig_js(self, jscode):
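        # The player code contains something like (hypothetical):
        #     c && (b.signature = xy(decodeURIComponent(c)));
        # from which the regex below picks out the function name 'xy'.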
        funcname = self._search_regex(
            r'signature=([$a-zA-Z]+)', jscode,
            'Initial JS player signature function name')

        jsi = JSInterpreter(jscode)
        initial_function = jsi.extract_function(funcname)
        return lambda s: initial_function([s])

    def _parse_sig_swf(self, file_contents):
        swfi = SWFInterpreter(file_contents)
        TARGET_CLASSNAME = 'SignatureDecipher'
        searched_class = swfi.extract_class(TARGET_CLASSNAME)
        initial_function = swfi.extract_function(searched_class, 'decipher')
        return lambda s: initial_function([s])

    def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
        """Turn the encrypted s field into a working signature"""

        if player_url is None:
            raise ExtractorError('Cannot decrypt signature without player_url')

        if player_url.startswith('//'):
            player_url = 'https:' + player_url
        try:
            player_id = (player_url, self._signature_cache_id(s))
            if player_id not in self._player_cache:
                func = self._extract_signature_function(
                    video_id, player_url, s
                )
                self._player_cache[player_id] = func
            func = self._player_cache[player_id]
            if self._downloader.params.get('youtube_print_sig_code'):
                self._print_sig_code(func, s)
            return func(s)
        except Exception as e:
            tb = traceback.format_exc()
            raise ExtractorError(
                'Signature extraction failed: ' + tb, cause=e)

    def _get_available_subtitles(self, video_id, webpage):
        try:
            sub_list = self._download_webpage(
                'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
                video_id, note=False)
        except ExtractorError as err:
            self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
            return {}
        lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)

        sub_lang_list = {}
        for l in lang_list:
            lang = l[1]
            if lang in sub_lang_list:
                continue
            params = compat_urllib_parse.urlencode({
                'lang': lang,
                'v': video_id,
                'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
                'name': unescapeHTML(l[0]).encode('utf-8'),
            })
            url = 'https://www.youtube.com/api/timedtext?' + params
            sub_lang_list[lang] = url
        if not sub_lang_list:
            self._downloader.report_warning('video doesn\'t have subtitles')
            return {}
        return sub_lang_list

    def _get_available_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process."""
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        self.to_screen('%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if mobj is None:
            self._downloader.report_warning(err_msg)
            return {}
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            original_lang_node = caption_list.find('track')
            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr':
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']

            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = compat_urllib_parse.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': sub_format,
                    'ts': timestamp,
                    'kind': 'asr',
                })
                sub_lang_list[sub_lang] = caption_url + '&' + params
            return sub_lang_list
        # An extractor error can be raised by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}

    @classmethod
    def extract_id(cls, url):
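        # e.g. extract_id('http://www.youtube.com/watch?v=BaW_jenozKc')
        # returns 'BaW_jenozKc'; group 2 of _VALID_URL is the bare video id.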
        mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        video_id = mobj.group(2)
        return video_id

    def _extract_from_m3u8(self, manifest_url, video_id):
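        # Non-comment lines of the manifest are variant stream URLs carrying
        # the itag in their path, e.g. '.../itag/22/...' (hypothetical); they
        # are collected into an itag -> URL map.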
        url_map = {}

        def _get_urls(_manifest):
            lines = _manifest.split('\n')
            urls = filter(lambda l: l and not l.startswith('#'),
                          lines)
            return urls
        manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
        formats_urls = _get_urls(manifest)
        for format_url in formats_urls:
            itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
            url_map[itag] = format_url
        return url_map

    def _extract_annotations(self, video_id):
        url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
        return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')

    def _real_extract(self, url):
        proto = (
            'http' if self._downloader.params.get('prefer_insecure', False)
            else 'https')

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
        mobj = re.search(self._NEXT_URL_RE, url)
        if mobj:
            url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
        video_id = self.extract_id(url)

        # Get video webpage
        url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
        video_webpage = self._download_webpage(url, video_id)

        # Attempt to extract SWF player URL
        mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
        if mobj is not None:
            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
        else:
            player_url = None

        # Get video info
        self.report_video_info_webpage_download(video_id)
        if re.search(r'player-age-gate-content">', video_webpage) is not None:
            self.report_age_confirmation()
            age_gate = True
            # We simulate the access to the video from www.youtube.com/v/{video_id}
            # this can be viewed without logging in to YouTube
            data = compat_urllib_parse.urlencode({
                'video_id': video_id,
                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
                'sts': self._search_regex(
                    r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
            })
            video_info_url = proto + '://www.youtube.com/get_video_info?' + data
            video_info_webpage = self._download_webpage(video_info_url, video_id,
                                                        note=False,
                                                        errnote='unable to download video info webpage')
            video_info = compat_parse_qs(video_info_webpage)
        else:
            age_gate = False
            for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
                video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                                  % (video_id, el_type))
                video_info_webpage = self._download_webpage(video_info_url, video_id,
                                                            note=False,
                                                            errnote='unable to download video info webpage')
                video_info = compat_parse_qs(video_info_webpage)
                if 'token' in video_info:
                    break
        if 'token' not in video_info:
            if 'reason' in video_info:
                raise ExtractorError(
                    'YouTube said: %s' % video_info['reason'][0],
                    expected=True, video_id=video_id)
            else:
                raise ExtractorError(
                    '"token" parameter not in video info for unknown reason',
                    video_id=video_id)

        if 'view_count' in video_info:
            view_count = int(video_info['view_count'][0])
        else:
            view_count = None

        # Check for "rental" videos
        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
            raise ExtractorError('"rental" videos not supported')

        # Start extracting information
        self.report_information_extraction(video_id)

        # uploader
        if 'author' not in video_info:
            raise ExtractorError('Unable to extract uploader name')
        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])

        # uploader_id
        video_uploader_id = None
        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
        if mobj is not None:
            video_uploader_id = mobj.group(1)
        else:
            self._downloader.report_warning('unable to extract uploader nickname')

        # title
        if 'title' in video_info:
            video_title = video_info['title'][0]
        else:
            self._downloader.report_warning('Unable to extract video title')
            video_title = '_'

        # thumbnail image
        # We try first to get a high quality image:
        m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
                            video_webpage, re.DOTALL)
        if m_thumb is not None:
            video_thumbnail = m_thumb.group(1)
        elif 'thumbnail_url' not in video_info:
            self._downloader.report_warning('unable to extract video thumbnail')
            video_thumbnail = None
        else:   # don't panic if we can't find it
            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])

        # upload date
        upload_date = None
        mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
        if mobj is None:
            mobj = re.search(
                r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
                video_webpage)
        if mobj is not None:
            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
            upload_date = unified_strdate(upload_date)

        m_cat_container = self._search_regex(
            r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
            video_webpage, 'categories', fatal=False)
        if m_cat_container:
            category = self._html_search_regex(
                r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
                default=None)
            video_categories = None if category is None else [category]
        else:
            video_categories = None

        # description
        video_description = get_element_by_id("eow-description", video_webpage)
        if video_description:
            video_description = re.sub(r'''(?x)
                <a\s+
                    (?:[a-zA-Z-]+="[^"]+"\s+)*?
                    title="([^"]+)"\s+
                    (?:[a-zA-Z-]+="[^"]+"\s+)*?
                    class="yt-uix-redirect-link"\s*>
                [^<]+
                </a>
            ''', r'\1', video_description)
            video_description = clean_html(video_description)
        else:
            fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
            if fd_mobj:
                video_description = unescapeHTML(fd_mobj.group(1))
            else:
                video_description = ''

        def _extract_count(count_name):
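            # e.g. for count_name 'like' this matches (hypothetical) markup
            # like '<span id="watch-like" ...>1,057</span>' and returns 1057.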
            count = self._search_regex(
                r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
                video_webpage, count_name, default=None)
            if count is not None:
                return int(count.replace(',', ''))
            return None

        like_count = _extract_count('like')
        dislike_count = _extract_count('dislike')

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, video_webpage)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, video_webpage)
            return

        if 'length_seconds' not in video_info:
            self._downloader.report_warning('unable to extract video duration')
            video_duration = None
        else:
            video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))

        # annotations
        video_annotations = None
        if self._downloader.params.get('writeannotations', False):
            video_annotations = self._extract_annotations(video_id)

        # Decide which formats to download
        try:
            mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
            if not mobj:
                raise ValueError('Could not find vevo ID')
            json_code = uppercase_escape(mobj.group(1))
            ytplayer_config = json.loads(json_code)
            args = ytplayer_config['args']
            # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
            # these signatures are encrypted
            if 'url_encoded_fmt_stream_map' not in args:
                raise ValueError('No stream_map present')  # caught below
            re_signature = re.compile(r'[&,]s=')
            m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
            if m_s is not None:
                self.to_screen('%s: Encrypted signatures detected.' % video_id)
                video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
            m_s = re_signature.search(args.get('adaptive_fmts', ''))
            if m_s is not None:
                if 'adaptive_fmts' in video_info:
                    video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
                else:
                    video_info['adaptive_fmts'] = [args['adaptive_fmts']]
        except ValueError:
            pass

        def _map_to_format_list(urlmap):
            formats = []
            for itag, video_real_url in urlmap.items():
                dct = {
                    'format_id': itag,
                    'url': video_real_url,
                    'player_url': player_url,
                }
                if itag in self._formats:
                    dct.update(self._formats[itag])
                formats.append(dct)
            return formats

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            formats = [{
                'format_id': '_rtmp',
                'protocol': 'rtmp',
                'url': video_info['conn'][0],
                'player_url': player_url,
            }]
        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
            if 'rtmpe%3Dyes' in encoded_url_map:
                raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
            url_map = {}
            for url_data_str in encoded_url_map.split(','):
                url_data = compat_parse_qs(url_data_str)
                if 'itag' not in url_data or 'url' not in url_data:
                    continue
                format_id = url_data['itag'][0]
                url = url_data['url'][0]

                if 'sig' in url_data:
                    url += '&signature=' + url_data['sig'][0]
                elif 's' in url_data:
                    encrypted_sig = url_data['s'][0]

                    if not age_gate:
                        jsplayer_url_json = self._search_regex(
                            r'"assets":.+?"js":\s*("[^"]+")',
                            video_webpage, 'JS player URL')
                        player_url = json.loads(jsplayer_url_json)
                    if player_url is None:
                        player_url_json = self._search_regex(
                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
                            video_webpage, 'age gate player URL')
                        player_url = json.loads(player_url_json)

                    if self._downloader.params.get('verbose'):
                        if player_url is None:
                            player_version = 'unknown'
                            player_desc = 'unknown'
                        else:
                            if player_url.endswith('swf'):
                                player_version = self._search_regex(
                                    r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
                                    'flash player', fatal=False)
                                player_desc = 'flash player %s' % player_version
                            else:
                                player_version = self._search_regex(
                                    r'html5player-([^/]+?)(?:/html5player)?\.js',
                                    player_url,
                                    'html5 player', fatal=False)
                                player_desc = 'html5 player %s' % player_version

                        parts_sizes = self._signature_cache_id(encrypted_sig)
                        self.to_screen('{%s} signature length %s, %s' %
                                       (format_id, parts_sizes, player_desc))

                    signature = self._decrypt_signature(
                        encrypted_sig, video_id, player_url, age_gate)
                    url += '&signature=' + signature
                if 'ratebypass' not in url:
                    url += '&ratebypass=yes'
                url_map[format_id] = url
            formats = _map_to_format_list(url_map)
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            formats = _map_to_format_list(url_map)
        else:
            raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')

        # Look for the DASH manifest
        if self._downloader.params.get('youtube_include_dash_manifest', False):
            try:
                # The DASH manifest used needs to be the one from the original video_webpage.
                # The one found in get_video_info seems to be using different signatures.
                # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
                # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
                # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
                if age_gate:
                    dash_manifest_url = video_info.get('dashmpd')[0]
                else:
                    dash_manifest_url = ytplayer_config['args']['dashmpd']

                def decrypt_sig(mobj):
                    s = mobj.group(1)
                    dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
                    return '/signature/%s' % dec_s
                dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
                dash_doc = self._download_xml(
                    dash_manifest_url, video_id,
                    note='Downloading DASH manifest',
                    errnote='Could not download DASH manifest')
                for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                    url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                    if url_el is None:
                        continue
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                    }
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        f.update(self._formats.get(format_id, {}))
                        formats.append(f)
                    else:
                        existing_format.update(f)

            except (ExtractorError, KeyError) as e:
                self.report_warning('Skipping DASH manifest: %s' % e, video_id)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'categories': video_categories,
            'subtitles': video_subtitles,
            'duration': video_duration,
            'age_limit': 18 if age_gate else 0,
            'annotations': video_annotations,
            'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'formats': formats,
        }


class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch)
                           \? (?:.*?&)*? (?:p|a|list)=
                        |  p/
                        )
                        (
                            (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
                            # Top tracks, they can also include dots
                            |(?:MC)[\w\.]*
                        )
                        .*
                     |
                        ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'info_dict': {
            'title': 'ytdl test PL',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'info_dict': {
            'title': 'YDL_Empty_List',
        },
        'playlist_count': 0,
    }, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'info_dict': {
            'title': '29C3: Not my department',
        },
        'playlist_count': 95,
    }, {
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': 'Team Fortress 2 (Class-based LP)',
        },
        'playlist_mincount': 26,
    }, {
        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'info_dict': {
            'title': 'Uploads from Cauchemar',
        },
        'playlist_mincount': 799,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
        },
        'playlist_count': 2,
    }]

    def _real_initialize(self):
        self._login()

    def _ids_to_results(self, ids):
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _extract_mix(self, playlist_id):
        # The mixes are generated from a single video
        # the id of the playlist is just 'RD' + video_id
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        title_span = (
            search_title('playlist-title') or
            search_title('title long-title') or
            search_title('title'))
        title = clean_html(title_span)
        ids = orderedSet(re.findall(
            r'''(?xs)data-video-username=".*?".*?
                href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
            webpage))
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError('For downloading YouTube.com top lists, use '
                                 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page

        # Check if the playlist exists or is private
        if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
            raise ExtractorError(
                'The playlist doesn\'t exist or is private, use --username or '
                '--netrc to access it.',
                expected=True)

        # Extract the video ids from the playlist pages
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, 'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)


class YoutubeTopListIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:toplist'
    IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
               ' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
    _TESTS = [{
        'url': 'yttoplist:music:Trending',
        'playlist_mincount': 5,
        'skip': 'Only works for logged-in users',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel = mobj.group('chann')
        title = mobj.group('title')
        query = compat_urllib_parse.urlencode({'title': title})
        channel_page = self._download_webpage(
            'https://www.youtube.com/%s' % channel, title)
        link = self._html_search_regex(
            r'''(?x)
                <a\s+href="([^"]+)".*?>\s*
                <span\s+class="branded-page-module-title-text">\s*
                <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
            channel_page, 'list')
        url = compat_urlparse.urljoin('https://www.youtube.com/', link)

        video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        ids = []
        # sometimes the webpage doesn't contain the videos
        # retry until we get them
        for i in itertools.count(0):
            msg = 'Downloading Youtube mix'
            if i > 0:
                msg += ', retry #%d' % i

            webpage = self._download_webpage(url, title, msg)
            ids = orderedSet(re.findall(video_re, webpage))
            if ids:
                break
        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_title=title)


class YoutubeChannelIE(InfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
    }]

    def extract_videos_from_page(self, page):
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        url = 'https://www.youtube.com/channel/%s/videos' % channel_id
        channel_page = self._download_webpage(url, channel_id)
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Download all channel pages using the json-based channel_ajax query
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_json(
                    url, channel_id, note='Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen('[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                       for video_id in video_ids]
        return self.playlist_result(url_entries, channel_id)


class YoutubeUserIE(InfoExtractor):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with other youtube
        # extractors, the regex is too permissive and it would match.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(
                gdata_url, username,
                'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)


class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = 'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n
        PAGE_SIZE = 50

        while (PAGE_SIZE * pagenum) < limit:
            result_url = self._API_URL % (
                compat_urllib_parse.quote_plus(query.encode('utf-8')),
                (PAGE_SIZE * pagenum) + 1)
            data_json = self._download_webpage(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % (pagenum + 1),
                errnote='Unable to download API page')
            data = json.loads(data_json)
            api_response = data['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            new_ids = list(video['id'] for video in api_response['items'])
            video_ids += new_ids

            limit = min(n, api_response['totalItems'])
            pagenum += 1

        if len(video_ids) > n:
            video_ids = video_ids[:n]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in video_ids]
        return self.playlist_result(videos, query)


class YoutubeSearchDateIE(YoutubeSearchIE):
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'


class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse.unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')

        part_codes = re.findall(
            r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)
        entries = []
        for part_code in part_codes:
            part_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            part_url = compat_urlparse.urljoin(
                'https://www.youtube.com/', part_url_snippet)
            entries.append({
                '_type': 'url',
                'url': part_url,
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }


class YoutubeShowIE(InfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'http://www.youtube.com/show/airdisasters',
        'playlist_mincount': 3,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading show webpage')
        # There's one playlist for each season of the show
        m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
        entries = [
            self.url_result(
                'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
            for season in m_seasons
        ]
        title = self._og_search_title(webpage, fatal=False)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': title,
            'entries': entries,
        }


class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        paging = 0
        for i in itertools.count(1):
            info = self._download_json(self._FEED_TEMPLATE % paging,
                                       '%s feed' % self._FEED_NAME,
                                       'Downloading page %s' % i)
            feed_html = info.get('feed_html') or info.get('content_html')
            load_more_widget_html = info.get('load_more_widget_html') or feed_html
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            mobj = re.search(
                r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
                load_more_widget_html)
            if mobj is None:
                break
            paging = mobj.group('paging')
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)


class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'


class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = 'Youtube Watch Later'
    _PERSONAL_FEED = True


class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = 'Youtube Watch History'


class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')


class YoutubeSubscriptionsIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:subscriptions'
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _TESTS = []

    def _real_extract(self, url):
        title = 'Youtube Subscriptions'
        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page

        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            new_ids = orderedSet(matches)
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), title,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return {
            '_type': 'playlist',
            'title': title,
            'entries': self._ids_to_results(ids),
        }


class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+
        )?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)