]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/youtube.py
Merge pull request #3865 from diffycat/jpopsuki
[yt-dlp.git] / youtube_dl / extractor / youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import re
10 import traceback
11
12 from .common import InfoExtractor, SearchInfoExtractor
13 from .subtitles import SubtitlesInfoExtractor
14 from ..jsinterp import JSInterpreter
15 from ..swfinterp import SWFInterpreter
16 from ..utils import (
17 compat_chr,
18 compat_parse_qs,
19 compat_urllib_parse,
20 compat_urllib_request,
21 compat_urlparse,
22 compat_str,
23
24 clean_html,
25 get_element_by_id,
26 get_element_by_attribute,
27 ExtractorError,
28 int_or_none,
29 OnDemandPagedList,
30 unescapeHTML,
31 unified_strdate,
32 orderedSet,
33 uppercase_escape,
34 )
35
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        """Best-effort request switching the site to English; True iff it succeeded."""
        return bool(self._download_webpage(
            self._LANG_URL, None,
            note='Setting language', errnote='unable to set language',
            fatal=False))

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            # Return False explicitly: this method's contract is True/False
            # (a bare return previously leaked None here).
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,

            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')

        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
            tfa_code = self._get_tfa_info()

            if tfa_code is None:
                self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
                self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            # Unlike the first login form, secTok and timeStmp are both required for the TFA form

            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                # Bail out: falling through would crash on match.group(1)
                self._downloader.report_warning('Failed to get secTok - did the page structure change?')
                return False
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                # Same as above: a missing timeStmp must not crash the login
                self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
                return False
            timeStmp = match.group(1)

            tfa_form_strs = {
                'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                'smsToken': '',
                'smsUserPin': tfa_code,
                'smsVerifyPin': 'Verify',

                'PersistentCookie': 'yes',
                'checkConnection': '',
                'checkedDomains': 'youtube',
                'pstMsg': '1',
                'secTok': secTok,
                'timeStmp': timeStmp,
                'service': 'youtube',
                'hl': 'en_US',
            }
            tfa_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in tfa_form_strs.items())
            tfa_data = compat_urllib_parse.urlencode(tfa_form).encode('ascii')

            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _confirm_age(self):
        """Submit the age-verification form; always returns True
        (a failed download raises from _download_webpage)."""
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        req = compat_urllib_request.Request(self._AGE_URL,
                                            compat_urllib_parse.urlencode(age_form).encode('ascii'))

        self._download_webpage(
            req, None,
            note='Confirming age', errnote='Unable to confirm age')
        return True

    def _real_initialize(self):
        # Runs once before extraction: set language, log in, confirm age.
        # Each step short-circuits the rest on failure.
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()
199
200
201 class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
    IE_DESC = 'YouTube.com'
    # Matches every URL form carrying a plain 11-character video ID:
    # youtube.com watch/embed/v URLs, youtu.be short links, a few proxy
    # front-ends, or the naked ID itself (group 1 optional, ID in group 2).
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |youtu\.be/                                          # just youtu.be/xxxx
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?!.*?&list=)                                            # combined list/video URLs are handled by the playlist IE
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    # Extracts the original URL out of age-verification-style redirects.
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Static properties of known itags; negative 'preference' pushes
    # 3D/HLS/DASH variants below the default muxed formats.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},


        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }
306
    IE_NAME = 'youtube'
    # Fixture data consumed by the extractor test harness; 'info_dict'
    # entries are compared against the extraction result.
    _TESTS = [
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'like_count': int,
                'dislike_count': int,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'id': 'UxxajLWwzqY',
                'ext': 'mp4',
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'id': '07FYdnEawAQ',
                'ext': 'mp4',
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'id': 'yZIXLfi8CZQ',
                'ext': 'mp4',
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia'
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'id': 'a9LDPn-MO4I',
                'ext': 'm4a',
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'description': '',
                'uploader': '8KVIDEO',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # DASH manifest with encrypted signature
        {
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'id': 'IB3lcPjvWLA',
                'ext': 'm4a',
                'title': 'Afrojack - The Spark ft. Spree Wilson',
                'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
    ]
398
    def __init__(self, *args, **kwargs):
        """Initialize the extractor and its per-instance signature cache."""
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Maps (player_url, signature cache id) -> signature decrypt function.
        self._player_cache = {}
402
403 def report_video_info_webpage_download(self, video_id):
404 """Report attempt to download video info webpage."""
405 self.to_screen('%s: Downloading video info webpage' % video_id)
406
407 def report_information_extraction(self, video_id):
408 """Report attempt to extract video information."""
409 self.to_screen('%s: Extracting video information' % video_id)
410
411 def report_unavailable_format(self, video_id, format):
412 """Report extracted video URL."""
413 self.to_screen('%s: Format %s not available' % (video_id, format))
414
415 def report_rtmp_download(self):
416 """Indicate the download will use the RTMP protocol."""
417 self.to_screen('RTMP download detected')
418
419 def _signature_cache_id(self, example_sig):
420 """ Return a string representation of a signature """
421 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
422
423 def _extract_signature_function(self, video_id, player_url, example_sig):
424 id_m = re.match(
425 r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
426 player_url)
427 if not id_m:
428 raise ExtractorError('Cannot identify player %r' % player_url)
429 player_type = id_m.group('ext')
430 player_id = id_m.group('id')
431
432 # Read from filesystem cache
433 func_id = '%s_%s_%s' % (
434 player_type, player_id, self._signature_cache_id(example_sig))
435 assert os.path.basename(func_id) == func_id
436
437 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
438 if cache_spec is not None:
439 return lambda s: ''.join(s[i] for i in cache_spec)
440
441 if player_type == 'js':
442 code = self._download_webpage(
443 player_url, video_id,
444 note='Downloading %s player %s' % (player_type, player_id),
445 errnote='Download of %s failed' % player_url)
446 res = self._parse_sig_js(code)
447 elif player_type == 'swf':
448 urlh = self._request_webpage(
449 player_url, video_id,
450 note='Downloading %s player %s' % (player_type, player_id),
451 errnote='Download of %s failed' % player_url)
452 code = urlh.read()
453 res = self._parse_sig_swf(code)
454 else:
455 assert False, 'Invalid player type %r' % player_type
456
457 if cache_spec is None:
458 test_string = ''.join(map(compat_chr, range(len(example_sig))))
459 cache_res = res(test_string)
460 cache_spec = [ord(c) for c in cache_res]
461
462 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
463 return res
464
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the deciphering permutation.

        Runs *func* on a probe string to recover the index permutation and
        renders it as a sum of compact slice expressions (used by the
        youtube_print_sig_code option).
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render an inclusive index run as a Python slice expression,
                # omitting parts that match the slice defaults.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            start = '(Never used)' # Quelch pyflakes warnings - start will be
                                   # set as soon as step is set
            # Walk consecutive index pairs, grouping +1/-1 runs into slices
            # and emitting isolated indices as single lookups.
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Inside a run: extend it, or flush it and start over.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Two adjacent indices open a new run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element/run.
            # NOTE(review): assumes len(idxs) >= 2 so `i` is bound by the
            # loop above; signatures are always long enough in practice.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
503
504 def _parse_sig_js(self, jscode):
505 funcname = self._search_regex(
506 r'signature=([$a-zA-Z]+)', jscode,
507 'Initial JS player signature function name')
508
509 jsi = JSInterpreter(jscode)
510 initial_function = jsi.extract_function(funcname)
511 return lambda s: initial_function([s])
512
513 def _parse_sig_swf(self, file_contents):
514 swfi = SWFInterpreter(file_contents)
515 TARGET_CLASSNAME = 'SignatureDecipher'
516 searched_class = swfi.extract_class(TARGET_CLASSNAME)
517 initial_function = swfi.extract_function(searched_class, 'decipher')
518 return lambda s: initial_function([s])
519
520 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
521 """Turn the encrypted s field into a working signature"""
522
523 if player_url is None:
524 raise ExtractorError('Cannot decrypt signature without player_url')
525
526 if player_url.startswith('//'):
527 player_url = 'https:' + player_url
528 try:
529 player_id = (player_url, self._signature_cache_id(s))
530 if player_id not in self._player_cache:
531 func = self._extract_signature_function(
532 video_id, player_url, s
533 )
534 self._player_cache[player_id] = func
535 func = self._player_cache[player_id]
536 if self._downloader.params.get('youtube_print_sig_code'):
537 self._print_sig_code(func, s)
538 return func(s)
539 except Exception as e:
540 tb = traceback.format_exc()
541 raise ExtractorError(
542 'Signature extraction failed: ' + tb, cause=e)
543
544 def _get_available_subtitles(self, video_id, webpage):
545 try:
546 sub_list = self._download_webpage(
547 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
548 video_id, note=False)
549 except ExtractorError as err:
550 self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
551 return {}
552 lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
553
554 sub_lang_list = {}
555 for l in lang_list:
556 lang = l[1]
557 if lang in sub_lang_list:
558 continue
559 params = compat_urllib_parse.urlencode({
560 'lang': lang,
561 'v': video_id,
562 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
563 'name': unescapeHTML(l[0]).encode('utf-8'),
564 })
565 url = 'https://www.youtube.com/api/timedtext?' + params
566 sub_lang_list[lang] = url
567 if not sub_lang_list:
568 self._downloader.report_warning('video doesn\'t have subtitles')
569 return {}
570 return sub_lang_list
571
    def _get_available_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
           argument to speed up the process.

        Returns a dict mapping target language code -> caption URL, or an
        empty dict (with a warning) when no automatic captions exist.
        """
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        self.to_screen('%s: Looking for automatic captions' % video_id)
        # The ttsurl/timestamp needed to list captions live in the embedded
        # player config JSON.
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if mobj is None:
            self._downloader.report_warning(err_msg)
            return {}
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            original_lang_node = caption_list.find('track')
            # Automatic captions are only available when the original track
            # is auto-speech-recognized ('asr').
            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']

            # Build one translated-caption URL per available target language.
            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = compat_urllib_parse.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': sub_format,
                    'ts': timestamp,
                    'kind': 'asr',
                })
                sub_lang_list[sub_lang] = caption_url + '&' + params
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
618
619 @classmethod
620 def extract_id(cls, url):
621 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
622 if mobj is None:
623 raise ExtractorError('Invalid URL: %s' % url)
624 video_id = mobj.group(2)
625 return video_id
626
627 def _extract_from_m3u8(self, manifest_url, video_id):
628 url_map = {}
629 def _get_urls(_manifest):
630 lines = _manifest.split('\n')
631 urls = filter(lambda l: l and not l.startswith('#'),
632 lines)
633 return urls
634 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
635 formats_urls = _get_urls(manifest)
636 for format_url in formats_urls:
637 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
638 url_map[itag] = format_url
639 return url_map
640
641 def _extract_annotations(self, video_id):
642 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
643 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
644
645 def _real_extract(self, url):
646 proto = (
647 'http' if self._downloader.params.get('prefer_insecure', False)
648 else 'https')
649
650 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
651 mobj = re.search(self._NEXT_URL_RE, url)
652 if mobj:
653 url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
654 video_id = self.extract_id(url)
655
656 # Get video webpage
657 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
658 pref_cookies = [
659 c for c in self._downloader.cookiejar
660 if c.domain == '.youtube.com' and c.name == 'PREF']
661 for pc in pref_cookies:
662 if 'hl=' in pc.value:
663 pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
664 else:
665 if pc.value:
666 pc.value += '&'
667 pc.value += 'hl=en'
668 video_webpage = self._download_webpage(url, video_id)
669
670 # Attempt to extract SWF player URL
671 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
672 if mobj is not None:
673 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
674 else:
675 player_url = None
676
677 # Get video info
678 self.report_video_info_webpage_download(video_id)
679 if re.search(r'player-age-gate-content">', video_webpage) is not None:
680 self.report_age_confirmation()
681 age_gate = True
682 # We simulate the access to the video from www.youtube.com/v/{video_id}
683 # this can be viewed without login into Youtube
684 data = compat_urllib_parse.urlencode({
685 'video_id': video_id,
686 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
687 'sts': self._search_regex(
688 r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
689 })
690 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
691 video_info_webpage = self._download_webpage(video_info_url, video_id,
692 note=False,
693 errnote='unable to download video info webpage')
694 video_info = compat_parse_qs(video_info_webpage)
695 else:
696 age_gate = False
697 for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
698 video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
699 % (video_id, el_type))
700 video_info_webpage = self._download_webpage(video_info_url, video_id,
701 note=False,
702 errnote='unable to download video info webpage')
703 video_info = compat_parse_qs(video_info_webpage)
704 if 'token' in video_info:
705 break
706 if 'token' not in video_info:
707 if 'reason' in video_info:
708 raise ExtractorError(
709 'YouTube said: %s' % video_info['reason'][0],
710 expected=True, video_id=video_id)
711 else:
712 raise ExtractorError(
713 '"token" parameter not in video info for unknown reason',
714 video_id=video_id)
715
716 if 'view_count' in video_info:
717 view_count = int(video_info['view_count'][0])
718 else:
719 view_count = None
720
721 # Check for "rental" videos
722 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
723 raise ExtractorError('"rental" videos not supported')
724
725 # Start extracting information
726 self.report_information_extraction(video_id)
727
728 # uploader
729 if 'author' not in video_info:
730 raise ExtractorError('Unable to extract uploader name')
731 video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
732
733 # uploader_id
734 video_uploader_id = None
735 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
736 if mobj is not None:
737 video_uploader_id = mobj.group(1)
738 else:
739 self._downloader.report_warning('unable to extract uploader nickname')
740
741 # title
742 if 'title' in video_info:
743 video_title = video_info['title'][0]
744 else:
745 self._downloader.report_warning('Unable to extract video title')
746 video_title = '_'
747
748 # thumbnail image
749 # We try first to get a high quality image:
750 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
751 video_webpage, re.DOTALL)
752 if m_thumb is not None:
753 video_thumbnail = m_thumb.group(1)
754 elif 'thumbnail_url' not in video_info:
755 self._downloader.report_warning('unable to extract video thumbnail')
756 video_thumbnail = None
757 else: # don't panic if we can't find it
758 video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
759
760 # upload date
761 upload_date = None
762 mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
763 if mobj is None:
764 mobj = re.search(
765 r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
766 video_webpage)
767 if mobj is not None:
768 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
769 upload_date = unified_strdate(upload_date)
770
771 m_cat_container = self._search_regex(
772 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
773 video_webpage, 'categories', fatal=False)
774 if m_cat_container:
775 category = self._html_search_regex(
776 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
777 default=None)
778 video_categories = None if category is None else [category]
779 else:
780 video_categories = None
781
782 # description
783 video_description = get_element_by_id("eow-description", video_webpage)
784 if video_description:
785 video_description = re.sub(r'''(?x)
786 <a\s+
787 (?:[a-zA-Z-]+="[^"]+"\s+)*?
788 title="([^"]+)"\s+
789 (?:[a-zA-Z-]+="[^"]+"\s+)*?
790 class="yt-uix-redirect-link"\s*>
791 [^<]+
792 </a>
793 ''', r'\1', video_description)
794 video_description = clean_html(video_description)
795 else:
796 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
797 if fd_mobj:
798 video_description = unescapeHTML(fd_mobj.group(1))
799 else:
800 video_description = ''
801
802 def _extract_count(count_name):
803 count = self._search_regex(
804 r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
805 video_webpage, count_name, default=None)
806 if count is not None:
807 return int(count.replace(',', ''))
808 return None
809 like_count = _extract_count('like')
810 dislike_count = _extract_count('dislike')
811
812 # subtitles
813 video_subtitles = self.extract_subtitles(video_id, video_webpage)
814
815 if self._downloader.params.get('listsubtitles', False):
816 self._list_available_subtitles(video_id, video_webpage)
817 return
818
819 if 'length_seconds' not in video_info:
820 self._downloader.report_warning('unable to extract video duration')
821 video_duration = None
822 else:
823 video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
824
825 # annotations
826 video_annotations = None
827 if self._downloader.params.get('writeannotations', False):
828 video_annotations = self._extract_annotations(video_id)
829
830 # Decide which formats to download
831 try:
832 mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
833 if not mobj:
834 raise ValueError('Could not find vevo ID')
835 json_code = uppercase_escape(mobj.group(1))
836 ytplayer_config = json.loads(json_code)
837 args = ytplayer_config['args']
838 # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
839 # this signatures are encrypted
840 if 'url_encoded_fmt_stream_map' not in args:
841 raise ValueError('No stream_map present') # caught below
842 re_signature = re.compile(r'[&,]s=')
843 m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
844 if m_s is not None:
845 self.to_screen('%s: Encrypted signatures detected.' % video_id)
846 video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
847 m_s = re_signature.search(args.get('adaptive_fmts', ''))
848 if m_s is not None:
849 if 'adaptive_fmts' in video_info:
850 video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
851 else:
852 video_info['adaptive_fmts'] = [args['adaptive_fmts']]
853 except ValueError:
854 pass
855
856 def _map_to_format_list(urlmap):
857 formats = []
858 for itag, video_real_url in urlmap.items():
859 dct = {
860 'format_id': itag,
861 'url': video_real_url,
862 'player_url': player_url,
863 }
864 if itag in self._formats:
865 dct.update(self._formats[itag])
866 formats.append(dct)
867 return formats
868
869 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
870 self.report_rtmp_download()
871 formats = [{
872 'format_id': '_rtmp',
873 'protocol': 'rtmp',
874 'url': video_info['conn'][0],
875 'player_url': player_url,
876 }]
877 elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
878 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
879 if 'rtmpe%3Dyes' in encoded_url_map:
880 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
881 url_map = {}
882 for url_data_str in encoded_url_map.split(','):
883 url_data = compat_parse_qs(url_data_str)
884 if 'itag' not in url_data or 'url' not in url_data:
885 continue
886 format_id = url_data['itag'][0]
887 url = url_data['url'][0]
888
889 if 'sig' in url_data:
890 url += '&signature=' + url_data['sig'][0]
891 elif 's' in url_data:
892 encrypted_sig = url_data['s'][0]
893
894 if not age_gate:
895 jsplayer_url_json = self._search_regex(
896 r'"assets":.+?"js":\s*("[^"]+")',
897 video_webpage, 'JS player URL')
898 player_url = json.loads(jsplayer_url_json)
899 if player_url is None:
900 player_url_json = self._search_regex(
901 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
902 video_webpage, 'age gate player URL')
903 player_url = json.loads(player_url_json)
904
905 if self._downloader.params.get('verbose'):
906 if player_url is None:
907 player_version = 'unknown'
908 player_desc = 'unknown'
909 else:
910 if player_url.endswith('swf'):
911 player_version = self._search_regex(
912 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
913 'flash player', fatal=False)
914 player_desc = 'flash player %s' % player_version
915 else:
916 player_version = self._search_regex(
917 r'html5player-([^/]+?)(?:/html5player)?\.js',
918 player_url,
919 'html5 player', fatal=False)
920 player_desc = 'html5 player %s' % player_version
921
922 parts_sizes = self._signature_cache_id(encrypted_sig)
923 self.to_screen('{%s} signature length %s, %s' %
924 (format_id, parts_sizes, player_desc))
925
926 signature = self._decrypt_signature(
927 encrypted_sig, video_id, player_url, age_gate)
928 url += '&signature=' + signature
929 if 'ratebypass' not in url:
930 url += '&ratebypass=yes'
931 url_map[format_id] = url
932 formats = _map_to_format_list(url_map)
933 elif video_info.get('hlsvp'):
934 manifest_url = video_info['hlsvp'][0]
935 url_map = self._extract_from_m3u8(manifest_url, video_id)
936 formats = _map_to_format_list(url_map)
937 else:
938 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
939
940 # Look for the DASH manifest
941 if (self._downloader.params.get('youtube_include_dash_manifest', False)):
942 try:
943 # The DASH manifest used needs to be the one from the original video_webpage.
944 # The one found in get_video_info seems to be using different signatures.
945 # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
946 # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
947 # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
948 if age_gate:
949 dash_manifest_url = video_info.get('dashmpd')[0]
950 else:
951 dash_manifest_url = ytplayer_config['args']['dashmpd']
952 def decrypt_sig(mobj):
953 s = mobj.group(1)
954 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
955 return '/signature/%s' % dec_s
956 dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
957 dash_doc = self._download_xml(
958 dash_manifest_url, video_id,
959 note='Downloading DASH manifest',
960 errnote='Could not download DASH manifest')
961 for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
962 url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
963 if url_el is None:
964 continue
965 format_id = r.attrib['id']
966 video_url = url_el.text
967 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
968 f = {
969 'format_id': format_id,
970 'url': video_url,
971 'width': int_or_none(r.attrib.get('width')),
972 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
973 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
974 'filesize': filesize,
975 }
976 try:
977 existing_format = next(
978 fo for fo in formats
979 if fo['format_id'] == format_id)
980 except StopIteration:
981 f.update(self._formats.get(format_id, {}))
982 formats.append(f)
983 else:
984 existing_format.update(f)
985
986 except (ExtractorError, KeyError) as e:
987 self.report_warning('Skipping DASH manifest: %s' % e, video_id)
988
989 self._sort_formats(formats)
990
991 return {
992 'id': video_id,
993 'uploader': video_uploader,
994 'uploader_id': video_uploader_id,
995 'upload_date': upload_date,
996 'title': video_title,
997 'thumbnail': video_thumbnail,
998 'description': video_description,
999 'categories': video_categories,
1000 'subtitles': video_subtitles,
1001 'duration': video_duration,
1002 'age_limit': 18 if age_gate else 0,
1003 'annotations': video_annotations,
1004 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1005 'view_count': view_count,
1006 'like_count': like_count,
1007 'dislike_count': dislike_count,
1008 'formats': formats,
1009 }
1010
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
    """Extract all entries of a YouTube playlist, including auto-generated mixes.

    Accepts full playlist/watch/embed URLs as well as bare playlist ids
    (group 2 of _VALID_URL).
    """
    IE_DESC = 'YouTube.com playlists'
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
                           \? (?:.*?&)*? (?:p|a|list)=
                        |  p/
                        )
                        (
                            (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
                            # Top tracks, they can also include dots
                            |(?:MC)[\w\.]*
                        )
                        .*
                     |
                        ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    # Present in the page markup while there is still a "next" page of results
    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
    # One video link on a playlist page; 'index' is its 1-based position in the list
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'info_dict': {
            'title': 'ytdl test PL',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'info_dict': {
            'title': 'YDL_Empty_List',
        },
        'playlist_count': 0,
    }, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'info_dict': {
            'title': '29C3: Not my department',
        },
        'playlist_count': 95,
    }, {
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': 'Team Fortress 2 (Class-based LP)',
        },
        'playlist_mincount': 26,
    }, {
        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'info_dict': {
            'title': 'Uploads from Cauchemar',
        },
        'playlist_mincount': 799,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
        },
        'playlist_count': 2,
    }, {
        'note': 'embedded',
        'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
        }
    }, {
        'note': 'Embedded SWF player',
        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA7',
        }
    }]

    def _real_initialize(self):
        # Playlists may be private; log in if credentials were supplied.
        self._login()

    def _ids_to_results(self, ids):
        """Turn a list of video ids into url_result dicts handled by the Youtube IE."""
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _extract_mix(self, playlist_id):
        """Extract an auto-generated mix.

        The mixes are generated from a single video: the id of the playlist
        is just 'RD' + video_id, so the watch page is scraped instead of the
        regular playlist page.
        """
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        # Try progressively more generic class names for the title element
        title_span = (
            search_title('playlist-title') or
            search_title('title long-title') or
            search_title('title'))
        title = clean_html(title_span)
        ids = orderedSet(re.findall(
            r'''(?xs)data-video-username=".*?".*?
                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
            webpage))
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id (group 1: URL form, group 2: bare id form)
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL; honour --no-playlist there
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError('For downloading YouTube.com top lists, use '
                'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page

        # Check if the playlist exists or is private
        if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
            raise ExtractorError(
                'The playlist doesn\'t exist or is private, use --username or '
                '--netrc to access it.',
                expected=True)

        # Extract the video ids from the playlist pages, following the
        # "load more" widget until it disappears
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        # The title lives on the first page only
        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, 'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)
1180
1181
class YoutubeTopListIE(YoutubePlaylistIE):
    """Resolve a 'yttoplist:{channel}:{list title}' pseudo-URL to its playlist."""
    IE_NAME = 'youtube:toplist'
    IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
        ' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
    _TESTS = [{
        'url': 'yttoplist:music:Trending',
        'playlist_mincount': 5,
        'skip': 'Only works for logged-in users',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel = mobj.group('chann')
        title = mobj.group('title')
        # The channel page links to the list with the url-encoded title embedded
        query = compat_urllib_parse.urlencode({'title': title})
        channel_page = self._download_webpage(
            'https://www.youtube.com/%s' % channel, title)
        link = self._html_search_regex(
            r'''(?x)
                <a\s+href="([^"]+)".*?>\s*
                <span\s+class="branded-page-module-title-text">\s*
                <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
            channel_page, 'list')
        url = compat_urlparse.urljoin('https://www.youtube.com/', link)

        video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        ids = []
        # sometimes the webpage doesn't contain the videos
        # retry until we get them
        # NOTE(review): this loop has no upper bound — it retries forever if
        # the page never lists any videos; confirm an exit path is acceptable
        for i in itertools.count(0):
            msg = 'Downloading Youtube mix'
            if i > 0:
                msg += ', retry #%d' % i

            webpage = self._download_webpage(url, title, msg)
            ids = orderedSet(re.findall(video_re, webpage))
            if ids:
                break
        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_title=title)
1223
1224
class YoutubeChannelIE(InfoExtractor):
    """Extract every video uploaded to a YouTube channel."""
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    # Present in the ajax response while more result pages remain
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
    }]

    def extract_videos_from_page(self, page):
        """Return the video ids linked from *page*, in order, without duplicates."""
        ids_in_page = []
        for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if mobj.group(1) not in ids_in_page:
                ids_in_page.append(mobj.group(1))
        return ids_in_page

    def _real_extract(self, url):
        # Extract channel id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        # Download channel page
        channel_id = mobj.group(1)
        video_ids = []
        url = 'https://www.youtube.com/channel/%s/videos' % channel_id
        channel_page = self._download_webpage(url, channel_id)
        # Auto-generated channels (e.g. topic channels) are rendered differently
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Download all channel pages using the json-based channel_ajax query
            for pagenum in itertools.count(1):
                url = self._MORE_PAGES_URL % (pagenum, channel_id)
                page = self._download_json(
                    url, channel_id, note='Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)

                ids_in_page = self.extract_videos_from_page(page['content_html'])
                video_ids.extend(ids_in_page)

                # Stop once the "load more" widget disappears from the response
                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen('[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        url_entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                       for video_id in video_ids]
        return self.playlist_result(url_entries, channel_id)
1284
1285
class YoutubeUserIE(InfoExtractor):
    """Extract a user's uploaded videos via the gdata (YouTube Data v2) API."""
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    # The API returns at most this many entries per request
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor: this regex is too permissive and would match those urls too.
        other_ies = (
            klass for (name, klass) in globals().items()
            if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            """Yield url_result entries for one gdata API page (lazy pager callback)."""
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(
                gdata_url, username,
                'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                # A page without entries means we reached the end of the uploads
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)
1357
1358
class YoutubeSearchIE(SearchInfoExtractor):
    """Handle 'ytsearchN:query' searches through the gdata API."""
    IE_DESC = 'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        PAGE_SIZE = 50
        collected_ids = []
        page_index = 0
        # Lowered to the API's reported total once the first page arrives
        limit = n

        while PAGE_SIZE * page_index < limit:
            result_url = self._API_URL % (
                compat_urllib_parse.quote_plus(query.encode('utf-8')),
                PAGE_SIZE * page_index + 1)
            data_json = self._download_webpage(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % (page_index + 1),
                errnote='Unable to download API page')
            api_response = json.loads(data_json)['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            collected_ids.extend(video['id'] for video in api_response['items'])

            # Never ask for more results than the API says exist
            limit = min(n, api_response['totalItems'])
            page_index += 1

        # The last page may overshoot the requested count
        del collected_ids[n:]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in collected_ids]
        return self.playlist_result(videos, query)
1400
1401
class YoutubeSearchDateIE(YoutubeSearchIE):
    """Same search as YoutubeSearchIE, but results ordered newest-first."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    # Identical to the parent API URL plus 'orderby=published'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
1407
1408
class YoutubeSearchURLIE(InfoExtractor):
    """Extract the result list of a YouTube search results URL."""
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }]

    def _real_extract(self, url):
        query = compat_urllib_parse.unquote_plus(
            re.match(self._VALID_URL, url).group('query'))

        webpage = self._download_webpage(url, query)
        # Narrow down to the section of the page holding the results
        result_code = self._search_regex(
            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')

        entries = []
        for part_code in re.findall(
                r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code):
            # Title may live in a title="" attribute or in the link text
            part_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            part_url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            entries.append({
                '_type': 'url',
                'url': compat_urlparse.urljoin(
                    'https://www.youtube.com/', part_url_snippet),
                'title': part_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
1450
1451
class YoutubeShowIE(InfoExtractor):
    """Extract every season playlist of a (multi-season) YouTube show."""
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'http://www.youtube.com/show/airdisasters',
        'playlist_mincount': 3,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        playlist_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading show webpage')
        # There's one playlist for each season of the show
        season_paths = re.findall(r'href="(/playlist\?list=.*?)"', webpage)
        self.to_screen('%s: Found %s seasons' % (playlist_id, len(season_paths)))
        entries = [
            self.url_result(
                'https://www.youtube.com' + season_path, 'YoutubePlaylist')
            for season_path in season_paths
        ]
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage, fatal=False),
            'entries': entries,
        }
1486
1487
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    # Feed pages require a logged-in session
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        # URL template keeping a single %s placeholder for the paging token
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        feed_entries = []
        # int 0 for the first request; replaced below by the (string) token
        # scraped from the "load more" widget
        paging = 0
        for i in itertools.count(1):
            info = self._download_json(self._FEED_TEMPLATE % paging,
                                          '%s feed' % self._FEED_NAME,
                                          'Downloading page %s' % i)
            # Some feeds use 'feed_html', others 'content_html'
            feed_html = info.get('feed_html') or info.get('content_html')
            load_more_widget_html = info.get('load_more_widget_html') or feed_html
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            mobj = re.search(
                r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
                load_more_widget_html)
            if mobj is None:
                break
            paging = mobj.group('paging')
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
1533
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """Feed extractor for the logged-in user's recommended videos."""
    IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
1539
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    """Feed extractor for the logged-in user's Watch Later list."""
    IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = 'Youtube Watch Later'
    # Watch Later is a per-user feed
    _PERSONAL_FEED = True
1546
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """Feed extractor for the logged-in user's watch history."""
    IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
    # Raw string (r'') for the regex, consistent with the sibling feed IEs;
    # the previous plain string relied on '\.' being an unrecognized escape,
    # which raises DeprecationWarning on modern Python.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    # Watch history is a per-user feed
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = 'Youtube Watch History'
1553
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    """Resolve the logged-in user's favourites feed to its backing playlist."""
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        # The favourites page embeds a regular playlist; delegate to the playlist IE.
        page = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        favourites_list_id = self._search_regex(r'list=(.+?)["&]', page, 'favourites playlist id')
        return self.url_result(favourites_list_id, 'YoutubePlaylist')
1564
1565
class YoutubeSubscriptionsIE(YoutubePlaylistIE):
    """Extract the authenticated user's subscriptions feed."""
    IE_NAME = 'youtube:subscriptions'
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    # Not testable without an account
    _TESTS = []

    def _real_extract(self, url):
        title = 'Youtube Subscriptions'
        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page

        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
            new_ids = orderedSet(matches)
            ids.extend(new_ids)

            # Follow the "load more" widget until it disappears
            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), title,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return {
            '_type': 'playlist',
            'title': title,
            'entries': self._ids_to_results(ids),
        }
1602
1603
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch URLs whose 'v' parameter was lost (typically an unquoted
    '&' eaten by the shell) and fail with a helpful error message."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+
        )?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Intentionally always fails: these URLs can never yield a video.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)