# (scraper residue from git web viewer, kept as comments so the module parses)
# ]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/youtube.py
# [youtube] Add support for formats 302 and 303 (Fixes #4060)
# [yt-dlp.git] / youtube_dl / extractor / youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import re
10 import traceback
11
12 from .common import InfoExtractor, SearchInfoExtractor
13 from .subtitles import SubtitlesInfoExtractor
14 from ..jsinterp import JSInterpreter
15 from ..swfinterp import SWFInterpreter
16 from ..utils import (
17 compat_chr,
18 compat_parse_qs,
19 compat_urllib_parse,
20 compat_urllib_request,
21 compat_urlparse,
22 compat_str,
23
24 clean_html,
25 get_element_by_id,
26 get_element_by_attribute,
27 ExtractorError,
28 int_or_none,
29 OnDemandPagedList,
30 unescapeHTML,
31 unified_strdate,
32 orderedSet,
33 uppercase_escape,
34 )
35
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/SecondFactor'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    @staticmethod
    def _encode_form(form_strs):
        """Urlencode a dict of form fields into a POST body (bytes).

        Keys and values are converted to UTF-8 *before* urlencode because
        Python 2.x's urlencode chokes on unicode.
        """
        form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in form_strs.items())
        return compat_urllib_parse.urlencode(form).encode('ascii')

    def _set_language(self):
        """Switch the site UI language to English; return True on success."""
        return bool(self._download_webpage(
            self._LANG_URL, None,
            note='Setting language', errnote='unable to set language',
            fatal=False))

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            # Bug fix: used to 'return' bare None here, contradicting the
            # documented False-on-failure contract of this method.
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,

            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        login_data = self._encode_form(login_form_strs)

        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', login_results) is not None:
            tfa_code = self._get_tfa_info()

            if tfa_code is None:
                self._downloader.report_warning('Two-factor authentication required. Provide it with --twofactor <code>')
                self._downloader.report_warning('(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            # Unlike the first login form, secTok and timeStmp are both required for the TFA form

            match = re.search(r'id="secTok"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get secTok - did the page structure change?')
                # Bug fix: previously fell through and crashed with
                # AttributeError on match.group(1); fail the login instead.
                return False
            secTok = match.group(1)
            match = re.search(r'id="timeStmp"\n\s+value=\'(.+)\'/>', login_results, re.M | re.U)
            if match is None:
                self._downloader.report_warning('Failed to get timeStmp - did the page structure change?')
                # Bug fix: same None-crash as secTok above.
                return False
            timeStmp = match.group(1)

            tfa_form_strs = {
                'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
                'smsToken': '',
                'smsUserPin': tfa_code,
                'smsVerifyPin': 'Verify',

                'PersistentCookie': 'yes',
                'checkConnection': '',
                'checkedDomains': 'youtube',
                'pstMsg': '1',
                'secTok': secTok,
                'timeStmp': timeStmp,
                'service': 'youtube',
                'hl': 'en_US',
            }
            tfa_data = self._encode_form(tfa_form_strs)

            tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="gaia_secondfactorform"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _confirm_age(self):
        """POST the age-verification confirmation form (best effort)."""
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        req = compat_urllib_request.Request(
            self._AGE_URL, self._encode_form(age_form))

        self._download_webpage(
            req, None,
            note='Confirming age', errnote='Unable to confirm age',
            fatal=False)

    def _real_initialize(self):
        # Only set the language / log in when we are actually downloading
        # and credentials were supplied.
        if self._downloader is None:
            return
        if self._get_login_info()[0] is not None:
            if not self._set_language():
                return
            if not self._login():
                return
        self._confirm_age()
200
201
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
    IE_DESC = 'YouTube.com'
    # Matches every known watch-page URL shape. Group 1 captures the URL
    # prefix (so naked 11-char IDs are also accepted); group 2 is the video ID.
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//) # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)? # handle anchor (#/) redirect urls
                         (?: # the various things that can precede the ID:
                             (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
                             |(?: # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?) # the params delimiter ? or # or #!
                                 (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |youtu\.be/ # just youtu.be/xxxx
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )? # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
                     (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
                     (?(1).+)? # if we found the ID, everything can follow
                     $"""
    # Captures the original URL from redirect pages (e.g. age verification).
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Known format metadata, keyed by itag (as a string, matching the stream
    # map values). 'preference' steers format ordering: plain progressive
    # formats > HLS (-10) > 3D (-20) > DASH video (-40) > DASH audio (-50).
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},


        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        # NOTE: itag 151 really is a 72-pixel-high stream, not a typo for 720.
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'VP9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        # 244/245/246 are all 480p variants at different bitrates.
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'VP9'},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # Dash mov
        '298': {'ext': 'mov', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '299': {'ext': 'mov', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }

    IE_NAME = 'youtube'
    # Test fixtures exercised by the test_download test suite.
    _TESTS = [
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKc',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'like_count': int,
                'dislike_count': int,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'id': 'UxxajLWwzqY',
                'ext': 'mp4',
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'description': 'md5:fea86fda2d5a5784273df5c7cc994d9f',
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'id': '07FYdnEawAQ',
                'ext': 'mp4',
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'id': 'yZIXLfi8CZQ',
                'ext': 'mp4',
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia'
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'id': 'a9LDPn-MO4I',
                'ext': 'm4a',
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'description': '',
                'uploader': '8KVIDEO',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # DASH manifest with encrypted signature
        {
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'id': 'IB3lcPjvWLA',
                'ext': 'm4a',
                'title': 'Afrojack - The Spark ft. Spree Wilson',
                'description': 'md5:9717375db5a9a3992be4668bbf3bc0a8',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
    ]
406
407 def __init__(self, *args, **kwargs):
408 super(YoutubeIE, self).__init__(*args, **kwargs)
409 self._player_cache = {}
410
411 def report_video_info_webpage_download(self, video_id):
412 """Report attempt to download video info webpage."""
413 self.to_screen('%s: Downloading video info webpage' % video_id)
414
415 def report_information_extraction(self, video_id):
416 """Report attempt to extract video information."""
417 self.to_screen('%s: Extracting video information' % video_id)
418
419 def report_unavailable_format(self, video_id, format):
420 """Report extracted video URL."""
421 self.to_screen('%s: Format %s not available' % (video_id, format))
422
423 def report_rtmp_download(self):
424 """Indicate the download will use the RTMP protocol."""
425 self.to_screen('RTMP download detected')
426
427 def _signature_cache_id(self, example_sig):
428 """ Return a string representation of a signature """
429 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
430
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Build (or load from the filesystem cache) the signature-decryption
        function for the given player.

        example_sig is used only for its dot-separated part lengths, which
        identify the signature layout handled by this player variant.
        Raises ExtractorError if player_url has an unrecognized shape.
        """
        # Identify the player variant (id) and type (js vs swf) from its URL.
        id_m = re.match(
            r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # func_id becomes part of a cache file name, so it must not contain
        # path separators.
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # Cached form is a list of source-character indices: the signature
            # function is a pure character selection/permutation.
            return lambda s: ''.join(s[i] for i in cache_spec)

        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note='Downloading %s player %s' % (player_type, player_id),
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            # SWF is binary; fetch the raw response instead of decoded text.
            urlh = self._request_webpage(
                player_url, video_id,
                note='Downloading %s player %s' % (player_type, player_id),
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        if cache_spec is None:
            # Derive the index permutation by running the freshly-extracted
            # function on a probe string of distinct characters.
            test_string = ''.join(map(compat_chr, range(len(example_sig))))
            cache_res = res(test_string)
            cache_spec = [ord(c) for c in cache_res]

        # Persist the permutation so future runs skip the player download.
        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
472
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the extracted signature function
        (used with --youtube-print-sig-code to ease hardcoding new schemes).

        The function is probed with a string of distinct characters to
        recover its index permutation, which is then compressed into slice
        expressions where consecutive indices form arithmetic runs.
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render a Python slice; omit parts that match the defaults.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end+step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            start = '(Never used)'  # Quelch pyflakes warnings - start will be
                                    # set as soon as step is set
            # Walk consecutive index pairs, coalescing runs with step +/-1
            # into slices and emitting lone indices as s[i].
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    if i - prev == step:
                        continue
                    # Run ended at prev; flush it as a slice.
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Start of a new +/-1 run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield 's[%d]' % prev
            # Flush the final element or the final open run.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        # Probe with chr(0..n-1) so output characters reveal source indices.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
511
512 def _parse_sig_js(self, jscode):
513 funcname = self._search_regex(
514 r'signature=([$a-zA-Z]+)', jscode,
515 'Initial JS player signature function name')
516
517 jsi = JSInterpreter(jscode)
518 initial_function = jsi.extract_function(funcname)
519 return lambda s: initial_function([s])
520
521 def _parse_sig_swf(self, file_contents):
522 swfi = SWFInterpreter(file_contents)
523 TARGET_CLASSNAME = 'SignatureDecipher'
524 searched_class = swfi.extract_class(TARGET_CLASSNAME)
525 initial_function = swfi.extract_function(searched_class, 'decipher')
526 return lambda s: initial_function([s])
527
528 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
529 """Turn the encrypted s field into a working signature"""
530
531 if player_url is None:
532 raise ExtractorError('Cannot decrypt signature without player_url')
533
534 if player_url.startswith('//'):
535 player_url = 'https:' + player_url
536 try:
537 player_id = (player_url, self._signature_cache_id(s))
538 if player_id not in self._player_cache:
539 func = self._extract_signature_function(
540 video_id, player_url, s
541 )
542 self._player_cache[player_id] = func
543 func = self._player_cache[player_id]
544 if self._downloader.params.get('youtube_print_sig_code'):
545 self._print_sig_code(func, s)
546 return func(s)
547 except Exception as e:
548 tb = traceback.format_exc()
549 raise ExtractorError(
550 'Signature extraction failed: ' + tb, cause=e)
551
552 def _get_available_subtitles(self, video_id, webpage):
553 try:
554 sub_list = self._download_webpage(
555 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
556 video_id, note=False)
557 except ExtractorError as err:
558 self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
559 return {}
560 lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
561
562 sub_lang_list = {}
563 for l in lang_list:
564 lang = l[1]
565 if lang in sub_lang_list:
566 continue
567 params = compat_urllib_parse.urlencode({
568 'lang': lang,
569 'v': video_id,
570 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
571 'name': unescapeHTML(l[0]).encode('utf-8'),
572 })
573 url = 'https://www.youtube.com/api/timedtext?' + params
574 sub_lang_list[lang] = url
575 if not sub_lang_list:
576 self._downloader.report_warning('video doesn\'t have subtitles')
577 return {}
578 return sub_lang_list
579
    def _get_available_automatic_caption(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.

        Returns a dict mapping target language code -> caption URL, or {}
        (with a warning) when automatic captions are unavailable.
        """
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        self.to_screen('%s: Looking for automatic captions' % video_id)
        # The caption endpoint and timestamp live in the embedded player config.
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if mobj is None:
            self._downloader.report_warning(err_msg)
            return {}
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            # The 'asr' (automatic speech recognition) track marks machine
            # captions; without it the video only has manual subtitles.
            original_lang_node = caption_list.find('track')
            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']

            # Build one translated-caption URL per available target language.
            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = compat_urllib_parse.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': sub_format,
                    'ts': timestamp,
                    'kind': 'asr',
                })
                sub_lang_list[sub_lang] = caption_url + '&' + params
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
626
627 @classmethod
628 def extract_id(cls, url):
629 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
630 if mobj is None:
631 raise ExtractorError('Invalid URL: %s' % url)
632 video_id = mobj.group(2)
633 return video_id
634
635 def _extract_from_m3u8(self, manifest_url, video_id):
636 url_map = {}
637 def _get_urls(_manifest):
638 lines = _manifest.split('\n')
639 urls = filter(lambda l: l and not l.startswith('#'),
640 lines)
641 return urls
642 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
643 formats_urls = _get_urls(manifest)
644 for format_url in formats_urls:
645 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
646 url_map[itag] = format_url
647 return url_map
648
649 def _extract_annotations(self, video_id):
650 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
651 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
652
653 def _real_extract(self, url):
654 proto = (
655 'http' if self._downloader.params.get('prefer_insecure', False)
656 else 'https')
657
658 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
659 mobj = re.search(self._NEXT_URL_RE, url)
660 if mobj:
661 url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
662 video_id = self.extract_id(url)
663
664 # Get video webpage
665 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
666 pref_cookies = [
667 c for c in self._downloader.cookiejar
668 if c.domain == '.youtube.com' and c.name == 'PREF']
669 for pc in pref_cookies:
670 if 'hl=' in pc.value:
671 pc.value = re.sub(r'hl=[^&]+', 'hl=en', pc.value)
672 else:
673 if pc.value:
674 pc.value += '&'
675 pc.value += 'hl=en'
676 video_webpage = self._download_webpage(url, video_id)
677
678 # Attempt to extract SWF player URL
679 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
680 if mobj is not None:
681 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
682 else:
683 player_url = None
684
685 # Get video info
686 self.report_video_info_webpage_download(video_id)
687 if re.search(r'player-age-gate-content">', video_webpage) is not None:
688 self.report_age_confirmation()
689 age_gate = True
690 # We simulate the access to the video from www.youtube.com/v/{video_id}
691 # this can be viewed without login into Youtube
692 data = compat_urllib_parse.urlencode({
693 'video_id': video_id,
694 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
695 'sts': self._search_regex(
696 r'"sts"\s*:\s*(\d+)', video_webpage, 'sts'),
697 })
698 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
699 video_info_webpage = self._download_webpage(video_info_url, video_id,
700 note=False,
701 errnote='unable to download video info webpage')
702 video_info = compat_parse_qs(video_info_webpage)
703 else:
704 age_gate = False
705 for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
706 video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
707 % (video_id, el_type))
708 video_info_webpage = self._download_webpage(video_info_url, video_id,
709 note=False,
710 errnote='unable to download video info webpage')
711 video_info = compat_parse_qs(video_info_webpage)
712 if 'token' in video_info:
713 break
714 if 'token' not in video_info:
715 if 'reason' in video_info:
716 raise ExtractorError(
717 'YouTube said: %s' % video_info['reason'][0],
718 expected=True, video_id=video_id)
719 else:
720 raise ExtractorError(
721 '"token" parameter not in video info for unknown reason',
722 video_id=video_id)
723
724 if 'view_count' in video_info:
725 view_count = int(video_info['view_count'][0])
726 else:
727 view_count = None
728
729 # Check for "rental" videos
730 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
731 raise ExtractorError('"rental" videos not supported')
732
733 # Start extracting information
734 self.report_information_extraction(video_id)
735
736 # uploader
737 if 'author' not in video_info:
738 raise ExtractorError('Unable to extract uploader name')
739 video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
740
741 # uploader_id
742 video_uploader_id = None
743 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
744 if mobj is not None:
745 video_uploader_id = mobj.group(1)
746 else:
747 self._downloader.report_warning('unable to extract uploader nickname')
748
749 # title
750 if 'title' in video_info:
751 video_title = video_info['title'][0]
752 else:
753 self._downloader.report_warning('Unable to extract video title')
754 video_title = '_'
755
756 # thumbnail image
757 # We try first to get a high quality image:
758 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
759 video_webpage, re.DOTALL)
760 if m_thumb is not None:
761 video_thumbnail = m_thumb.group(1)
762 elif 'thumbnail_url' not in video_info:
763 self._downloader.report_warning('unable to extract video thumbnail')
764 video_thumbnail = None
765 else: # don't panic if we can't find it
766 video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
767
768 # upload date
769 upload_date = None
770 mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
771 if mobj is None:
772 mobj = re.search(
773 r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
774 video_webpage)
775 if mobj is not None:
776 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
777 upload_date = unified_strdate(upload_date)
778
779 m_cat_container = self._search_regex(
780 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
781 video_webpage, 'categories', fatal=False)
782 if m_cat_container:
783 category = self._html_search_regex(
784 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
785 default=None)
786 video_categories = None if category is None else [category]
787 else:
788 video_categories = None
789
790 # description
791 video_description = get_element_by_id("eow-description", video_webpage)
792 if video_description:
793 video_description = re.sub(r'''(?x)
794 <a\s+
795 (?:[a-zA-Z-]+="[^"]+"\s+)*?
796 title="([^"]+)"\s+
797 (?:[a-zA-Z-]+="[^"]+"\s+)*?
798 class="yt-uix-redirect-link"\s*>
799 [^<]+
800 </a>
801 ''', r'\1', video_description)
802 video_description = clean_html(video_description)
803 else:
804 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
805 if fd_mobj:
806 video_description = unescapeHTML(fd_mobj.group(1))
807 else:
808 video_description = ''
809
810 def _extract_count(count_name):
811 count = self._search_regex(
812 r'id="watch-%s"[^>]*>.*?([\d,]+)\s*</span>' % re.escape(count_name),
813 video_webpage, count_name, default=None)
814 if count is not None:
815 return int(count.replace(',', ''))
816 return None
817 like_count = _extract_count('like')
818 dislike_count = _extract_count('dislike')
819
820 # subtitles
821 video_subtitles = self.extract_subtitles(video_id, video_webpage)
822
823 if self._downloader.params.get('listsubtitles', False):
824 self._list_available_subtitles(video_id, video_webpage)
825 return
826
827 if 'length_seconds' not in video_info:
828 self._downloader.report_warning('unable to extract video duration')
829 video_duration = None
830 else:
831 video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
832
833 # annotations
834 video_annotations = None
835 if self._downloader.params.get('writeannotations', False):
836 video_annotations = self._extract_annotations(video_id)
837
838 # Decide which formats to download
839 try:
840 mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
841 if not mobj:
842 raise ValueError('Could not find vevo ID')
843 json_code = uppercase_escape(mobj.group(1))
844 ytplayer_config = json.loads(json_code)
845 args = ytplayer_config['args']
846 # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
847 # this signatures are encrypted
848 if 'url_encoded_fmt_stream_map' not in args:
849 raise ValueError('No stream_map present') # caught below
850 re_signature = re.compile(r'[&,]s=')
851 m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
852 if m_s is not None:
853 self.to_screen('%s: Encrypted signatures detected.' % video_id)
854 video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
855 m_s = re_signature.search(args.get('adaptive_fmts', ''))
856 if m_s is not None:
857 if 'adaptive_fmts' in video_info:
858 video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
859 else:
860 video_info['adaptive_fmts'] = [args['adaptive_fmts']]
861 except ValueError:
862 pass
863
864 def _map_to_format_list(urlmap):
865 formats = []
866 for itag, video_real_url in urlmap.items():
867 dct = {
868 'format_id': itag,
869 'url': video_real_url,
870 'player_url': player_url,
871 }
872 if itag in self._formats:
873 dct.update(self._formats[itag])
874 formats.append(dct)
875 return formats
876
877 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
878 self.report_rtmp_download()
879 formats = [{
880 'format_id': '_rtmp',
881 'protocol': 'rtmp',
882 'url': video_info['conn'][0],
883 'player_url': player_url,
884 }]
885 elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
886 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
887 if 'rtmpe%3Dyes' in encoded_url_map:
888 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
889 url_map = {}
890 for url_data_str in encoded_url_map.split(','):
891 url_data = compat_parse_qs(url_data_str)
892 if 'itag' not in url_data or 'url' not in url_data:
893 continue
894 format_id = url_data['itag'][0]
895 url = url_data['url'][0]
896
897 if 'sig' in url_data:
898 url += '&signature=' + url_data['sig'][0]
899 elif 's' in url_data:
900 encrypted_sig = url_data['s'][0]
901
902 if not age_gate:
903 jsplayer_url_json = self._search_regex(
904 r'"assets":.+?"js":\s*("[^"]+")',
905 video_webpage, 'JS player URL')
906 player_url = json.loads(jsplayer_url_json)
907 if player_url is None:
908 player_url_json = self._search_regex(
909 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
910 video_webpage, 'age gate player URL')
911 player_url = json.loads(player_url_json)
912
913 if self._downloader.params.get('verbose'):
914 if player_url is None:
915 player_version = 'unknown'
916 player_desc = 'unknown'
917 else:
918 if player_url.endswith('swf'):
919 player_version = self._search_regex(
920 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
921 'flash player', fatal=False)
922 player_desc = 'flash player %s' % player_version
923 else:
924 player_version = self._search_regex(
925 r'html5player-([^/]+?)(?:/html5player)?\.js',
926 player_url,
927 'html5 player', fatal=False)
928 player_desc = 'html5 player %s' % player_version
929
930 parts_sizes = self._signature_cache_id(encrypted_sig)
931 self.to_screen('{%s} signature length %s, %s' %
932 (format_id, parts_sizes, player_desc))
933
934 signature = self._decrypt_signature(
935 encrypted_sig, video_id, player_url, age_gate)
936 url += '&signature=' + signature
937 if 'ratebypass' not in url:
938 url += '&ratebypass=yes'
939 url_map[format_id] = url
940 formats = _map_to_format_list(url_map)
941 elif video_info.get('hlsvp'):
942 manifest_url = video_info['hlsvp'][0]
943 url_map = self._extract_from_m3u8(manifest_url, video_id)
944 formats = _map_to_format_list(url_map)
945 else:
946 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
947
948 # Look for the DASH manifest
949 if self._downloader.params.get('youtube_include_dash_manifest', True):
950 try:
951 # The DASH manifest used needs to be the one from the original video_webpage.
952 # The one found in get_video_info seems to be using different signatures.
953 # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
954 # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
955 # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
956 if age_gate:
957 dash_manifest_url = video_info.get('dashmpd')[0]
958 else:
959 dash_manifest_url = ytplayer_config['args']['dashmpd']
960 def decrypt_sig(mobj):
961 s = mobj.group(1)
962 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
963 return '/signature/%s' % dec_s
964 dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
965 dash_doc = self._download_xml(
966 dash_manifest_url, video_id,
967 note='Downloading DASH manifest',
968 errnote='Could not download DASH manifest')
969 for r in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
970 url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
971 if url_el is None:
972 continue
973 format_id = r.attrib['id']
974 video_url = url_el.text
975 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
976 f = {
977 'format_id': format_id,
978 'url': video_url,
979 'width': int_or_none(r.attrib.get('width')),
980 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
981 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
982 'filesize': filesize,
983 }
984 try:
985 existing_format = next(
986 fo for fo in formats
987 if fo['format_id'] == format_id)
988 except StopIteration:
989 f.update(self._formats.get(format_id, {}))
990 formats.append(f)
991 else:
992 existing_format.update(f)
993
994 except (ExtractorError, KeyError) as e:
995 self.report_warning('Skipping DASH manifest: %s' % e, video_id)
996
997 self._sort_formats(formats)
998
999 return {
1000 'id': video_id,
1001 'uploader': video_uploader,
1002 'uploader_id': video_uploader_id,
1003 'upload_date': upload_date,
1004 'title': video_title,
1005 'thumbnail': video_thumbnail,
1006 'description': video_description,
1007 'categories': video_categories,
1008 'subtitles': video_subtitles,
1009 'duration': video_duration,
1010 'age_limit': 18 if age_gate else 0,
1011 'annotations': video_annotations,
1012 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1013 'view_count': view_count,
1014 'like_count': like_count,
1015 'dislike_count': dislike_count,
1016 'formats': formats,
1017 }
1018
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com playlists'
    # Matches full playlist URLs (watch/playlist/embed pages carrying a
    # p=/a=/list= query parameter or an old-style /p/ path) as well as bare
    # playlist ids such as "PLBB231211A4F62143".
    _VALID_URL = r"""(?x)(?:
                        (?:https?://)?
                        (?:\w+\.)?
                        youtube\.com/
                        (?:
                           (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
                           \? (?:.*?&)*? (?:p|a|list)=
                        |  p/
                        )
                        (
                            (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
                            # Top tracks, they can also include dots
                            |(?:MC)[\w\.]*
                        )
                        .*
                     |
                        ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                     )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    # Presence of this marker in the "load more" widget means another page follows.
    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
    IE_NAME = 'youtube:playlist'
    _TESTS = [{
        'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
        'info_dict': {
            'title': 'ytdl test PL',
        },
        'playlist_count': 3,
    }, {
        'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
        'info_dict': {
            'title': 'YDL_Empty_List',
        },
        'playlist_count': 0,
    }, {
        'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
        'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
        'info_dict': {
            'title': '29C3: Not my department',
        },
        'playlist_count': 95,
    }, {
        'note': 'issue #673',
        'url': 'PLBB231211A4F62143',
        'info_dict': {
            'title': '[OLD]Team Fortress 2 (Class-based LP)',
        },
        'playlist_mincount': 26,
    }, {
        'note': 'Large playlist',
        'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
        'info_dict': {
            'title': 'Uploads from Cauchemar',
        },
        'playlist_mincount': 799,
    }, {
        'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
        'info_dict': {
            'title': 'YDL_safe_search',
        },
        'playlist_count': 2,
    }, {
        'note': 'embedded',
        'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA15',
        }
    }, {
        'note': 'Embedded SWF player',
        'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
        'playlist_count': 4,
        'info_dict': {
            'title': 'JODA7',
        }
    }]

    def _real_initialize(self):
        self._login()

    def _ids_to_results(self, ids):
        """Wrap bare video ids into url results handled by the Youtube extractor."""
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _extract_mix(self, playlist_id):
        """Extract an auto-generated mix ('RD' playlists) from its watch page."""
        # The mixes are generated from a single video
        # the id of the playlist is just 'RD' + video_id
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        # Try the most specific title element first, then fall back.
        title_span = (
            search_title('playlist-title') or
            search_title('title long-title') or
            search_title('title'))
        title = clean_html(title_span)
        ids = orderedSet(re.findall(
            r'''(?xs)data-video-username=".*?".*?
                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
            webpage))
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError('For downloading YouTube.com top lists, use '
                                 'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page

        # Check if the playlist exists or is private
        if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
            raise ExtractorError(
                'The playlist doesn\'t exist or is private, use --username or '
                '--netrc to access it.',
                expected=True)

        # Extract the video ids from the playlist pages, following the
        # "load more" ajax widget until it disappears.
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, 'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)
1188
1189
class YoutubeTopListIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:toplist'
    IE_DESC = ('YouTube.com top lists, "yttoplist:{channel}:{list title}"'
               ' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'
    _TESTS = [{
        'url': 'yttoplist:music:Trending',
        'playlist_mincount': 5,
        'skip': 'Only works for logged-in users',
    }]

    def _real_extract(self, url):
        """Resolve a "yttoplist:channel:title" pseudo-URL to its playlist."""
        match = re.match(self._VALID_URL, url)
        channel = match.group('chann')
        title = match.group('title')
        # Locate the link to the named list on the channel page; the link
        # carries the url-encoded title as part of its query string.
        query = compat_urllib_parse.urlencode({'title': title})
        channel_page = self._download_webpage(
            'https://www.youtube.com/%s' % channel, title)
        link = self._html_search_regex(
            r'''(?x)
                <a\s+href="([^"]+)".*?>\s*
                <span\s+class="branded-page-module-title-text">\s*
                <span[^>]*>.*?%s.*?</span>''' % re.escape(query),
            channel_page, 'list')
        list_url = compat_urlparse.urljoin('https://www.youtube.com/', link)

        video_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        # Sometimes the webpage doesn't contain the videos;
        # keep retrying until they appear.
        attempt = 0
        while True:
            note = 'Downloading Youtube mix'
            if attempt > 0:
                note += ', retry #%d' % attempt
            webpage = self._download_webpage(list_url, title, note)
            video_ids = orderedSet(re.findall(video_re, webpage))
            if video_ids:
                break
            attempt += 1
        return self.playlist_result(
            self._ids_to_results(video_ids), playlist_title=title)
1231
1232
class YoutubeChannelIE(InfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
    }]

    def extract_videos_from_page(self, page):
        """Return the video ids linked from *page*, deduplicated, in order."""
        found = []
        for match in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            candidate = match.group(1)
            if candidate not in found:
                found.append(candidate)
        return found

    def _real_extract(self, url):
        """Collect every video of a channel and return them as a playlist."""
        match = re.match(self._VALID_URL, url)
        if match is None:
            raise ExtractorError('Invalid URL: %s' % url)
        channel_id = match.group(1)

        channel_page = self._download_webpage(
            'https://www.youtube.com/channel/%s/videos' % channel_id, channel_id)
        is_autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if is_autogenerated:
            # Autogenerated channels list every video on the single page;
            # the ajax pages can't be used, they are empty.
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Walk the json-based channel_ajax pages until the "load more"
            # indicator disappears.
            video_ids = []
            for pagenum in itertools.count(1):
                page = self._download_json(
                    self._MORE_PAGES_URL % (pagenum, channel_id), channel_id,
                    note='Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)
                video_ids.extend(
                    self.extract_videos_from_page(page['content_html']))
                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen('[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                   for video_id in video_ids]
        return self.playlist_result(entries, channel_id)
1292
1293
class YoutubeUserIE(InfoExtractor):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    # Maximum number of entries the GData API returns per request.
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor: this regex is too permissive and it would match.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies): return False
        else: return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError('Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            """Yield url-result dicts for one GData page (lazy generator)."""
            # GData start-index is 1-based.
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(
                gdata_url, username,
                'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError('Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                # No entries left: the paged list stops here.
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = OnDemandPagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)
1365
1366
class YoutubeSearchIE(SearchInfoExtractor):
    IE_DESC = 'YouTube.com searches'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        PAGE_SIZE = 50
        video_ids = []
        page_idx = 0
        # The effective limit shrinks once the API reports its real total.
        limit = n

        while PAGE_SIZE * page_idx < limit:
            result_url = self._API_URL % (
                compat_urllib_parse.quote_plus(query.encode('utf-8')),
                PAGE_SIZE * page_idx + 1)
            data_json = self._download_webpage(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % (page_idx + 1),
                errnote='Unable to download API page')
            api_response = json.loads(data_json)['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            video_ids.extend(video['id'] for video in api_response['items'])

            limit = min(n, api_response['totalItems'])
            page_idx += 1

        # The last page may have pushed us past the requested count.
        del video_ids[n:]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in video_ids]
        return self.playlist_result(videos, query)
1408
1409
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same extraction as YoutubeSearchIE; the API query additionally orders
    # results by publication date (newest first).
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
1415
1416
class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }]

    def _real_extract(self, url):
        """Turn a results page into a playlist of url entries."""
        match = re.match(self._VALID_URL, url)
        query = compat_urllib_parse.unquote_plus(match.group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, 'result HTML')

        entries = []
        # Each result item sits inside its own lockup-title heading.
        for item_html in re.findall(
                r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code):
            item_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], item_html, 'item title', fatal=False)
            href_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', item_html, 'item URL')
            entries.append({
                '_type': 'url',
                'url': compat_urlparse.urljoin(
                    'https://www.youtube.com/', href_snippet),
                'title': item_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
1458
1459
class YoutubeShowIE(InfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'http://www.youtube.com/show/airdisasters',
        'playlist_mincount': 3,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        """Return every season playlist of the show as one playlist result."""
        playlist_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(
            url, playlist_id, 'Downloading show webpage')
        # The show page links one playlist per season.
        season_matches = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen('%s: Found %s seasons' % (playlist_id, len(season_matches)))
        entries = []
        for season in season_matches:
            entries.append(self.url_result(
                'https://www.youtube.com' + season.group(1), 'YoutubePlaylist'))

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage, fatal=False),
            'entries': entries,
        }
1494
1495
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        # URL template; a single '%s' placeholder remains for the paging token.
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        """Walk the ajax feed pages and return all linked videos as a playlist."""
        feed_entries = []
        # Starts as the integer 0; afterwards it is the (string) paging token
        # scraped from the "load more" widget. Both format fine via %s.
        paging = 0
        for i in itertools.count(1):
            info = self._download_json(self._FEED_TEMPLATE % paging,
                                       '%s feed' % self._FEED_NAME,
                                       'Downloading page %s' % i)
            # Some feeds use 'feed_html', others 'content_html'.
            feed_html = info.get('feed_html') or info.get('content_html')
            load_more_widget_html = info.get('load_more_widget_html') or feed_html
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            mobj = re.search(
                r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
                load_more_widget_html)
            if mobj is None:
                break
            paging = mobj.group('paging')
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
1541
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # System-wide feed: _PERSONAL_FEED stays at the inherited False.
    IE_DESC = 'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
1547
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = 'Youtube Watch Later'
    # Per-account feed: use the action_load_personal_feed ajax action.
    _PERSONAL_FEED = True
1554
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    IE_DESC = 'Youtube watch history, "ythistory" keyword (requires authentication)'
    # r-prefix added: '\.' is an invalid escape sequence in a plain string
    # literal (a warning on newer Pythons), and every other _VALID_URL in
    # this file uses a raw string. The resulting value is unchanged.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    # Per-account feed: use the action_load_personal_feed ajax action.
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = 'Youtube Watch History'
1561
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        """Delegate to the playlist extractor via the favourites playlist id."""
        favourites_page = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(
            r'list=(.+?)["&]', favourites_page, 'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')
1572
1573
class YoutubeSubscriptionsIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:subscriptions'
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _TESTS = []

    def _real_extract(self, url):
        """Collect the subscriptions feed, paging like a playlist."""
        title = 'Youtube Subscriptions'
        page = self._download_webpage('https://www.youtube.com/feed/subscriptions', title)

        # Same pagination scheme as playlists, but the video links here
        # carry no index attribute.
        ids = []
        more_widget_html = content_html = page
        page_idx = 0
        while True:
            page_idx += 1
            ids.extend(orderedSet(re.findall(
                r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)))

            more_link = re.search(
                r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if more_link is None:
                break

            more = self._download_json(
                'https://youtube.com/%s' % more_link.group('more'), title,
                'Downloading page #%s' % page_idx,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return {
            '_type': 'playlist',
            'title': title,
            'entries': self._ids_to_results(ids),
        }
1610
1611
class YoutubeTruncatedURLIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+
        )?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Always fail: such URLs are the result of an unquoted '&' in a shell."""
        hint = (
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .')
        raise ExtractorError(hint, expected=True)