]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/youtube.py
[youtube] Move swfinterp into its own file
[yt-dlp.git] / youtube_dl / extractor / youtube.py
1 # coding: utf-8
2
3 import collections
4 import errno
5 import io
6 import itertools
7 import json
8 import os.path
9 import re
10 import struct
11 import traceback
12 import zlib
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from .subtitles import SubtitlesInfoExtractor
16 from ..jsinterp import JSInterpreter
17 from ..swfinterp import SWFInterpreter
18 from ..utils import (
19 compat_chr,
20 compat_parse_qs,
21 compat_urllib_parse,
22 compat_urllib_request,
23 compat_urlparse,
24 compat_str,
25
26 clean_html,
27 get_cachedir,
28 get_element_by_id,
29 get_element_by_attribute,
30 ExtractorError,
31 int_or_none,
32 PagedList,
33 unescapeHTML,
34 unified_strdate,
35 orderedSet,
36 write_json_file,
37 uppercase_escape,
38 )
39
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions (login, language, age check) for Youtube extractors."""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _LANG_URL = r'https://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
    _AGE_URL = 'https://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        """Switch the site interface to English/US; return True on success."""
        return bool(self._download_webpage(
            self._LANG_URL, None,
            note=u'Setting language', errnote='unable to set language',
            fatal=False))

    def _login(self):
        """Log into YouTube through the Google sign-in form.

        Returns True on success, False on any failure (no credentials,
        page download failure, or rejected username/password).  Raises
        ExtractorError when credentials are missing but _LOGIN_REQUIRED
        is set.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return False

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note=u'Downloading login page',
            errnote=u'unable to fetch login page', fatal=False)
        if login_page is False:
            # Fix: report failure consistently as False (was a bare `return`,
            # i.e. None, unlike every other failure path of this method).
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, u'Login GALX parameter')

        # Log in: fields expected by Google's sign-in endpoint.
        login_form_strs = {
            u'continue': u'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            u'Email': username,
            u'GALX': galx,
            u'Passwd': password,
            u'PersistentCookie': u'yes',
            u'_utf8': u'霱',
            u'bgresponse': u'js_disabled',
            u'checkConnection': u'',
            u'checkedDomains': u'youtube',
            u'dnConn': u'',
            u'pstMsg': u'0',
            u'rmShown': u'1',
            u'secTok': u'',
            u'signIn': u'Sign in',
            u'timeStmp': u'',
            u'service': u'youtube',
            u'uilel': u'3',
            u'hl': u'en_US',
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')

        req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note=u'Logging in', errnote=u'unable to log in', fatal=False)
        if login_results is False:
            return False
        # If the login form is still present, the credentials were rejected.
        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning(u'unable to log in: bad username or password')
            return False
        return True

    def _confirm_age(self):
        """POST the age-verification confirmation form; always returns True."""
        age_form = {
            'next_url': '/',
            'action_confirm': 'Confirm',
        }
        req = compat_urllib_request.Request(self._AGE_URL,
            compat_urllib_parse.urlencode(age_form).encode('ascii'))

        self._download_webpage(
            req, None,
            note=u'Confirming age', errnote=u'Unable to confirm age')
        return True

    def _real_initialize(self):
        # Each step short-circuits initialization when it fails.
        if self._downloader is None:
            return
        if not self._set_language():
            return
        if not self._login():
            return
        self._confirm_age()
131
132
class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
    IE_DESC = u'YouTube.com'
    # Group 1 captures everything before the video id (optional, so a bare
    # 11-character id also matches); group 2 captures the id itself.
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)?                                    # http(s):// or protocol-independent URL (optional)
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
                                 v=
                             )
                         ))
                         |youtu\.be/                                          # just youtu.be/xxxx
                         |https?://(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    # Finds the original URL embedded in redirect URLs (e.g. age verification).
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Known format properties, keyed by the 'itag' URL parameter (as string).
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},


        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 48, 'preference': -50},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 128, 'preference': -50},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'vcodec': 'none', 'abr': 256, 'preference': -50},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'VP8', 'preference': -40},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 48, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }

    IE_NAME = u'youtube'
    # Test definitions consumed by the youtube-dl test harness.
    _TESTS = [
        {
            u"url": u"http://www.youtube.com/watch?v=BaW_jenozKc",
            u"file": u"BaW_jenozKc.mp4",
            u"info_dict": {
                u"title": u"youtube-dl test video \"'/\\ä↭𝕐",
                u"uploader": u"Philipp Hagemeister",
                u"uploader_id": u"phihag",
                u"upload_date": u"20121002",
                u"description": u"test chars: \"'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .",
                u"categories": [u'Science & Technology'],
            }
        },
        {
            u"url": u"http://www.youtube.com/watch?v=UxxajLWwzqY",
            u"file": u"UxxajLWwzqY.mp4",
            u"note": u"Test generic use_cipher_signature video (#897)",
            u"info_dict": {
                u"upload_date": u"20120506",
                u"title": u"Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]",
                u"description": u"md5:fea86fda2d5a5784273df5c7cc994d9f",
                u"uploader": u"Icona Pop",
                u"uploader_id": u"IconaPop"
            }
        },
        {
            u"url": u"https://www.youtube.com/watch?v=07FYdnEawAQ",
            u"file": u"07FYdnEawAQ.mp4",
            u"note": u"Test VEVO video with age protection (#956)",
            u"info_dict": {
                u"upload_date": u"20130703",
                u"title": u"Justin Timberlake - Tunnel Vision (Explicit)",
                u"description": u"md5:64249768eec3bc4276236606ea996373",
                u"uploader": u"justintimberlakeVEVO",
                u"uploader_id": u"justintimberlakeVEVO"
            }
        },
        {
            u"url": u"//www.YouTube.com/watch?v=yZIXLfi8CZQ",
            u"file": u"yZIXLfi8CZQ.mp4",
            u"note": u"Embed-only video (#1746)",
            u"info_dict": {
                u"upload_date": u"20120608",
                u"title": u"Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012",
                u"description": u"md5:09b78bd971f1e3e289601dfba15ca4f7",
                u"uploader": u"SET India",
                u"uploader_id": u"setindia"
            }
        },
        {
            u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
            u"file": u"a9LDPn-MO4I.m4a",
            u"note": u"256k DASH audio (format 141) via DASH manifest",
            u"info_dict": {
                u"upload_date": "20121002",
                u"uploader_id": "8KVIDEO",
                u"description": "No description available.",
                u"uploader": "8KVIDEO",
                u"title": "UHDTV TEST 8K VIDEO.mp4"
            },
            u"params": {
                u"youtube_include_dash_manifest": True,
                u"format": "141",
            },
        },
        # DASH manifest with encrypted signature
        {
            u'url': u'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            u'info_dict': {
                u'id': u'IB3lcPjvWLA',
                u'ext': u'm4a',
                u'title': u'Afrojack - The Spark ft. Spree Wilson',
                u'description': u'md5:9717375db5a9a3992be4668bbf3bc0a8',
                u'uploader': u'AfrojackVEVO',
                u'uploader_id': u'AfrojackVEVO',
                u'upload_date': u'20131011',
            },
            u"params": {
                u'youtube_include_dash_manifest': True,
                u'format': '141',
            },
        },
    ]
322
323
324 @classmethod
325 def suitable(cls, url):
326 """Receives a URL and returns True if suitable for this IE."""
327 if YoutubePlaylistIE.suitable(url): return False
328 return re.match(cls._VALID_URL, url) is not None
329
330 def __init__(self, *args, **kwargs):
331 super(YoutubeIE, self).__init__(*args, **kwargs)
332 self._player_cache = {}
333
334 def report_video_info_webpage_download(self, video_id):
335 """Report attempt to download video info webpage."""
336 self.to_screen(u'%s: Downloading video info webpage' % video_id)
337
338 def report_information_extraction(self, video_id):
339 """Report attempt to extract video information."""
340 self.to_screen(u'%s: Extracting video information' % video_id)
341
342 def report_unavailable_format(self, video_id, format):
343 """Report extracted video URL."""
344 self.to_screen(u'%s: Format %s not available' % (video_id, format))
345
346 def report_rtmp_download(self):
347 """Indicate the download will use the RTMP protocol."""
348 self.to_screen(u'RTMP download detected')
349
    def _extract_signature_function(self, video_id, player_url, slen):
        """Build a function that decrypts an encrypted signature.

        video_id: video id, used only for log messages.
        player_url: URL of the html5 (.js) or flash (.swf) player containing
            the decryption routine.
        slen: length of the encrypted signature; a cached spec is only valid
            for this exact length.
        Returns a callable mapping the encrypted signature string to the
        decrypted one.
        """
        id_m = re.match(
            r'.*-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3)?\.(?P<ext>[a-z]+)$',
            player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%d' % (player_type, player_id, slen)
        # Guard against path traversal through a crafted player URL.
        assert os.path.basename(func_id) == func_id
        cache_dir = get_cachedir(self._downloader.params)

        cache_enabled = cache_dir is not None
        if cache_enabled:
            cache_fn = os.path.join(os.path.expanduser(cache_dir),
                                    u'youtube-sigfuncs',
                                    func_id + '.json')
            try:
                with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
                    cache_spec = json.load(cachef)
                # The cached spec is a list of source indices; picking the
                # characters in that order reproduces the decryption.
                return lambda s: u''.join(s[i] for i in cache_spec)
            except IOError:
                pass  # No cache available

        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=u'Downloading %s player %s' % (player_type, player_id),
                errnote=u'Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=u'Downloading %s player %s' % (player_type, player_id),
                errnote=u'Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        if cache_enabled:
            try:
                # Run the extracted function on a probe string of distinct
                # characters to record the index permutation, then persist it.
                test_string = u''.join(map(compat_chr, range(slen)))
                cache_res = res(test_string)
                cache_spec = [ord(c) for c in cache_res]
                try:
                    os.makedirs(os.path.dirname(cache_fn))
                except OSError as ose:
                    if ose.errno != errno.EEXIST:
                        raise
                write_json_file(cache_spec, cache_fn)
            except Exception:
                # Caching is best-effort; never fail extraction because of it.
                tb = traceback.format_exc()
                self._downloader.report_warning(
                    u'Writing cache to %r failed: %s' % (cache_fn, tb))

        return res
407
    def _print_sig_code(self, func, slen):
        """Print Python source code equivalent to the signature function.

        Runs *func* on a probe string of length *slen*, recovers the index
        permutation, and renders it compactly as slice/index expressions
        (for pasting into a static decryption routine).
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render a contiguous run of indices as a Python slice.
                starts = u'' if start == 0 else str(start)
                ends = (u':%d' % (end+step)) if end + step >= 0 else u':'
                steps = u'' if step == 1 else (u':%d' % step)
                return u's[%s%s%s]' % (starts, ends, steps)

            step = None
            start = '(Never used)'  # Quelch pyflakes warnings - start will be
                                    # set as soon as step is set
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Inside a run: extend it, or flush the finished slice.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # Start of a new ascending/descending run.
                    step = i - prev
                    start = prev
                    continue
                else:
                    yield u's[%d]' % prev
            # Flush the last element or the still-open run.
            if step is None:
                yield u's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = u''.join(map(compat_chr, range(slen)))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = u' + '.join(gen_sig_code(cache_spec))
        code = u'if len(s) == %d:\n    return %s\n' % (slen, expr_code)
        self.to_screen(u'Extracted signature function:\n' + code)
443
444 def _parse_sig_js(self, jscode):
445 funcname = self._search_regex(
446 r'signature=([$a-zA-Z]+)', jscode,
447 u'Initial JS player signature function name')
448
449 jsi = JSInterpreter(jscode)
450 initial_function = jsi.extract_function(funcname)
451 return lambda s: initial_function([s])
452
453 def _parse_sig_swf(self, file_contents):
454 swfi = SWFInterpreter(file_contents)
455 TARGET_CLASSNAME = u'SignatureDecipher'
456 searched_class = swfi.extract_class(TARGET_CLASSNAME)
457 initial_function = swfi.extract_function(searched_class, u'decipher')
458 return lambda s: initial_function([s])
459
460 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
461 """Turn the encrypted s field into a working signature"""
462
463 if player_url is None:
464 raise ExtractorError(u'Cannot decrypt signature without player_url')
465
466 if player_url.startswith(u'//'):
467 player_url = u'https:' + player_url
468 try:
469 player_id = (player_url, len(s))
470 if player_id not in self._player_cache:
471 func = self._extract_signature_function(
472 video_id, player_url, len(s)
473 )
474 self._player_cache[player_id] = func
475 func = self._player_cache[player_id]
476 if self._downloader.params.get('youtube_print_sig_code'):
477 self._print_sig_code(func, len(s))
478 return func(s)
479 except Exception as e:
480 tb = traceback.format_exc()
481 raise ExtractorError(
482 u'Automatic signature extraction failed: ' + tb, cause=e)
483
484 def _get_available_subtitles(self, video_id, webpage):
485 try:
486 sub_list = self._download_webpage(
487 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
488 video_id, note=False)
489 except ExtractorError as err:
490 self._downloader.report_warning(u'unable to download video subtitles: %s' % compat_str(err))
491 return {}
492 lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
493
494 sub_lang_list = {}
495 for l in lang_list:
496 lang = l[1]
497 params = compat_urllib_parse.urlencode({
498 'lang': lang,
499 'v': video_id,
500 'fmt': self._downloader.params.get('subtitlesformat', 'srt'),
501 'name': unescapeHTML(l[0]).encode('utf-8'),
502 })
503 url = u'https://www.youtube.com/api/timedtext?' + params
504 sub_lang_list[lang] = url
505 if not sub_lang_list:
506 self._downloader.report_warning(u'video doesn\'t have subtitles')
507 return {}
508 return sub_lang_list
509
    def _get_available_automatic_caption(self, video_id, webpage):
        """Return a map of language code -> automatic-caption (ASR) URL.

        We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.  Returns {} (after a warning)
        when no automatic captions are available.
        """
        sub_format = self._downloader.params.get('subtitlesformat', 'srt')
        self.to_screen(u'%s: Looking for automatic captions' % video_id)
        mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
        err_msg = u'Couldn\'t find automatic captions for %s' % video_id
        if mobj is None:
            self._downloader.report_warning(err_msg)
            return {}
        player_config = json.loads(mobj.group(1))
        try:
            args = player_config[u'args']
            caption_url = args[u'ttsurl']
            timestamp = args[u'timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            # Automatic captions only exist when the original track is ASR.
            original_lang_node = caption_list.find('track')
            if original_lang_node is None or original_lang_node.attrib.get('kind') != 'asr' :
                self._downloader.report_warning(u'Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']

            sub_lang_list = {}
            # Each <target> is a language the ASR track can be translated to.
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                params = compat_urllib_parse.urlencode({
                    'lang': original_lang,
                    'tlang': sub_lang,
                    'fmt': sub_format,
                    'ts': timestamp,
                    'kind': 'asr',
                })
                sub_lang_list[sub_lang] = caption_url + '&' + params
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
556
557 @classmethod
558 def extract_id(cls, url):
559 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
560 if mobj is None:
561 raise ExtractorError(u'Invalid URL: %s' % url)
562 video_id = mobj.group(2)
563 return video_id
564
565 def _extract_from_m3u8(self, manifest_url, video_id):
566 url_map = {}
567 def _get_urls(_manifest):
568 lines = _manifest.split('\n')
569 urls = filter(lambda l: l and not l.startswith('#'),
570 lines)
571 return urls
572 manifest = self._download_webpage(manifest_url, video_id, u'Downloading formats manifest')
573 formats_urls = _get_urls(manifest)
574 for format_url in formats_urls:
575 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
576 url_map[itag] = format_url
577 return url_map
578
579 def _extract_annotations(self, video_id):
580 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
581 return self._download_webpage(url, video_id, note=u'Searching for annotations.', errnote=u'Unable to download video annotations.')
582
583 def _real_extract(self, url):
584 proto = (
585 u'http' if self._downloader.params.get('prefer_insecure', False)
586 else u'https')
587
588 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
589 mobj = re.search(self._NEXT_URL_RE, url)
590 if mobj:
591 url = proto + '://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
592 video_id = self.extract_id(url)
593
594 # Get video webpage
595 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
596 video_webpage = self._download_webpage(url, video_id)
597
598 # Attempt to extract SWF player URL
599 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
600 if mobj is not None:
601 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
602 else:
603 player_url = None
604
605 # Get video info
606 self.report_video_info_webpage_download(video_id)
607 if re.search(r'player-age-gate-content">', video_webpage) is not None:
608 self.report_age_confirmation()
609 age_gate = True
610 # We simulate the access to the video from www.youtube.com/v/{video_id}
611 # this can be viewed without login into Youtube
612 data = compat_urllib_parse.urlencode({'video_id': video_id,
613 'el': 'player_embedded',
614 'gl': 'US',
615 'hl': 'en',
616 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
617 'asv': 3,
618 'sts':'1588',
619 })
620 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
621 video_info_webpage = self._download_webpage(video_info_url, video_id,
622 note=False,
623 errnote='unable to download video info webpage')
624 video_info = compat_parse_qs(video_info_webpage)
625 else:
626 age_gate = False
627 for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
628 video_info_url = (proto + '://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
629 % (video_id, el_type))
630 video_info_webpage = self._download_webpage(video_info_url, video_id,
631 note=False,
632 errnote='unable to download video info webpage')
633 video_info = compat_parse_qs(video_info_webpage)
634 if 'token' in video_info:
635 break
636 if 'token' not in video_info:
637 if 'reason' in video_info:
638 raise ExtractorError(
639 u'YouTube said: %s' % video_info['reason'][0],
640 expected=True, video_id=video_id)
641 else:
642 raise ExtractorError(
643 u'"token" parameter not in video info for unknown reason',
644 video_id=video_id)
645
646 if 'view_count' in video_info:
647 view_count = int(video_info['view_count'][0])
648 else:
649 view_count = None
650
651 # Check for "rental" videos
652 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
653 raise ExtractorError(u'"rental" videos not supported')
654
655 # Start extracting information
656 self.report_information_extraction(video_id)
657
658 # uploader
659 if 'author' not in video_info:
660 raise ExtractorError(u'Unable to extract uploader name')
661 video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
662
663 # uploader_id
664 video_uploader_id = None
665 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
666 if mobj is not None:
667 video_uploader_id = mobj.group(1)
668 else:
669 self._downloader.report_warning(u'unable to extract uploader nickname')
670
671 # title
672 if 'title' in video_info:
673 video_title = video_info['title'][0]
674 else:
675 self._downloader.report_warning(u'Unable to extract video title')
676 video_title = u'_'
677
678 # thumbnail image
679 # We try first to get a high quality image:
680 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
681 video_webpage, re.DOTALL)
682 if m_thumb is not None:
683 video_thumbnail = m_thumb.group(1)
684 elif 'thumbnail_url' not in video_info:
685 self._downloader.report_warning(u'unable to extract video thumbnail')
686 video_thumbnail = None
687 else: # don't panic if we can't find it
688 video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
689
690 # upload date
691 upload_date = None
692 mobj = re.search(r'(?s)id="eow-date.*?>(.*?)</span>', video_webpage)
693 if mobj is None:
694 mobj = re.search(
695 r'(?s)id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live) on (.*?)</strong>',
696 video_webpage)
697 if mobj is not None:
698 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
699 upload_date = unified_strdate(upload_date)
700
701 m_cat_container = get_element_by_id("eow-category", video_webpage)
702 if m_cat_container:
703 category = self._html_search_regex(
704 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
705 default=None)
706 video_categories = None if category is None else [category]
707 else:
708 video_categories = None
709
710 # description
711 video_description = get_element_by_id("eow-description", video_webpage)
712 if video_description:
713 video_description = re.sub(r'''(?x)
714 <a\s+
715 (?:[a-zA-Z-]+="[^"]+"\s+)*?
716 title="([^"]+)"\s+
717 (?:[a-zA-Z-]+="[^"]+"\s+)*?
718 class="yt-uix-redirect-link"\s*>
719 [^<]+
720 </a>
721 ''', r'\1', video_description)
722 video_description = clean_html(video_description)
723 else:
724 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
725 if fd_mobj:
726 video_description = unescapeHTML(fd_mobj.group(1))
727 else:
728 video_description = u''
729
730 def _extract_count(klass):
731 count = self._search_regex(
732 r'class="%s">([\d,]+)</span>' % re.escape(klass),
733 video_webpage, klass, default=None)
734 if count is not None:
735 return int(count.replace(',', ''))
736 return None
737 like_count = _extract_count(u'likes-count')
738 dislike_count = _extract_count(u'dislikes-count')
739
740 # subtitles
741 video_subtitles = self.extract_subtitles(video_id, video_webpage)
742
743 if self._downloader.params.get('listsubtitles', False):
744 self._list_available_subtitles(video_id, video_webpage)
745 return
746
747 if 'length_seconds' not in video_info:
748 self._downloader.report_warning(u'unable to extract video duration')
749 video_duration = None
750 else:
751 video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))
752
753 # annotations
754 video_annotations = None
755 if self._downloader.params.get('writeannotations', False):
756 video_annotations = self._extract_annotations(video_id)
757
758 # Decide which formats to download
759 try:
760 mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
761 if not mobj:
762 raise ValueError('Could not find vevo ID')
763 json_code = uppercase_escape(mobj.group(1))
764 ytplayer_config = json.loads(json_code)
765 args = ytplayer_config['args']
766 # Easy way to know if the 's' value is in url_encoded_fmt_stream_map
767 # this signatures are encrypted
768 if 'url_encoded_fmt_stream_map' not in args:
769 raise ValueError(u'No stream_map present') # caught below
770 re_signature = re.compile(r'[&,]s=')
771 m_s = re_signature.search(args['url_encoded_fmt_stream_map'])
772 if m_s is not None:
773 self.to_screen(u'%s: Encrypted signatures detected.' % video_id)
774 video_info['url_encoded_fmt_stream_map'] = [args['url_encoded_fmt_stream_map']]
775 m_s = re_signature.search(args.get('adaptive_fmts', u''))
776 if m_s is not None:
777 if 'adaptive_fmts' in video_info:
778 video_info['adaptive_fmts'][0] += ',' + args['adaptive_fmts']
779 else:
780 video_info['adaptive_fmts'] = [args['adaptive_fmts']]
781 except ValueError:
782 pass
783
784 def _map_to_format_list(urlmap):
785 formats = []
786 for itag, video_real_url in urlmap.items():
787 dct = {
788 'format_id': itag,
789 'url': video_real_url,
790 'player_url': player_url,
791 }
792 if itag in self._formats:
793 dct.update(self._formats[itag])
794 formats.append(dct)
795 return formats
796
797 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
798 self.report_rtmp_download()
799 formats = [{
800 'format_id': '_rtmp',
801 'protocol': 'rtmp',
802 'url': video_info['conn'][0],
803 'player_url': player_url,
804 }]
805 elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
806 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
807 if 'rtmpe%3Dyes' in encoded_url_map:
808 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
809 url_map = {}
810 for url_data_str in encoded_url_map.split(','):
811 url_data = compat_parse_qs(url_data_str)
812 if 'itag' in url_data and 'url' in url_data:
813 url = url_data['url'][0]
814 if 'sig' in url_data:
815 url += '&signature=' + url_data['sig'][0]
816 elif 's' in url_data:
817 encrypted_sig = url_data['s'][0]
818
819 if not age_gate:
820 jsplayer_url_json = self._search_regex(
821 r'"assets":.+?"js":\s*("[^"]+")',
822 video_webpage, u'JS player URL')
823 player_url = json.loads(jsplayer_url_json)
824 if player_url is None:
825 player_url_json = self._search_regex(
826 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
827 video_webpage, u'age gate player URL')
828 player_url = json.loads(player_url_json)
829
830 if self._downloader.params.get('verbose'):
831 if player_url is None:
832 player_version = 'unknown'
833 player_desc = 'unknown'
834 else:
835 if player_url.endswith('swf'):
836 player_version = self._search_regex(
837 r'-(.+)\.swf$', player_url,
838 u'flash player', fatal=False)
839 player_desc = 'flash player %s' % player_version
840 else:
841 player_version = self._search_regex(
842 r'html5player-(.+?)\.js', video_webpage,
843 'html5 player', fatal=False)
844 player_desc = u'html5 player %s' % player_version
845
846 parts_sizes = u'.'.join(compat_str(len(part)) for part in encrypted_sig.split('.'))
847 self.to_screen(u'encrypted signature length %d (%s), itag %s, %s' %
848 (len(encrypted_sig), parts_sizes, url_data['itag'][0], player_desc))
849
850 signature = self._decrypt_signature(
851 encrypted_sig, video_id, player_url, age_gate)
852 url += '&signature=' + signature
853 if 'ratebypass' not in url:
854 url += '&ratebypass=yes'
855 url_map[url_data['itag'][0]] = url
856 formats = _map_to_format_list(url_map)
857 elif video_info.get('hlsvp'):
858 manifest_url = video_info['hlsvp'][0]
859 url_map = self._extract_from_m3u8(manifest_url, video_id)
860 formats = _map_to_format_list(url_map)
861 else:
862 raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
863
864 # Look for the DASH manifest
865 if (self._downloader.params.get('youtube_include_dash_manifest', False)):
866 try:
867 # The DASH manifest used needs to be the one from the original video_webpage.
868 # The one found in get_video_info seems to be using different signatures.
869 # However, in the case of an age restriction there won't be any embedded dashmpd in the video_webpage.
870 # Luckily, it seems, this case uses some kind of default signature (len == 86), so the
871 # combination of get_video_info and the _static_decrypt_signature() decryption fallback will work here.
872 if age_gate:
873 dash_manifest_url = video_info.get('dashmpd')[0]
874 else:
875 dash_manifest_url = ytplayer_config['args']['dashmpd']
876 def decrypt_sig(mobj):
877 s = mobj.group(1)
878 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
879 return '/signature/%s' % dec_s
880 dash_manifest_url = re.sub(r'/s/([\w\.]+)', decrypt_sig, dash_manifest_url)
881 dash_doc = self._download_xml(
882 dash_manifest_url, video_id,
883 note=u'Downloading DASH manifest',
884 errnote=u'Could not download DASH manifest')
885 for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
886 url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
887 if url_el is None:
888 continue
889 format_id = r.attrib['id']
890 video_url = url_el.text
891 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
892 f = {
893 'format_id': format_id,
894 'url': video_url,
895 'width': int_or_none(r.attrib.get('width')),
896 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
897 'asr': int_or_none(r.attrib.get('audioSamplingRate')),
898 'filesize': filesize,
899 }
900 try:
901 existing_format = next(
902 fo for fo in formats
903 if fo['format_id'] == format_id)
904 except StopIteration:
905 f.update(self._formats.get(format_id, {}))
906 formats.append(f)
907 else:
908 existing_format.update(f)
909
910 except (ExtractorError, KeyError) as e:
911 self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)
912
913 self._sort_formats(formats)
914
915 return {
916 'id': video_id,
917 'uploader': video_uploader,
918 'uploader_id': video_uploader_id,
919 'upload_date': upload_date,
920 'title': video_title,
921 'thumbnail': video_thumbnail,
922 'description': video_description,
923 'categories': video_categories,
924 'subtitles': video_subtitles,
925 'duration': video_duration,
926 'age_limit': 18 if age_gate else 0,
927 'annotations': video_annotations,
928 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
929 'view_count': view_count,
930 'like_count': like_count,
931 'dislike_count': dislike_count,
932 'formats': formats,
933 }
934
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
    """Extract the videos of a YouTube.com playlist, including the
    auto-generated mixes whose ids start with 'RD'."""
    IE_DESC = u'YouTube.com playlists'
    # Matches the many historical playlist URL shapes; group 1 or 2 carries
    # the playlist id.
    _VALID_URL = r"""(?x)(?:
                     (?:https?://)?
                     (?:\w+\.)?
                     youtube\.com/
                     (?:
                        (?:course|view_play_list|my_playlists|artist|playlist|watch)
                        \? (?:.*?&)*? (?:p|a|list)=
                     |  p/
                     )
                     (
                         (?:PL|LL|EC|UU|FL|RD)?[0-9A-Za-z-_]{10,}
                         # Top tracks, they can also include dots
                         |(?:MC)[\w\.]*
                     )
                     .*
                  |
                     ((?:PL|LL|EC|UU|FL|RD)[0-9A-Za-z-_]{10,})
                  )"""
    _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
    # Present in a page's "load more" widget while further pages exist.
    _MORE_PAGES_INDICATOR = r'data-link-type="next"'
    _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)'
    IE_NAME = u'youtube:playlist'

    def _real_initialize(self):
        # Logging in (when credentials are configured) gives access to
        # private playlists.
        self._login()

    def _ids_to_results(self, ids):
        # Wrap bare video ids into url_result dicts delegated to YoutubeIE.
        return [self.url_result(vid_id, 'Youtube', video_id=vid_id)
                for vid_id in ids]

    def _extract_mix(self, playlist_id):
        # The mixes are generated from a a single video
        # the id of the playlist is just 'RD' + video_id
        url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
        webpage = self._download_webpage(url, playlist_id, u'Downloading Youtube mix')
        search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
        # The title element's class name has changed over time; try each
        # known variant in turn.
        title_span = (search_title('playlist-title') or
            search_title('title long-title') or search_title('title'))
        title = clean_html(title_span)
        video_re = r'''(?x)data-video-username=".*?".*?
                       href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id)
        ids = orderedSet(re.findall(video_re, webpage, flags=re.DOTALL))
        url_results = self._ids_to_results(ids)

        return self.playlist_result(url_results, playlist_id, title)

    def _real_extract(self, url):
        # Extract playlist id
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        playlist_id = mobj.group(1) or mobj.group(2)

        # Check if it's a video-specific URL
        query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        if 'v' in query_dict:
            video_id = query_dict['v'][0]
            if self._downloader.params.get('noplaylist'):
                self.to_screen(u'Downloading just video %s because of --no-playlist' % video_id)
                return self.url_result(video_id, 'Youtube', video_id=video_id)
            else:
                self.to_screen(u'Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

        if playlist_id.startswith('RD'):
            # Mixes require a custom extraction process
            return self._extract_mix(playlist_id)
        if playlist_id.startswith('TL'):
            raise ExtractorError(u'For downloading YouTube.com top lists, use '
                u'the "yttoplist" keyword, for example "youtube-dl \'yttoplist:music:Top Tracks\'"', expected=True)

        url = self._TEMPLATE_URL % playlist_id
        page = self._download_webpage(url, playlist_id)
        more_widget_html = content_html = page

        # Check if the playlist exists or is private
        if re.search(r'<div class="yt-alert-message">[^<]*?(The|This) playlist (does not exist|is private)[^<]*?</div>', page) is not None:
            raise ExtractorError(
                u'The playlist doesn\'t exist or is private, use --username or '
                '--netrc to access it.',
                expected=True)

        # Extract the video ids from the playlist pages
        ids = []

        for page_num in itertools.count(1):
            matches = re.finditer(self._VIDEO_RE, content_html)
            # We remove the duplicates and the link with index 0
            # (it's not the first video of the playlist)
            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
            ids.extend(new_ids)

            # Follow the "load more" widget's ajax link to the next page.
            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        playlist_title = self._html_search_regex(
            r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
            page, u'title')

        url_results = self._ids_to_results(ids)
        return self.playlist_result(url_results, playlist_id, playlist_title)
1045
1046
class YoutubeTopListIE(YoutubePlaylistIE):
    """Resolve a 'yttoplist:channel:title' keyword to the matching playlist
    on the channel page and extract its videos."""
    IE_NAME = u'youtube:toplist'
    IE_DESC = (u'YouTube.com top lists, "yttoplist:{channel}:{list title}"'
        u' (Example: "yttoplist:music:Top Tracks")')
    _VALID_URL = r'yttoplist:(?P<chann>.*?):(?P<title>.*?)$'

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        channel_name = match.group('chann')
        list_title = match.group('title')
        # Locate the playlist link on the channel page via its urlencoded title.
        encoded_title = compat_urllib_parse.urlencode({'title': list_title})
        list_link_re = 'href="([^"]+?%s.*?)"' % re.escape(encoded_title)
        channel_page = self._download_webpage(
            'https://www.youtube.com/%s' % channel_name, list_title)
        list_link = self._html_search_regex(list_link_re, channel_page, u'list')
        playlist_url = compat_urlparse.urljoin('https://www.youtube.com/', list_link)

        entry_re = r'data-index="\d+".*?data-video-id="([0-9A-Za-z_-]{11})"'
        # sometimes the webpage doesn't contain the videos
        # retry until we get them
        attempt = 0
        while True:
            note = u'Downloading Youtube mix'
            if attempt > 0:
                note += ', retry #%d' % attempt
            list_page = self._download_webpage(playlist_url, list_title, note)
            video_ids = orderedSet(re.findall(entry_re, list_page))
            if video_ids:
                break
            attempt += 1
        return self.playlist_result(
            self._ids_to_results(video_ids), playlist_title=list_title)
1077
1078
class YoutubeChannelIE(InfoExtractor):
    """Extract all videos of a YouTube.com channel."""
    IE_DESC = u'YouTube.com channels'
    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)"
    _MORE_PAGES_INDICATOR = 'yt-uix-load-more'
    _MORE_PAGES_URL = 'https://www.youtube.com/c4_browse_ajax?action_load_more_videos=1&flow=list&paging=%s&view=0&sort=da&channel_id=%s'
    IE_NAME = u'youtube:channel'

    def extract_videos_from_page(self, page):
        """Return the video ids linked from the page, deduplicated while
        preserving their order of first appearance."""
        found = []
        for video_id in re.findall(r'href="/watch\?v=([0-9A-Za-z_-]+)&?', page):
            if video_id not in found:
                found.append(video_id)
        return found

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        channel_id = mobj.group(1)
        channel_url = 'https://www.youtube.com/channel/%s/videos' % channel_id
        channel_page = self._download_webpage(channel_url, channel_id)
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # Autogenerated channels list everything on a single page;
            # their ajax pages come back empty.
            video_ids = self.extract_videos_from_page(channel_page)
        else:
            # Page through the json-based channel_ajax query until the
            # "load more" widget disappears.
            video_ids = []
            for pagenum in itertools.count(1):
                page = self._download_json(
                    self._MORE_PAGES_URL % (pagenum, channel_id), channel_id,
                    note=u'Downloading page #%s' % pagenum,
                    transform_source=uppercase_escape)

                video_ids.extend(
                    self.extract_videos_from_page(page['content_html']))

                if self._MORE_PAGES_INDICATOR not in page['load_more_widget_html']:
                    break

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))

        entries = [self.url_result(video_id, 'Youtube', video_id=video_id)
                   for video_id in video_ids]
        return self.playlist_result(entries, channel_id)
1133
1134
class YoutubeUserIE(InfoExtractor):
    """Extract all uploads of a YouTube.com user via the GData API."""
    IE_DESC = u'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)([A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://gdata.youtube.com/feeds/api/users/%s'
    _GDATA_PAGE_SIZE = 50
    _GDATA_URL = 'https://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d&alt=json'
    IE_NAME = u'youtube:user'

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with other youtube
        # extractors: the regex is too permissive and it would match their
        # URLs as well.
        other_ies = iter(klass for (name, klass) in globals().items()
                         if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        return super(YoutubeUserIE, cls).suitable(url)

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        # Download video ids using YouTube Data API. Result size per
        # query is limited (currently to 50 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        def download_page(pagenum):
            # GData start indices are 1-based.
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(
                gdata_url, username,
                u'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                # A feed without entries means all uploads were fetched.
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': video_id,
                    'title': title,
                }
        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)
1195
1196
class YoutubeSearchIE(SearchInfoExtractor):
    """Search YouTube.com through the GData API ("ytsearchN:query")."""
    IE_DESC = u'YouTube.com searches'
    _API_URL = u'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
    _MAX_RESULTS = 1000
    IE_NAME = u'youtube:search'
    _SEARCH_KEY = 'ytsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        video_ids = []
        pagenum = 0
        limit = n
        PAGE_SIZE = 50

        while (PAGE_SIZE * pagenum) < limit:
            result_url = self._API_URL % (
                compat_urllib_parse.quote_plus(query.encode('utf-8')),
                (PAGE_SIZE * pagenum) + 1)
            data_json = self._download_webpage(
                result_url, video_id=u'query "%s"' % query,
                note=u'Downloading page %s' % (pagenum + 1),
                errnote=u'Unable to download API page')
            data = json.loads(data_json)
            api_response = data['data']

            if 'items' not in api_response:
                raise ExtractorError(
                    u'[youtube] No video results', expected=True)

            # List comprehension instead of list(generator) (C400 idiom).
            video_ids += [video['id'] for video in api_response['items']]

            # The API may know of fewer results than requested; tighten the
            # limit so we stop paging once everything has been fetched.
            limit = min(n, api_response['totalItems'])
            pagenum += 1

        # The last page is fixed-size, so we may have overshot n; the slice
        # is a no-op when len(video_ids) <= n.
        video_ids = video_ids[:n]
        videos = [self.url_result(video_id, 'Youtube', video_id=video_id)
                  for video_id in video_ids]
        return self.playlist_result(videos, query)
1238
1239
class YoutubeSearchDateIE(YoutubeSearchIE):
    """Same search as YoutubeSearchIE, but newest videos come first."""
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    IE_DESC = u'YouTube.com searches, newest videos first'
    _SEARCH_KEY = 'ytsearchdate'
    # Identical endpoint to the parent, plus orderby=published.
    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc&orderby=published'
1245
1246
class YoutubeSearchURLIE(InfoExtractor):
    """Extract the videos listed on a YouTube.com search-results URL."""
    IE_DESC = u'YouTube.com search URLs'
    IE_NAME = u'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'

    def _real_extract(self, url):
        query = compat_urllib_parse.unquote_plus(
            re.match(self._VALID_URL, url).group('query'))

        webpage = self._download_webpage(url, query)
        result_code = self._search_regex(
            r'(?s)<ol class="item-section"(.*?)</ol>', webpage, u'result HTML')

        def _entry(part_code):
            # Each "lockup title" block carries one video link and title.
            title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
            url_snippet = self._html_search_regex(
                r'(?s)href="([^"]+)"', part_code, 'item URL')
            return {
                '_type': 'url',
                'url': compat_urlparse.urljoin('https://www.youtube.com/', url_snippet),
                'title': title,
            }

        entries = [
            _entry(part_code) for part_code in re.findall(
                r'(?s)<h3 class="yt-lockup-title">(.*?)</h3>', result_code)]

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
1281
1282
class YoutubeShowIE(InfoExtractor):
    """Extract a multi-season YouTube.com show as one playlist per season."""
    IE_DESC = u'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(.*)'
    IE_NAME = u'youtube:show'

    def _real_extract(self, url):
        show_name = re.match(self._VALID_URL, url).group(1)
        webpage = self._download_webpage(url, show_name, u'Downloading show webpage')
        # There's one playlist for each season of the show
        season_matches = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
        self.to_screen(u'%s: Found %s seasons' % (show_name, len(season_matches)))
        return [
            self.url_result('https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
            for season in season_matches]
1296
1297
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for extractors that fetch info from
    http://www.youtube.com/feed_ajax
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    # Feeds are account-bound, so credentials are mandatory.
    _LOGIN_REQUIRED = True
    # use action_load_personal_feed instead of action_load_system_feed
    _PERSONAL_FEED = False

    @property
    def _FEED_TEMPLATE(self):
        # URL template; a single '%s' is left for the paging offset
        # (note the escaped '%%s').
        action = 'action_load_system_feed'
        if self._PERSONAL_FEED:
            action = 'action_load_personal_feed'
        return 'https://www.youtube.com/feed_ajax?%s=1&feed_name=%s&paging=%%s' % (action, self._FEED_NAME)

    @property
    def IE_NAME(self):
        return u'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        # Walk the feed page by page, collecting video ids, until the
        # "load more" widget no longer advertises a next paging offset.
        feed_entries = []
        paging = 0
        for i in itertools.count(1):
            info = self._download_json(self._FEED_TEMPLATE % paging,
                                       u'%s feed' % self._FEED_NAME,
                                       u'Downloading page %s' % i)
            # Some feeds answer with 'feed_html', others with 'content_html'.
            feed_html = info.get('feed_html') or info.get('content_html')
            m_ids = re.finditer(r'"/watch\?v=(.*?)["&]', feed_html)
            ids = orderedSet(m.group(1) for m in m_ids)
            feed_entries.extend(
                self.url_result(video_id, 'Youtube', video_id=video_id)
                for video_id in ids)
            mobj = re.search(
                r'data-uix-load-more-href="/?[^"]+paging=(?P<paging>\d+)',
                feed_html)
            if mobj is None:
                break
            paging = mobj.group('paging')
        return self.playlist_result(feed_entries, playlist_title=self._PLAYLIST_TITLE)
1342
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    """The subscriptions feed of the logged-in account (system feed)."""
    IE_DESC = u'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = u'Youtube Subscriptions'
1348
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    """The recommended-videos feed of the logged-in account (system feed)."""
    IE_DESC = u'YouTube.com recommended videos, "ytrec" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = u'Youtube Recommended videos'
1354
class YoutubeWatchLaterIE(YoutubeFeedsInfoExtractor):
    """The watch-later list of the logged-in account."""
    IE_DESC = u'Youtube watch later list, "ytwatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/watch_later|:ytwatchlater'
    _FEED_NAME = 'watch_later'
    _PLAYLIST_TITLE = u'Youtube Watch Later'
    # Watch-later is tied to the account, so use the personal-feed action.
    _PERSONAL_FEED = True
1361
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    """The watch history of the logged-in account."""
    IE_DESC = u'Youtube watch history, "ythistory" keyword (requires authentication)'
    # Raw string like every sibling extractor: the previous plain u''
    # literal only worked because Python preserves the invalid '\.' escape.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    # History is tied to the account, so use the personal-feed action.
    _PERSONAL_FEED = True
    _PLAYLIST_TITLE = u'Youtube Watch History'
1368
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    """Resolve the logged-in user's favourites to its underlying playlist."""
    IE_NAME = u'youtube:favorites'
    IE_DESC = u'YouTube.com favourite videos, "ytfav" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        # The favourites page embeds the backing playlist id; hand the id
        # over to the playlist extractor.
        webpage = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        playlist_id = self._search_regex(
            r'list=(.+?)["&]', webpage, u'favourites playlist id')
        return self.url_result(playlist_id, 'YoutubePlaylist')
1379
1380
class YoutubeTruncatedURLIE(InfoExtractor):
    """Catch watch URLs whose v= parameter is missing (typically because the
    URL was not quoted in the shell) and fail with a helpful message."""
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?[^/]+/watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+
        )?$|
        (?:https?://)?(?:www\.)?youtube\.com/attribution_link\?a=[^&]+$
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Always an error: such URLs carry no video id to extract.
        raise ExtractorError(
            u'Did you forget to quote the URL? Remember that & is a meta '
            u'character in most shells, so you want to put the URL in quotes, '
            u'like youtube-dl '
            u'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            u' or simply youtube-dl BaW_jenozKc .',
            expected=True)