# youtube_dl/extractor/youtube.py
# (snapshot at commit: "[facebook] Make alternative title optional (Closes #7700)")
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import re
10 import time
11 import traceback
12
13 from .common import InfoExtractor, SearchInfoExtractor
14 from ..jsinterp import JSInterpreter
15 from ..swfinterp import SWFInterpreter
16 from ..compat import (
17 compat_chr,
18 compat_parse_qs,
19 compat_urllib_parse,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlparse,
23 compat_urlparse,
24 compat_str,
25 )
26 from ..utils import (
27 clean_html,
28 encode_dict,
29 ExtractorError,
30 float_or_none,
31 get_element_by_attribute,
32 get_element_by_id,
33 int_or_none,
34 orderedSet,
35 parse_duration,
36 remove_start,
37 sanitized_Request,
38 smuggle_url,
39 str_to_int,
40 unescapeHTML,
41 unified_strdate,
42 unsmuggle_url,
43 uppercase_escape,
44 ISO3166Utils,
45 )
46
47
class YoutubeBaseInfoExtractor(InfoExtractor):
    """Provide base functions for Youtube extractors"""
    _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
    _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
    _NETRC_MACHINE = 'youtube'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = False

    def _set_language(self):
        """Force the English interface by presetting the PREF cookie."""
        self._set_cookie(
            '.youtube.com', 'PREF', 'f1=50000000&hl=en',
            # YouTube sets the expire time to about two months
            expire_time=time.time() + 2 * 30 * 24 * 3600)

    def _ids_to_results(self, ids):
        """Wrap each video id in a url_result entry handled by YoutubeIE."""
        return [
            self.url_result(vid_id, 'Youtube', video_id=vid_id)
            for vid_id in ids]

    def _login(self):
        """
        Attempt to log in to YouTube.
        True is returned if successful or skipped.
        False is returned if login failed.

        If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return True

        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='unable to fetch login page', fatal=False)
        if login_page is False:
            # Fix: return False explicitly (was a bare `return`, i.e. None)
            # to honor the documented contract that failure returns False.
            return False

        galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
                                  login_page, 'Login GALX parameter')

        # Log in
        login_form_strs = {
            'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
            'Email': username,
            'GALX': galx,
            'Passwd': password,

            'PersistentCookie': 'yes',
            '_utf8': '霱',
            'bgresponse': 'js_disabled',
            'checkConnection': '',
            'checkedDomains': 'youtube',
            'dnConn': '',
            'pstMsg': '0',
            'rmShown': '1',
            'secTok': '',
            'signIn': 'Sign in',
            'timeStmp': '',
            'service': 'youtube',
            'uilel': '3',
            'hl': 'en_US',
        }

        login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')

        req = sanitized_Request(self._LOGIN_URL, login_data)
        login_results = self._download_webpage(
            req, None,
            note='Logging in', errnote='unable to log in', fatal=False)
        if login_results is False:
            return False

        if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
            raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)

        # Two-Factor
        # TODO add SMS and phone call support - these require making a request and then prompting the user

        if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
            tfa_code = self._get_tfa_info('2-step verification code')

            if not tfa_code:
                self._downloader.report_warning(
                    'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
                    '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
                return False

            tfa_code = remove_start(tfa_code, 'G-')

            # Carry over every hidden input of the challenge form, then add
            # the user-supplied PIN.
            tfa_form_strs = self._form_hidden_inputs('challenge', login_results)

            tfa_form_strs.update({
                'Pin': tfa_code,
                'TrustDevice': 'on',
            })

            tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')

            tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
            tfa_results = self._download_webpage(
                tfa_req, None,
                note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)

            if tfa_results is False:
                return False

            if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
                self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
                return False
            if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
                self._downloader.report_warning('unable to log in - did the page structure change?')
                return False
            if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
                self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
                return False

        if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
            self._downloader.report_warning('unable to log in: bad username or password')
            return False
        return True

    def _real_initialize(self):
        # Called once before extraction; without a downloader there is
        # nothing to set up.
        if self._downloader is None:
            return
        self._set_language()
        if not self._login():
            return
179
180
class YoutubeEntryListBaseInfoExtractor(InfoExtractor):
    # Extract entries from a page that paginates with a "Load more" button.
    def _entries(self, page, playlist_id):
        """Yield entries from *page*, following AJAX "Load more" continuations."""
        widget_html = page
        content_html = page
        page_num = 0
        while True:
            page_num += 1
            for entry in self._process_page(content_html):
                yield entry

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', widget_html)
            if not mobj:
                # No further continuation link: we are done.
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            if not content_html.strip():
                # Some webpages show a "Load more" button but they don't
                # have more videos
                break
            widget_html = more['load_more_widget_html']
203
204
class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        # Wrap every scraped (id, title) pair in a url_result entry.
        for video_id, video_title in self.extract_videos_from_page(content):
            yield self.url_result(video_id, 'Youtube', video_id, video_title)

    def extract_videos_from_page(self, page):
        """Scrape (video_id, title) pairs from *page*, de-duplicated by id.

        A later occurrence of an id can only fill in a missing title; it
        never overwrites a title that is already known.
        """
        found_ids = []
        found_titles = []
        for mobj in re.finditer(self._VIDEO_RE, page):
            # The link with index 0 is not the first video of the playlist (not sure if still actual)
            # NOTE(review): this compares the 'id' group, not 'index' — with
            # 11-character video ids the condition can never hold; confirm
            # whether 'index' was intended before changing it.
            if 'index' in mobj.groupdict() and mobj.group('id') == '0':
                continue
            current_id = mobj.group('id')
            current_title = unescapeHTML(mobj.group('title'))
            if current_title:
                current_title = current_title.strip()
            if current_id in found_ids:
                pos = found_ids.index(current_id)
                if current_title and not found_titles[pos]:
                    found_titles[pos] = current_title
            else:
                found_ids.append(current_id)
                found_titles.append(current_title)
        return zip(found_ids, found_titles)
229
230
class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
    def _process_page(self, content):
        # Every href match is a playlist id; emit a url_result pointing at
        # the canonical playlist URL so YoutubePlaylistIE picks it up.
        for pl_id in re.findall(r'href="/?playlist\?list=(.+?)"', content):
            yield self.url_result(
                'https://www.youtube.com/playlist?list=%s' % pl_id, 'YoutubePlaylist')

    def _real_extract(self, url):
        """Download the overview page and return a playlist of playlists."""
        pl_id = self._match_id(url)
        page = self._download_webpage(url, pl_id)
        page_title = self._og_search_title(page, fatal=False)
        return self.playlist_result(self._entries(page, pl_id), pl_id, page_title)
242
243
class YoutubeIE(YoutubeBaseInfoExtractor):
    IE_DESC = 'YouTube.com'
    # Verbose (?x) pattern: matches the many URL shapes that can carry a
    # video id, or a naked 11-character id on its own.
    _VALID_URL = r"""(?x)^
                     (
                         (?:https?://|//)                                    # http(s):// or protocol-independent URL
                         (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
                            (?:www\.)?deturl\.com/www\.youtube\.com/|
                            (?:www\.)?pwnyoutube\.com/|
                            (?:www\.)?yourepeat\.com/|
                            tube\.majestyc\.net/|
                            youtube\.googleapis\.com/)                        # the various hostnames, with wildcard subdomains
                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
                         (?:                                                  # the various things that can precede the ID:
                             (?:(?:v|embed|e)/(?!videoseries))                # v/ or embed/ or e/
                             |(?:                                             # or the v= param in all its forms
                                 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)?  # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
                                 (?:.*?[&;])??                                # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
                                 v=
                             )
                         ))
                         |(?:
                            youtu\.be|                                        # just youtu.be/xxxx
                            vid\.plus                                         # or vid.plus/xxxx
                         )/
                         |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
                         )
                     )?                                                       # all until now is optional -> you can pass the naked ID
                     ([0-9A-Za-z_-]{11})                                      # here is it! the YouTube video ID
                     (?!.*?&list=)                                            # combined list/video URLs are handled by the playlist IE
                     (?(1).+)?                                                # if we found the ID, everything can follow
                     $"""
    # Extracts the next_url parameter used by confirmation redirects.
    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
    # Known itag -> format attributes. Negative 'preference' values rank
    # 3D/HLS/DASH variants below the plain combined formats.
    _formats = {
        '5': {'ext': 'flv', 'width': 400, 'height': 240},
        '6': {'ext': 'flv', 'width': 450, 'height': 270},
        '13': {'ext': '3gp'},
        '17': {'ext': '3gp', 'width': 176, 'height': 144},
        '18': {'ext': 'mp4', 'width': 640, 'height': 360},
        '22': {'ext': 'mp4', 'width': 1280, 'height': 720},
        '34': {'ext': 'flv', 'width': 640, 'height': 360},
        '35': {'ext': 'flv', 'width': 854, 'height': 480},
        '36': {'ext': '3gp', 'width': 320, 'height': 240},
        '37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
        '38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
        '43': {'ext': 'webm', 'width': 640, 'height': 360},
        '44': {'ext': 'webm', 'width': 854, 'height': 480},
        '45': {'ext': 'webm', 'width': 1280, 'height': 720},
        '46': {'ext': 'webm', 'width': 1920, 'height': 1080},
        '59': {'ext': 'mp4', 'width': 854, 'height': 480},
        '78': {'ext': 'mp4', 'width': 854, 'height': 480},


        # 3d videos
        '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
        '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
        '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
        '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
        '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
        '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
        '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},

        # Apple HTTP Live Streaming
        '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
        '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
        '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
        '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
        '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
        '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},

        # DASH mp4 video
        '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},  # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
        '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
        '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},

        # Dash mp4 audio
        '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
        '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},

        # Dash webm
        '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
        '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
        '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
        '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
        '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
        '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},

        # Dash webm audio
        '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
        '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},

        # Dash webm audio with opus inside
        '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
        '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
        '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},

        # RTMP (unnamed)
        '_rtmp': {'protocol': 'rtmp'},
    }
368
    IE_NAME = 'youtube'
    # Each entry below pins one extraction scenario: cipher signatures,
    # age gates, DASH manifests, multifeed streams, odd URL shapes, ...
    _TESTS = [
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'like_count': int,
                'dislike_count': int,
                'start_time': 1,
                'end_time': 9,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
            'note': 'Test generic use_cipher_signature video (#897)',
            'info_dict': {
                'id': 'UxxajLWwzqY',
                'ext': 'mp4',
                'upload_date': '20120506',
                'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
                'description': 'md5:782e8651347686cba06e58f71ab51773',
                'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
                         'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
                         'iconic ep', 'iconic', 'love', 'it'],
                'uploader': 'Icona Pop',
                'uploader_id': 'IconaPop',
            }
        },
        {
            'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
            'note': 'Test VEVO video with age protection (#956)',
            'info_dict': {
                'id': '07FYdnEawAQ',
                'ext': 'mp4',
                'upload_date': '20130703',
                'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
                'description': 'md5:64249768eec3bc4276236606ea996373',
                'uploader': 'justintimberlakeVEVO',
                'uploader_id': 'justintimberlakeVEVO',
                'age_limit': 18,
            }
        },
        {
            'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
            'note': 'Embed-only video (#1746)',
            'info_dict': {
                'id': 'yZIXLfi8CZQ',
                'ext': 'mp4',
                'upload_date': '20120608',
                'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
                'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
                'uploader': 'SET India',
                'uploader_id': 'setindia',
                'age_limit': 18,
            }
        },
        {
            'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
            'note': 'Use the first video ID in the URL',
            'info_dict': {
                'id': 'BaW_jenozKc',
                'ext': 'mp4',
                'title': 'youtube-dl test video "\'/\\ä↭𝕐',
                'uploader': 'Philipp Hagemeister',
                'uploader_id': 'phihag',
                'upload_date': '20121002',
                'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
                'categories': ['Science & Technology'],
                'tags': ['youtube-dl'],
                'like_count': int,
                'dislike_count': int,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
            'note': '256k DASH audio (format 141) via DASH manifest',
            'info_dict': {
                'id': 'a9LDPn-MO4I',
                'ext': 'm4a',
                'upload_date': '20121002',
                'uploader_id': '8KVIDEO',
                'description': '',
                'uploader': '8KVIDEO',
                'title': 'UHDTV TEST 8K VIDEO.mp4'
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # DASH manifest with encrypted signature
        {
            'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
            'info_dict': {
                'id': 'IB3lcPjvWLA',
                'ext': 'm4a',
                'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
                'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
                'uploader': 'AfrojackVEVO',
                'uploader_id': 'AfrojackVEVO',
                'upload_date': '20131011',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # JS player signature function name containing $
        {
            'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
            'info_dict': {
                'id': 'nfWlot6h_JM',
                'ext': 'm4a',
                'title': 'Taylor Swift - Shake It Off',
                'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
                'uploader': 'TaylorSwiftVEVO',
                'uploader_id': 'TaylorSwiftVEVO',
                'upload_date': '20140818',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '141',
            },
        },
        # Controversy video
        {
            'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
            'info_dict': {
                'id': 'T4XJQO3qol8',
                'ext': 'mp4',
                'upload_date': '20100909',
                'uploader': 'The Amazing Atheist',
                'uploader_id': 'TheAmazingAtheist',
                'title': 'Burning Everyone\'s Koran',
                'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
            }
        },
        # Normal age-gate video (No vevo, embed allowed)
        {
            'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
            'info_dict': {
                'id': 'HtVdAasjOgU',
                'ext': 'mp4',
                'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
                'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
                'uploader': 'The Witcher',
                'uploader_id': 'WitcherGame',
                'upload_date': '20140605',
                'age_limit': 18,
            },
        },
        # Age-gate video with encrypted signature
        {
            'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
            'info_dict': {
                'id': '6kLq3WMV1nU',
                'ext': 'mp4',
                'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
                'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
                'uploader': 'LloydVEVO',
                'uploader_id': 'LloydVEVO',
                'upload_date': '20110629',
                'age_limit': 18,
            },
        },
        # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
        {
            'url': '__2ABJjxzNo',
            'info_dict': {
                'id': '__2ABJjxzNo',
                'ext': 'mp4',
                'upload_date': '20100430',
                'uploader_id': 'deadmau5',
                'description': 'md5:12c56784b8032162bb936a5f76d55360',
                'uploader': 'deadmau5',
                'title': 'Deadmau5 - Some Chords (HD)',
            },
            'expected_warnings': [
                'DASH manifest missing',
            ]
        },
        # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
        {
            'url': 'lqQg6PlCWgI',
            'info_dict': {
                'id': 'lqQg6PlCWgI',
                'ext': 'mp4',
                'upload_date': '20150827',
                'uploader_id': 'olympic',
                'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
                'uploader': 'Olympics',
                'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
            },
            'params': {
                'skip_download': 'requires avconv',
            }
        },
        # Non-square pixels
        {
            'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
            'info_dict': {
                'id': '_b-2C3KPAM0',
                'ext': 'mp4',
                'stretched_ratio': 16 / 9.,
                'upload_date': '20110310',
                'uploader_id': 'AllenMeow',
                'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
                'uploader': '孫艾倫',
                'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
            },
        },
        # url_encoded_fmt_stream_map is empty string
        {
            'url': 'qEJwOuvDf7I',
            'info_dict': {
                'id': 'qEJwOuvDf7I',
                'ext': 'webm',
                'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
                'description': '',
                'upload_date': '20150404',
                'uploader_id': 'spbelect',
                'uploader': 'Наблюдатели Петербурга',
            },
            'params': {
                'skip_download': 'requires avconv',
            }
        },
        # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
        {
            'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
            'info_dict': {
                'id': 'FIl7x6_3R5Y',
                'ext': 'mp4',
                'title': 'md5:7b81415841e02ecd4313668cde88737a',
                'description': 'md5:116377fd2963b81ec4ce64b542173306',
                'upload_date': '20150625',
                'uploader_id': 'dorappi2000',
                'uploader': 'dorappi2000',
                'formats': 'mincount:33',
            },
        },
        # DASH manifest with segment_list
        {
            'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
            'md5': '8ce563a1d667b599d21064e982ab9e31',
            'info_dict': {
                'id': 'CsmdDsKjzN8',
                'ext': 'mp4',
                'upload_date': '20150501',  # According to '<meta itemprop="datePublished"', but in other places it's 20150510
                'uploader': 'Airtek',
                'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
                'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
                'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
            },
            'params': {
                'youtube_include_dash_manifest': True,
                'format': '135',  # bestvideo
            }
        },
        {
            # Multifeed videos (multiple cameras), URL is for Main Camera
            'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
            'info_dict': {
                'id': 'jqWvoWXjCVs',
                'title': 'teamPGP: Rocket League Noob Stream',
                'description': 'md5:dc7872fb300e143831327f1bae3af010',
            },
            'playlist': [{
                'info_dict': {
                    'id': 'jqWvoWXjCVs',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': '6h8e8xoXJzg',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': 'PUOgX5z9xZw',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }, {
                'info_dict': {
                    'id': 'teuwxikvS5k',
                    'ext': 'mp4',
                    'title': 'teamPGP: Rocket League Noob Stream (zim)',
                    'description': 'md5:dc7872fb300e143831327f1bae3af010',
                    'upload_date': '20150721',
                    'uploader': 'Beer Games Beer',
                    'uploader_id': 'beergamesbeer',
                },
            }],
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://vid.plus/FlRa-iH7PGw',
            'only_matching': True,
        },
        {
            # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
            'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
            'info_dict': {
                'id': 'lsguqyKfVQg',
                'ext': 'mp4',
                'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
                'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
                'upload_date': '20151119',
                'uploader_id': 'IronSoulElf',
                'uploader': 'IronSoulElf',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
            'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
            'only_matching': True,
        },
        {
            # Video with yt:stretch=17:0
            'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
            'info_dict': {
                'id': 'Q39EVAstoRM',
                'ext': 'mp4',
                'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
                'description': 'md5:ee18a25c350637c8faff806845bddee9',
                'upload_date': '20151107',
                'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
                'uploader': 'CH GAMER DROID',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
            'only_matching': True,
        }
    ]
738
    def __init__(self, *args, **kwargs):
        super(YoutubeIE, self).__init__(*args, **kwargs)
        # Per-instance cache of signature-deciphering functions, keyed by
        # (player_url, signature length pattern); see _decrypt_signature.
        self._player_cache = {}
742
743 def report_video_info_webpage_download(self, video_id):
744 """Report attempt to download video info webpage."""
745 self.to_screen('%s: Downloading video info webpage' % video_id)
746
747 def report_information_extraction(self, video_id):
748 """Report attempt to extract video information."""
749 self.to_screen('%s: Extracting video information' % video_id)
750
751 def report_unavailable_format(self, video_id, format):
752 """Report extracted video URL."""
753 self.to_screen('%s: Format %s not available' % (video_id, format))
754
    def report_rtmp_download(self):
        """Indicate the download will use the RTMP protocol."""
        self.to_screen('RTMP download detected')
758
759 def _signature_cache_id(self, example_sig):
760 """ Return a string representation of a signature """
761 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
762
    def _extract_signature_function(self, video_id, player_url, example_sig):
        """Build the signature-deciphering function for a player.

        The player is identified by the id/extension parsed out of
        player_url; example_sig contributes only its part-length pattern
        to the cache key. Results are persisted in the filesystem cache
        as a permutation of character indices.
        """
        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
            player_url)
        if not id_m:
            raise ExtractorError('Cannot identify player %r' % player_url)
        player_type = id_m.group('ext')
        player_id = id_m.group('id')

        # Read from filesystem cache
        func_id = '%s_%s_%s' % (
            player_type, player_id, self._signature_cache_id(example_sig))
        # A func_id containing a path separator would escape the cache dir.
        assert os.path.basename(func_id) == func_id

        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
        if cache_spec is not None:
            # The cached spec is a list of source-character indices.
            return lambda s: ''.join(s[i] for i in cache_spec)

        download_note = (
            'Downloading player %s' % player_url
            if self._downloader.params.get('verbose') else
            'Downloading %s player %s' % (player_type, player_id)
        )
        # Dispatch on player type: html5 (JS) or flash (SWF).
        if player_type == 'js':
            code = self._download_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            res = self._parse_sig_js(code)
        elif player_type == 'swf':
            urlh = self._request_webpage(
                player_url, video_id,
                note=download_note,
                errnote='Download of %s failed' % player_url)
            code = urlh.read()
            res = self._parse_sig_swf(code)
        else:
            assert False, 'Invalid player type %r' % player_type

        # Run the function on a probe string of distinct characters to
        # recover the index permutation it performs, then cache that.
        # NOTE(review): this assumes the function only reorders/drops
        # characters of its input — confirm before relying on the cache.
        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = res(test_string)
        cache_spec = [ord(c) for c in cache_res]

        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
        return res
808
    def _print_sig_code(self, func, example_sig):
        """Print Python source equivalent to the deciphering *func*.

        Used when the 'youtube_print_sig_code' option is set: the index
        permutation is compressed into slice expressions where possible.
        """
        def gen_sig_code(idxs):
            def _genslice(start, end, step):
                # Render s[start:end:step], dropping parts equal to defaults.
                starts = '' if start == 0 else str(start)
                ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                steps = '' if step == 1 else (':%d' % step)
                return 's[%s%s%s]' % (starts, ends, steps)

            step = None
            # Quelch pyflakes warnings - start will be set when step is set
            start = '(Never used)'
            # Walk consecutive index pairs, detecting runs of stride +/-1.
            # NOTE(review): assumes idxs has at least two elements; with
            # fewer, 'i' below would be unbound.
            for i, prev in zip(idxs[1:], idxs[:-1]):
                if step is not None:
                    # Currently inside a run: extend it or flush it.
                    if i - prev == step:
                        continue
                    yield _genslice(start, prev, step)
                    step = None
                    continue
                if i - prev in [-1, 1]:
                    # A new run starts at the previous index.
                    step = i - prev
                    start = prev
                    continue
                else:
                    # Isolated index: emit a plain subscript.
                    yield 's[%d]' % prev
            # Flush the final element or the final open run.
            if step is None:
                yield 's[%d]' % i
            else:
                yield _genslice(start, i, step)

        test_string = ''.join(map(compat_chr, range(len(example_sig))))
        cache_res = func(test_string)
        cache_spec = [ord(c) for c in cache_res]
        expr_code = ' + '.join(gen_sig_code(cache_spec))
        signature_id_tuple = '(%s)' % (
            ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
        code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
                '    return %s\n') % (signature_id_tuple, expr_code)
        self.to_screen('Extracted signature function:\n' + code)
847
848 def _parse_sig_js(self, jscode):
849 funcname = self._search_regex(
850 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
851 'Initial JS player signature function name')
852
853 jsi = JSInterpreter(jscode)
854 initial_function = jsi.extract_function(funcname)
855 return lambda s: initial_function([s])
856
857 def _parse_sig_swf(self, file_contents):
858 swfi = SWFInterpreter(file_contents)
859 TARGET_CLASSNAME = 'SignatureDecipher'
860 searched_class = swfi.extract_class(TARGET_CLASSNAME)
861 initial_function = swfi.extract_function(searched_class, 'decipher')
862 return lambda s: initial_function([s])
863
864 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
865 """Turn the encrypted s field into a working signature"""
866
867 if player_url is None:
868 raise ExtractorError('Cannot decrypt signature without player_url')
869
870 if player_url.startswith('//'):
871 player_url = 'https:' + player_url
872 try:
873 player_id = (player_url, self._signature_cache_id(s))
874 if player_id not in self._player_cache:
875 func = self._extract_signature_function(
876 video_id, player_url, s
877 )
878 self._player_cache[player_id] = func
879 func = self._player_cache[player_id]
880 if self._downloader.params.get('youtube_print_sig_code'):
881 self._print_sig_code(func, s)
882 return func(s)
883 except Exception as e:
884 tb = traceback.format_exc()
885 raise ExtractorError(
886 'Signature extraction failed: ' + tb, cause=e)
887
888 def _get_subtitles(self, video_id, webpage):
889 try:
890 subs_doc = self._download_xml(
891 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
892 video_id, note=False)
893 except ExtractorError as err:
894 self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
895 return {}
896
897 sub_lang_list = {}
898 for track in subs_doc.findall('track'):
899 lang = track.attrib['lang_code']
900 if lang in sub_lang_list:
901 continue
902 sub_formats = []
903 for ext in ['sbv', 'vtt', 'srt']:
904 params = compat_urllib_parse.urlencode({
905 'lang': lang,
906 'v': video_id,
907 'fmt': ext,
908 'name': track.attrib['name'].encode('utf-8'),
909 })
910 sub_formats.append({
911 'url': 'https://www.youtube.com/api/timedtext?' + params,
912 'ext': ext,
913 })
914 sub_lang_list[lang] = sub_formats
915 if not sub_lang_list:
916 self._downloader.report_warning('video doesn\'t have subtitles')
917 return {}
918 return sub_lang_list
919
920 def _get_ytplayer_config(self, video_id, webpage):
921 patterns = (
922 # User data may contain arbitrary character sequences that may affect
923 # JSON extraction with regex, e.g. when '};' is contained the second
924 # regex won't capture the whole JSON. Yet working around by trying more
925 # concrete regex first keeping in mind proper quoted string handling
926 # to be implemented in future that will replace this workaround (see
927 # https://github.com/rg3/youtube-dl/issues/7468,
928 # https://github.com/rg3/youtube-dl/pull/7599)
929 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
930 r';ytplayer\.config\s*=\s*({.+?});',
931 )
932 config = self._search_regex(
933 patterns, webpage, 'ytplayer.config', default=None)
934 if config:
935 return self._parse_json(
936 uppercase_escape(config), video_id, fatal=False)
937
    def _get_automatic_captions(self, video_id, webpage):
        """We need the webpage for getting the captions url, pass it as an
        argument to speed up the process.

        Returns a dict mapping language code -> list of caption format dicts,
        or {} (with a warning) when no automatic captions are available.
        """
        self.to_screen('%s: Looking for automatic captions' % video_id)
        player_config = self._get_ytplayer_config(video_id, webpage)
        err_msg = 'Couldn\'t find automatic captions for %s' % video_id
        if not player_config:
            self._downloader.report_warning(err_msg)
            return {}
        # Missing 'args'/'ttsurl'/'timestamp' keys fall through to the
        # KeyError handler at the bottom and are reported as err_msg.
        try:
            args = player_config['args']
            caption_url = args['ttsurl']
            timestamp = args['timestamp']
            # We get the available subtitles
            list_params = compat_urllib_parse.urlencode({
                'type': 'list',
                'tlangs': 1,
                'asrs': 1,
            })
            list_url = caption_url + '&' + list_params
            caption_list = self._download_xml(list_url, video_id)
            # The 'track' node describes the original (ASR) caption track.
            original_lang_node = caption_list.find('track')
            if original_lang_node is None:
                self._downloader.report_warning('Video doesn\'t have automatic captions')
                return {}
            original_lang = original_lang_node.attrib['lang_code']
            caption_kind = original_lang_node.attrib.get('kind', '')

            # Each 'target' node is a language the original track can be
            # translated into; offer every supported serialization for it.
            sub_lang_list = {}
            for lang_node in caption_list.findall('target'):
                sub_lang = lang_node.attrib['lang_code']
                sub_formats = []
                for ext in ['sbv', 'vtt', 'srt']:
                    params = compat_urllib_parse.urlencode({
                        'lang': original_lang,
                        'tlang': sub_lang,
                        'fmt': ext,
                        'ts': timestamp,
                        'kind': caption_kind,
                    })
                    sub_formats.append({
                        'url': caption_url + '&' + params,
                        'ext': ext,
                    })
                sub_lang_list[sub_lang] = sub_formats
            return sub_lang_list
        # An extractor error can be raise by the download process if there are
        # no automatic captions but there are subtitles
        except (KeyError, ExtractorError):
            self._downloader.report_warning(err_msg)
            return {}
989
990 @classmethod
991 def extract_id(cls, url):
992 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
993 if mobj is None:
994 raise ExtractorError('Invalid URL: %s' % url)
995 video_id = mobj.group(2)
996 return video_id
997
998 def _extract_from_m3u8(self, manifest_url, video_id):
999 url_map = {}
1000
1001 def _get_urls(_manifest):
1002 lines = _manifest.split('\n')
1003 urls = filter(lambda l: l and not l.startswith('#'),
1004 lines)
1005 return urls
1006 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1007 formats_urls = _get_urls(manifest)
1008 for format_url in formats_urls:
1009 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1010 url_map[itag] = format_url
1011 return url_map
1012
1013 def _extract_annotations(self, video_id):
1014 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1015 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1016
    def _parse_dash_manifest(
            self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
        """Download a DASH MPD manifest and return a list of format dicts.

        The manifest URL may carry an encrypted signature as a /s/<sig> path
        component; it is deciphered in place before downloading. Returns []
        when the download fails and fatal is False.
        """
        def decrypt_sig(mobj):
            # Replace /s/<encrypted> with /signature/<deciphered> in the URL.
            s = mobj.group(1)
            dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
            return '/signature/%s' % dec_s
        dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
        dash_doc = self._download_xml(
            dash_manifest_url, video_id,
            note='Downloading DASH manifest',
            errnote='Could not download DASH manifest',
            fatal=fatal)

        # _download_xml returns False (not None) on non-fatal failure.
        if dash_doc is False:
            return []

        formats = []
        for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
            # NOTE(review): mimeType is read with .get(); a set lacking the
            # attribute would reach .startswith below with None — presumably
            # real manifests always carry it; verify before relying on it.
            mime_type = a.attrib.get('mimeType')
            for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                if url_el is None:
                    continue
                if mime_type == 'text/vtt':
                    # TODO implement WebVTT downloading
                    pass
                elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
                    segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    # contentLength lives in a YouTube-specific XML namespace.
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'height': int_or_none(r.attrib.get('height')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                        'fps': int_or_none(r.attrib.get('frameRate')),
                    }
                    if segment_list is not None:
                        f.update({
                            'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
                            'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
                            'protocol': 'http_dash_segments',
                        })
                    # Deduplicate by format_id: first occurrence is enriched
                    # with the static metadata from self._formats, later ones
                    # only update the existing entry.
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        full_info = self._formats.get(format_id, {}).copy()
                        full_info.update(f)
                        codecs = r.attrib.get('codecs')
                        if codecs:
                            # Fill in whichever codec side the static table
                            # marks as absent ('none').
                            if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
                                full_info['vcodec'] = codecs
                            elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
                                full_info['acodec'] = codecs
                        formats.append(full_info)
                    else:
                        existing_format.update(f)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
        return formats
1083
1084 def _real_extract(self, url):
1085 url, smuggled_data = unsmuggle_url(url, {})
1086
1087 proto = (
1088 'http' if self._downloader.params.get('prefer_insecure', False)
1089 else 'https')
1090
1091 start_time = None
1092 end_time = None
1093 parsed_url = compat_urllib_parse_urlparse(url)
1094 for component in [parsed_url.fragment, parsed_url.query]:
1095 query = compat_parse_qs(component)
1096 if start_time is None and 't' in query:
1097 start_time = parse_duration(query['t'][0])
1098 if start_time is None and 'start' in query:
1099 start_time = parse_duration(query['start'][0])
1100 if end_time is None and 'end' in query:
1101 end_time = parse_duration(query['end'][0])
1102
1103 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1104 mobj = re.search(self._NEXT_URL_RE, url)
1105 if mobj:
1106 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1107 video_id = self.extract_id(url)
1108
1109 # Get video webpage
1110 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1111 video_webpage = self._download_webpage(url, video_id)
1112
1113 # Attempt to extract SWF player URL
1114 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1115 if mobj is not None:
1116 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1117 else:
1118 player_url = None
1119
1120 dash_mpds = []
1121
1122 def add_dash_mpd(video_info):
1123 dash_mpd = video_info.get('dashmpd')
1124 if dash_mpd and dash_mpd[0] not in dash_mpds:
1125 dash_mpds.append(dash_mpd[0])
1126
1127 # Get video info
1128 embed_webpage = None
1129 is_live = None
1130 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1131 age_gate = True
1132 # We simulate the access to the video from www.youtube.com/v/{video_id}
1133 # this can be viewed without login into Youtube
1134 url = proto + '://www.youtube.com/embed/%s' % video_id
1135 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1136 data = compat_urllib_parse.urlencode({
1137 'video_id': video_id,
1138 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1139 'sts': self._search_regex(
1140 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1141 })
1142 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1143 video_info_webpage = self._download_webpage(
1144 video_info_url, video_id,
1145 note='Refetching age-gated info webpage',
1146 errnote='unable to download video info webpage')
1147 video_info = compat_parse_qs(video_info_webpage)
1148 add_dash_mpd(video_info)
1149 else:
1150 age_gate = False
1151 video_info = None
1152 # Try looking directly into the video webpage
1153 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1154 if ytplayer_config:
1155 args = ytplayer_config['args']
1156 if args.get('url_encoded_fmt_stream_map'):
1157 # Convert to the same format returned by compat_parse_qs
1158 video_info = dict((k, [v]) for k, v in args.items())
1159 add_dash_mpd(video_info)
1160 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1161 is_live = True
1162 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1163 # We also try looking in get_video_info since it may contain different dashmpd
1164 # URL that points to a DASH manifest with possibly different itag set (some itags
1165 # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
1166 # manifest pointed by get_video_info's dashmpd).
1167 # The general idea is to take a union of itags of both DASH manifests (for example
1168 # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1169 self.report_video_info_webpage_download(video_id)
1170 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1171 video_info_url = (
1172 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1173 % (proto, video_id, el_type))
1174 video_info_webpage = self._download_webpage(
1175 video_info_url,
1176 video_id, note=False,
1177 errnote='unable to download video info webpage')
1178 get_video_info = compat_parse_qs(video_info_webpage)
1179 if get_video_info.get('use_cipher_signature') != ['True']:
1180 add_dash_mpd(get_video_info)
1181 if not video_info:
1182 video_info = get_video_info
1183 if 'token' in get_video_info:
1184 # Different get_video_info requests may report different results, e.g.
1185 # some may report video unavailability, but some may serve it without
1186 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1187 # the original webpage as well as el=info and el=embedded get_video_info
1188 # requests report video unavailability due to geo restriction while
1189 # el=detailpage succeeds and returns valid data). This is probably
1190 # due to YouTube measures against IP ranges of hosting providers.
1191 # Working around by preferring the first succeeded video_info containing
1192 # the token if no such video_info yet was found.
1193 if 'token' not in video_info:
1194 video_info = get_video_info
1195 break
1196 if 'token' not in video_info:
1197 if 'reason' in video_info:
1198 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1199 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1200 if regions_allowed:
1201 raise ExtractorError('YouTube said: This video is available in %s only' % (
1202 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1203 expected=True)
1204 raise ExtractorError(
1205 'YouTube said: %s' % video_info['reason'][0],
1206 expected=True, video_id=video_id)
1207 else:
1208 raise ExtractorError(
1209 '"token" parameter not in video info for unknown reason',
1210 video_id=video_id)
1211
1212 # title
1213 if 'title' in video_info:
1214 video_title = video_info['title'][0]
1215 else:
1216 self._downloader.report_warning('Unable to extract video title')
1217 video_title = '_'
1218
1219 # description
1220 video_description = get_element_by_id("eow-description", video_webpage)
1221 if video_description:
1222 video_description = re.sub(r'''(?x)
1223 <a\s+
1224 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1225 title="([^"]+)"\s+
1226 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1227 class="yt-uix-redirect-link"\s*>
1228 [^<]+
1229 </a>
1230 ''', r'\1', video_description)
1231 video_description = clean_html(video_description)
1232 else:
1233 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1234 if fd_mobj:
1235 video_description = unescapeHTML(fd_mobj.group(1))
1236 else:
1237 video_description = ''
1238
1239 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1240 if not self._downloader.params.get('noplaylist'):
1241 entries = []
1242 feed_ids = []
1243 multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
1244 for feed in multifeed_metadata_list.split(','):
1245 feed_data = compat_parse_qs(feed)
1246 entries.append({
1247 '_type': 'url_transparent',
1248 'ie_key': 'Youtube',
1249 'url': smuggle_url(
1250 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1251 {'force_singlefeed': True}),
1252 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1253 })
1254 feed_ids.append(feed_data['id'][0])
1255 self.to_screen(
1256 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1257 % (', '.join(feed_ids), video_id))
1258 return self.playlist_result(entries, video_id, video_title, video_description)
1259 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1260
1261 if 'view_count' in video_info:
1262 view_count = int(video_info['view_count'][0])
1263 else:
1264 view_count = None
1265
1266 # Check for "rental" videos
1267 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1268 raise ExtractorError('"rental" videos not supported')
1269
1270 # Start extracting information
1271 self.report_information_extraction(video_id)
1272
1273 # uploader
1274 if 'author' not in video_info:
1275 raise ExtractorError('Unable to extract uploader name')
1276 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1277
1278 # uploader_id
1279 video_uploader_id = None
1280 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
1281 if mobj is not None:
1282 video_uploader_id = mobj.group(1)
1283 else:
1284 self._downloader.report_warning('unable to extract uploader nickname')
1285
1286 # thumbnail image
1287 # We try first to get a high quality image:
1288 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1289 video_webpage, re.DOTALL)
1290 if m_thumb is not None:
1291 video_thumbnail = m_thumb.group(1)
1292 elif 'thumbnail_url' not in video_info:
1293 self._downloader.report_warning('unable to extract video thumbnail')
1294 video_thumbnail = None
1295 else: # don't panic if we can't find it
1296 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1297
1298 # upload date
1299 upload_date = self._html_search_meta(
1300 'datePublished', video_webpage, 'upload date', default=None)
1301 if not upload_date:
1302 upload_date = self._search_regex(
1303 [r'(?s)id="eow-date.*?>(.*?)</span>',
1304 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1305 video_webpage, 'upload date', default=None)
1306 if upload_date:
1307 upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
1308 upload_date = unified_strdate(upload_date)
1309
1310 m_cat_container = self._search_regex(
1311 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1312 video_webpage, 'categories', default=None)
1313 if m_cat_container:
1314 category = self._html_search_regex(
1315 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1316 default=None)
1317 video_categories = None if category is None else [category]
1318 else:
1319 video_categories = None
1320
1321 video_tags = [
1322 unescapeHTML(m.group('content'))
1323 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1324
1325 def _extract_count(count_name):
1326 return str_to_int(self._search_regex(
1327 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1328 % re.escape(count_name),
1329 video_webpage, count_name, default=None))
1330
1331 like_count = _extract_count('like')
1332 dislike_count = _extract_count('dislike')
1333
1334 # subtitles
1335 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1336 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1337
1338 if 'length_seconds' not in video_info:
1339 self._downloader.report_warning('unable to extract video duration')
1340 video_duration = None
1341 else:
1342 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1343
1344 # annotations
1345 video_annotations = None
1346 if self._downloader.params.get('writeannotations', False):
1347 video_annotations = self._extract_annotations(video_id)
1348
1349 def _map_to_format_list(urlmap):
1350 formats = []
1351 for itag, video_real_url in urlmap.items():
1352 dct = {
1353 'format_id': itag,
1354 'url': video_real_url,
1355 'player_url': player_url,
1356 }
1357 if itag in self._formats:
1358 dct.update(self._formats[itag])
1359 formats.append(dct)
1360 return formats
1361
1362 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1363 self.report_rtmp_download()
1364 formats = [{
1365 'format_id': '_rtmp',
1366 'protocol': 'rtmp',
1367 'url': video_info['conn'][0],
1368 'player_url': player_url,
1369 }]
1370 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1371 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1372 if 'rtmpe%3Dyes' in encoded_url_map:
1373 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1374 formats = []
1375 for url_data_str in encoded_url_map.split(','):
1376 url_data = compat_parse_qs(url_data_str)
1377 if 'itag' not in url_data or 'url' not in url_data:
1378 continue
1379 format_id = url_data['itag'][0]
1380 url = url_data['url'][0]
1381
1382 if 'sig' in url_data:
1383 url += '&signature=' + url_data['sig'][0]
1384 elif 's' in url_data:
1385 encrypted_sig = url_data['s'][0]
1386 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1387
1388 jsplayer_url_json = self._search_regex(
1389 ASSETS_RE,
1390 embed_webpage if age_gate else video_webpage,
1391 'JS player URL (1)', default=None)
1392 if not jsplayer_url_json and not age_gate:
1393 # We need the embed website after all
1394 if embed_webpage is None:
1395 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1396 embed_webpage = self._download_webpage(
1397 embed_url, video_id, 'Downloading embed webpage')
1398 jsplayer_url_json = self._search_regex(
1399 ASSETS_RE, embed_webpage, 'JS player URL')
1400
1401 player_url = json.loads(jsplayer_url_json)
1402 if player_url is None:
1403 player_url_json = self._search_regex(
1404 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1405 video_webpage, 'age gate player URL')
1406 player_url = json.loads(player_url_json)
1407
1408 if self._downloader.params.get('verbose'):
1409 if player_url is None:
1410 player_version = 'unknown'
1411 player_desc = 'unknown'
1412 else:
1413 if player_url.endswith('swf'):
1414 player_version = self._search_regex(
1415 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1416 'flash player', fatal=False)
1417 player_desc = 'flash player %s' % player_version
1418 else:
1419 player_version = self._search_regex(
1420 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1421 player_url,
1422 'html5 player', fatal=False)
1423 player_desc = 'html5 player %s' % player_version
1424
1425 parts_sizes = self._signature_cache_id(encrypted_sig)
1426 self.to_screen('{%s} signature length %s, %s' %
1427 (format_id, parts_sizes, player_desc))
1428
1429 signature = self._decrypt_signature(
1430 encrypted_sig, video_id, player_url, age_gate)
1431 url += '&signature=' + signature
1432 if 'ratebypass' not in url:
1433 url += '&ratebypass=yes'
1434
1435 # Some itags are not included in DASH manifest thus corresponding formats will
1436 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1437 # Trying to extract metadata from url_encoded_fmt_stream_map entry.
1438 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1439 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1440 dct = {
1441 'format_id': format_id,
1442 'url': url,
1443 'player_url': player_url,
1444 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1445 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1446 'width': width,
1447 'height': height,
1448 'fps': int_or_none(url_data.get('fps', [None])[0]),
1449 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1450 }
1451 type_ = url_data.get('type', [None])[0]
1452 if type_:
1453 type_split = type_.split(';')
1454 kind_ext = type_split[0].split('/')
1455 if len(kind_ext) == 2:
1456 kind, ext = kind_ext
1457 dct['ext'] = ext
1458 if kind in ('audio', 'video'):
1459 codecs = None
1460 for mobj in re.finditer(
1461 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1462 if mobj.group('key') == 'codecs':
1463 codecs = mobj.group('val')
1464 break
1465 if codecs:
1466 codecs = codecs.split(',')
1467 if len(codecs) == 2:
1468 acodec, vcodec = codecs[0], codecs[1]
1469 else:
1470 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1471 dct.update({
1472 'acodec': acodec,
1473 'vcodec': vcodec,
1474 })
1475 if format_id in self._formats:
1476 dct.update(self._formats[format_id])
1477 formats.append(dct)
1478 elif video_info.get('hlsvp'):
1479 manifest_url = video_info['hlsvp'][0]
1480 url_map = self._extract_from_m3u8(manifest_url, video_id)
1481 formats = _map_to_format_list(url_map)
1482 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1483 for a_format in formats:
1484 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1485 else:
1486 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1487
1488 # Look for the DASH manifest
1489 if self._downloader.params.get('youtube_include_dash_manifest', True):
1490 dash_mpd_fatal = True
1491 for dash_manifest_url in dash_mpds:
1492 dash_formats = {}
1493 try:
1494 for df in self._parse_dash_manifest(
1495 video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
1496 # Do not overwrite DASH format found in some previous DASH manifest
1497 if df['format_id'] not in dash_formats:
1498 dash_formats[df['format_id']] = df
1499 # Additional DASH manifests may end up in HTTP Error 403 therefore
1500 # allow them to fail without bug report message if we already have
1501 # some DASH manifest succeeded. This is temporary workaround to reduce
1502 # burst of bug reports until we figure out the reason and whether it
1503 # can be fixed at all.
1504 dash_mpd_fatal = False
1505 except (ExtractorError, KeyError) as e:
1506 self.report_warning(
1507 'Skipping DASH manifest: %r' % e, video_id)
1508 if dash_formats:
1509 # Remove the formats we found through non-DASH, they
1510 # contain less info and it can be wrong, because we use
1511 # fixed values (for example the resolution). See
1512 # https://github.com/rg3/youtube-dl/issues/5774 for an
1513 # example.
1514 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1515 formats.extend(dash_formats.values())
1516
1517 # Check for malformed aspect ratio
1518 stretched_m = re.search(
1519 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1520 video_webpage)
1521 if stretched_m:
1522 w = float(stretched_m.group('w'))
1523 h = float(stretched_m.group('h'))
1524 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1525 # We will only process correct ratios.
1526 if w > 0 and h > 0:
1527 ratio = w / h
1528 for f in formats:
1529 if f.get('vcodec') != 'none':
1530 f['stretched_ratio'] = ratio
1531
1532 self._sort_formats(formats)
1533
1534 return {
1535 'id': video_id,
1536 'uploader': video_uploader,
1537 'uploader_id': video_uploader_id,
1538 'upload_date': upload_date,
1539 'title': video_title,
1540 'thumbnail': video_thumbnail,
1541 'description': video_description,
1542 'categories': video_categories,
1543 'tags': video_tags,
1544 'subtitles': video_subtitles,
1545 'automatic_captions': automatic_captions,
1546 'duration': video_duration,
1547 'age_limit': 18 if age_gate else 0,
1548 'annotations': video_annotations,
1549 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1550 'view_count': view_count,
1551 'like_count': like_count,
1552 'dislike_count': dislike_count,
1553 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1554 'formats': formats,
1555 'is_live': is_live,
1556 'start_time': start_time,
1557 'end_time': end_time,
1558 }
1559
1560
1561 class YoutubePlaylistIE(YoutubeBaseInfoExtractor, YoutubePlaylistBaseInfoExtractor):
1562 IE_DESC = 'YouTube.com playlists'
1563 _VALID_URL = r"""(?x)(?:
1564 (?:https?://)?
1565 (?:\w+\.)?
1566 youtube\.com/
1567 (?:
1568 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1569 \? (?:.*?[&;])*? (?:p|a|list)=
1570 | p/
1571 )
1572 (
1573 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1574 # Top tracks, they can also include dots
1575 |(?:MC)[\w\.]*
1576 )
1577 .*
1578 |
1579 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1580 )"""
1581 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1582 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1583 IE_NAME = 'youtube:playlist'
1584 _TESTS = [{
1585 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1586 'info_dict': {
1587 'title': 'ytdl test PL',
1588 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1589 },
1590 'playlist_count': 3,
1591 }, {
1592 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1593 'info_dict': {
1594 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1595 'title': 'YDL_Empty_List',
1596 },
1597 'playlist_count': 0,
1598 }, {
1599 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1600 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1601 'info_dict': {
1602 'title': '29C3: Not my department',
1603 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1604 },
1605 'playlist_count': 95,
1606 }, {
1607 'note': 'issue #673',
1608 'url': 'PLBB231211A4F62143',
1609 'info_dict': {
1610 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1611 'id': 'PLBB231211A4F62143',
1612 },
1613 'playlist_mincount': 26,
1614 }, {
1615 'note': 'Large playlist',
1616 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1617 'info_dict': {
1618 'title': 'Uploads from Cauchemar',
1619 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1620 },
1621 'playlist_mincount': 799,
1622 }, {
1623 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1624 'info_dict': {
1625 'title': 'YDL_safe_search',
1626 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1627 },
1628 'playlist_count': 2,
1629 }, {
1630 'note': 'embedded',
1631 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1632 'playlist_count': 4,
1633 'info_dict': {
1634 'title': 'JODA15',
1635 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1636 }
1637 }, {
1638 'note': 'Embedded SWF player',
1639 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1640 'playlist_count': 4,
1641 'info_dict': {
1642 'title': 'JODA7',
1643 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1644 }
1645 }, {
1646 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1647 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1648 'info_dict': {
1649 'title': 'Uploads from Interstellar Movie',
1650 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1651 },
1652 'playlist_mincout': 21,
1653 }]
1654
    def _real_initialize(self):
        # Log in (if credentials were supplied) before extraction so that
        # private playlists become accessible.
        self._login()
1657
1658 def _extract_mix(self, playlist_id):
1659 # The mixes are generated from a single video
1660 # the id of the playlist is just 'RD' + video_id
1661 url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
1662 webpage = self._download_webpage(
1663 url, playlist_id, 'Downloading Youtube mix')
1664 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1665 title_span = (
1666 search_title('playlist-title') or
1667 search_title('title long-title') or
1668 search_title('title'))
1669 title = clean_html(title_span)
1670 ids = orderedSet(re.findall(
1671 r'''(?xs)data-video-username=".*?".*?
1672 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1673 webpage))
1674 url_results = self._ids_to_results(ids)
1675
1676 return self.playlist_result(url_results, playlist_id, title)
1677
1678 def _extract_playlist(self, playlist_id):
1679 url = self._TEMPLATE_URL % playlist_id
1680 page = self._download_webpage(url, playlist_id)
1681
1682 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1683 match = match.strip()
1684 # Check if the playlist exists or is private
1685 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1686 raise ExtractorError(
1687 'The playlist doesn\'t exist or is private, use --username or '
1688 '--netrc to access it.',
1689 expected=True)
1690 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1691 raise ExtractorError(
1692 'Invalid parameters. Maybe URL is incorrect.',
1693 expected=True)
1694 elif re.match(r'[^<]*Choose your language[^<]*', match):
1695 continue
1696 else:
1697 self.report_warning('Youtube gives an alert message: ' + match)
1698
1699 playlist_title = self._html_search_regex(
1700 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1701 page, 'title')
1702
1703 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1704
1705 def _real_extract(self, url):
1706 # Extract playlist id
1707 mobj = re.match(self._VALID_URL, url)
1708 if mobj is None:
1709 raise ExtractorError('Invalid URL: %s' % url)
1710 playlist_id = mobj.group(1) or mobj.group(2)
1711
1712 # Check if it's a video-specific URL
1713 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1714 if 'v' in query_dict:
1715 video_id = query_dict['v'][0]
1716 if self._downloader.params.get('noplaylist'):
1717 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1718 return self.url_result(video_id, 'Youtube', video_id=video_id)
1719 else:
1720 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1721
1722 if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
1723 # Mixes require a custom extraction process
1724 return self._extract_mix(playlist_id)
1725
1726 return self._extract_playlist(playlist_id)
1727
1728
class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
    IE_DESC = 'YouTube.com channels'
    _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
    _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
    IE_NAME = 'youtube:channel'
    _TESTS = [{
        'note': 'paginated channel',
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
        'playlist_mincount': 91,
        'info_dict': {
            'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
            'title': 'Uploads from lex will',
        }
    }, {
        'note': 'Age restricted channel',
        # from https://www.youtube.com/user/DeusExOfficial
        'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
        'playlist_mincount': 64,
        'info_dict': {
            'id': 'UUs0ifCMCm1icqRbqhUINa0w',
            'title': 'Uploads from Deus Ex',
        },
    }]

    def _real_extract(self, url):
        """Extract a channel's videos, trying three strategies in order:

        1. Resolve the channel to its auto-generated 'UU...' uploads
           playlist and delegate to YoutubePlaylistIE (not limited to
           1050 videos).
        2. For auto-generated channels, scrape the single page (their ajax
           pages are empty).
        3. Fall back to page-by-page extraction via self._entries().
        """
        channel_id = self._match_id(url)

        url = self._TEMPLATE_URL % channel_id

        # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
        # Workaround by extracting as a playlist if managed to obtain channel playlist URL
        # otherwise fallback on channel by page extraction
        channel_page = self._download_webpage(
            url + '?view=57', channel_id,
            'Downloading channel page', fatal=False)
        if channel_page is False:
            # Download failed (fatal=False) -- skip the playlist shortcut.
            channel_playlist_id = False
        else:
            # Prefer the <meta> channel id; fall back to data-* attributes.
            channel_playlist_id = self._html_search_meta(
                'channelId', channel_page, 'channel id', default=None)
            if not channel_playlist_id:
                channel_playlist_id = self._search_regex(
                    r'data-(?:channel-external-|yt)id="([^"]+)"',
                    channel_page, 'channel id', default=None)
        if channel_playlist_id and channel_playlist_id.startswith('UC'):
            # 'UC...' channel id maps to the 'UU...' uploads playlist id.
            playlist_id = 'UU' + channel_playlist_id[2:]
            return self.url_result(
                compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')

        channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
        autogenerated = re.search(r'''(?x)
                class="[^"]*?(?:
                    channel-header-autogenerated-label|
                    yt-channel-title-autogenerated
                )[^"]*"''', channel_page) is not None

        if autogenerated:
            # The videos are contained in a single page
            # the ajax pages can't be used, they are empty
            entries = [
                self.url_result(
                    video_id, 'Youtube', video_id=video_id,
                    video_title=video_title)
                for video_id, video_title in self.extract_videos_from_page(channel_page)]
            return self.playlist_result(entries, channel_id)

        # Regular channel: paginate with the inherited _entries() helper.
        return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
1797
1798
class YoutubeUserIE(YoutubeChannelIE):
    IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
    _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
    IE_NAME = 'youtube:user'

    _TESTS = [{
        'url': 'https://www.youtube.com/user/TheLinuxFoundation',
        'playlist_mincount': 320,
        'info_dict': {
            'title': 'TheLinuxFoundation',
        }
    }, {
        'url': 'ytuser:phihag',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor: our regex is too permissive and it would match those
        # URLs as well.
        other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
        if any(ie.suitable(url) for ie in other_ies):
            return False
        else:
            return super(YoutubeUserIE, cls).suitable(url)
1825
1826
class YoutubeUserPlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
    # Attribute-only subclass: all extraction logic lives in the
    # playlists base class; this only pins down the URL shape and name.
    IE_DESC = 'YouTube.com user playlists'
    _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/user/(?P<id>[^/]+)/playlists'
    IE_NAME = 'youtube:user:playlists'

    _TESTS = [{
        'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
        'playlist_mincount': 4,
        'info_dict': {
            'id': 'ThirstForScience',
            'title': 'Thirst for Science',
        },
    }, {
        # with "Load more" button
        'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
        'playlist_mincount': 70,
        'info_dict': {
            'id': 'igorkle1',
            'title': 'Игорь Клейнер',
        },
    }]
1848
1849
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
    IE_DESC = 'YouTube.com searches'
    # there doesn't appear to be a real limit, for example if you search for
    # 'python' you get more than 8.000.000 results
    _MAX_RESULTS = float('inf')
    IE_NAME = 'youtube:search'
    _SEARCH_KEY = 'ytsearch'
    _EXTRA_QUERY_ARGS = {}
    _TESTS = []

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        videos = []
        limit = n

        for pagenum in itertools.count(1):
            url_query = {
                'search_query': query.encode('utf-8'),
                'page': pagenum,
                'spf': 'navigate',
            }
            url_query.update(self._EXTRA_QUERY_ARGS)
            result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
            data = self._download_json(
                result_url, video_id='query "%s"' % query,
                note='Downloading page %s' % pagenum,
                errnote='Unable to download API page')
            html_content = data[1]['body']['content']

            if 'class="search-message' in html_content:
                raise ExtractorError(
                    '[youtube] No video results', expected=True)

            new_videos = self._ids_to_results(orderedSet(re.findall(
                r'href="/watch\?v=(.{11})', html_content)))
            videos += new_videos
            # Stop as soon as the limit is reached: the previous strict '>'
            # comparison downloaded one extra, useless page whenever a page
            # filled the requested count exactly.
            if not new_videos or len(videos) >= limit:
                break

        # Trim any overshoot from the last page.
        if len(videos) > n:
            videos = videos[:n]
        return self.playlist_result(videos, query)
1893
1894
class YoutubeSearchDateIE(YoutubeSearchIE):
    # Same search mechanics as YoutubeSearchIE, only sorted by upload date.
    IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
    _SEARCH_KEY = 'ytsearchdate'
    IE_DESC = 'YouTube.com searches, newest videos first'
    _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
1900
1901
class YoutubeSearchURLIE(InfoExtractor):
    IE_DESC = 'YouTube.com search URLs'
    IE_NAME = 'youtube:search_url'
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
    _TESTS = [{
        'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
        'playlist_mincount': 5,
        'info_dict': {
            'title': 'youtube-dl test video',
        }
    }]

    def _real_extract(self, url):
        """Scrape a results page and return its items as a url playlist."""
        mobj = re.match(self._VALID_URL, url)
        query = compat_urllib_parse_unquote_plus(mobj.group('query'))

        webpage = self._download_webpage(url, query)
        # Narrow down to the result list before parsing individual items.
        result_code = self._search_regex(
            r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')

        entries = []
        for item_html in re.findall(
                r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code):
            item_title = self._html_search_regex(
                [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], item_html, 'item title', fatal=False)
            href = self._html_search_regex(
                r'(?s)href="([^"]+)"', item_html, 'item URL')
            entries.append({
                '_type': 'url',
                'url': compat_urlparse.urljoin('https://www.youtube.com/', href),
                'title': item_title,
            })

        return {
            '_type': 'playlist',
            'entries': entries,
            'title': query,
        }
1943
1944
class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
    IE_DESC = 'YouTube.com (multi-season) shows'
    _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
    IE_NAME = 'youtube:show'
    _TESTS = [{
        'url': 'https://www.youtube.com/show/airdisasters',
        'playlist_mincount': 5,
        'info_dict': {
            'id': 'airdisasters',
            'title': 'Air Disasters',
        }
    }]

    def _real_extract(self, url):
        # A show is just a collection of playlists; rewrite the URL to the
        # show's playlists page and let the base class handle it.
        show_id = self._match_id(url)
        playlists_url = 'https://www.youtube.com/show/%s/playlists' % show_id
        return super(YoutubeShowIE, self)._real_extract(playlists_url)
1962
1963
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
    """
    Base class for feed extractors
    Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
    """
    _LOGIN_REQUIRED = True

    @property
    def IE_NAME(self):
        return 'youtube:%s' % self._FEED_NAME

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        page = self._download_webpage(
            'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)

        # The extraction process is the same as for playlists, but the regex
        # for the video ids doesn't contain an index
        ids = []
        more_widget_html = content_html = page
        for page_num in itertools.count(1):
            matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)

            # 'recommended' feed has infinite 'load more' and each new portion spins
            # the same videos in (sometimes) slightly different order, so we'll check
            # for unicity and break when portion has no new videos.
            # This must be a real list, not filter(): on Python 3 filter()
            # returns a lazy object that is always truthy, so the emptiness
            # check below would never fire and paging would never stop.
            new_ids = [video_id for video_id in orderedSet(matches) if video_id not in ids]
            if not new_ids:
                break

            ids.extend(new_ids)

            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
            if not mobj:
                # No "load more" button left: last page reached.
                break

            more = self._download_json(
                'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
                'Downloading page #%s' % page_num,
                transform_source=uppercase_escape)
            content_html = more['content_html']
            more_widget_html = more['load_more_widget_html']

        return self.playlist_result(
            self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2011
2012
class YoutubeWatchLaterIE(YoutubePlaylistIE):
    IE_NAME = 'youtube:watchlater'
    IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'

    _TESTS = []  # override PlaylistIE tests

    def _real_extract(self, url):
        # The watch-later list is the special playlist 'WL'; reuse the
        # inherited playlist extraction regardless of the matched URL.
        return self._extract_playlist('WL')
2022
2023
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
    IE_NAME = 'youtube:favorites'
    IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
    _LOGIN_REQUIRED = True

    def _real_extract(self, url):
        # The favourites page embeds the id of an ordinary playlist; find
        # it and hand the extraction over to YoutubePlaylistIE.
        favourites_page = self._download_webpage(
            'https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
        favourites_list_id = self._search_regex(
            r'list=(.+?)["&]', favourites_page, 'favourites playlist id')
        return self.url_result(favourites_list_id, 'YoutubePlaylist')
2034
2035
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
    # Attribute-only subclass: extraction logic is in YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
    _FEED_NAME = 'recommended'
    _PLAYLIST_TITLE = 'Youtube Recommended videos'
2041
2042
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
    # Attribute-only subclass: extraction logic is in YoutubeFeedsInfoExtractor.
    IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
    _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
    _FEED_NAME = 'subscriptions'
    _PLAYLIST_TITLE = 'Youtube Subscriptions'
2048
2049
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
    # Attribute-only subclass: extraction logic is in YoutubeFeedsInfoExtractor.
    IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
    # Raw string: '\.' in a plain string literal is an invalid escape
    # sequence (deprecated, and an error on newer Pythons), and every
    # sibling extractor already uses r'' for its _VALID_URL. The resulting
    # string value is unchanged.
    _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
    _FEED_NAME = 'history'
    _PLAYLIST_TITLE = 'Youtube History'
2055
2056
class YoutubeTruncatedURLIE(InfoExtractor):
    # Catches watch/attribution URLs that lost their video id (typically an
    # unquoted '&' in the shell) and fails with a helpful message instead of
    # an obscure one from another extractor.
    IE_NAME = 'youtube:truncated_url'
    IE_DESC = False  # Do not list
    _VALID_URL = r'''(?x)
        (?:https?://)?
        (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
        (?:watch\?(?:
            feature=[a-z_]+|
            annotation_id=annotation_[^&]+|
            x-yt-cl=[0-9]+|
            hl=[^&]*|
            t=[0-9]+
        )?
        |
            attribution_link\?a=[^&]+
        )
        $
    '''

    _TESTS = [{
        'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
        'only_matching': True,
    }, {
        'url': 'http://www.youtube.com/watch?',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?feature=foo',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?hl=en-GB',
        'only_matching': True,
    }, {
        'url': 'https://www.youtube.com/watch?t=2372',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Always an error: there is nothing to extract from such a URL.
        raise ExtractorError(
            'Did you forget to quote the URL? Remember that & is a meta '
            'character in most shells, so you want to put the URL in quotes, '
            'like youtube-dl '
            '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
            ' or simply youtube-dl BaW_jenozKc .',
            expected=True)
2104
2105
class YoutubeTruncatedIDIE(InfoExtractor):
    IE_NAME = 'youtube:truncated_id'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'

    _TESTS = [{
        'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # A YouTube video id is exactly 11 characters; anything shorter means
        # the URL was cut off somewhere, so report it instead of extracting.
        truncated_id = self._match_id(url)
        raise ExtractorError(
            'Incomplete YouTube ID %s. URL %s looks truncated.' % (truncated_id, url),
            expected=True)