1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_duration,
38 remove_quotes,
39 remove_start,
40 sanitized_Request,
41 smuggle_url,
42 str_to_int,
43 unescapeHTML,
44 unified_strdate,
45 unsmuggle_url,
46 uppercase_escape,
47 urlencode_postdata,
48 ISO3166Utils,
49 )
50
51
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _NETRC_MACHINE = 'youtube'
57 # If True it will raise an error if no login info is provided
58 _LOGIN_REQUIRED = False
59
60 def _set_language(self):
61 self._set_cookie(
62 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
63 # YouTube sets the expire time to about two months
64 expire_time=time.time() + 2 * 30 * 24 * 3600)
65
66 def _ids_to_results(self, ids):
67 return [
68 self.url_result(vid_id, 'Youtube', video_id=vid_id)
69 for vid_id in ids]
70
71 def _login(self):
72 """
73 Attempt to log in to YouTube.
74 True is returned if successful or skipped.
75 False is returned if login failed.
76
77 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
78 """
79 (username, password) = self._get_login_info()
80 # No authentication to be performed
81 if username is None:
82 if self._LOGIN_REQUIRED:
83 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
84 return True
85
86 login_page = self._download_webpage(
87 self._LOGIN_URL, None,
88 note='Downloading login page',
89 errnote='unable to fetch login page', fatal=False)
90 if login_page is False:
91 return
92
93 galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
94 login_page, 'Login GALX parameter')
95
96 # Log in
97 login_form_strs = {
98 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
99 'Email': username,
100 'GALX': galx,
101 'Passwd': password,
102
103 'PersistentCookie': 'yes',
104 '_utf8': '霱',
105 'bgresponse': 'js_disabled',
106 'checkConnection': '',
107 'checkedDomains': 'youtube',
108 'dnConn': '',
109 'pstMsg': '0',
110 'rmShown': '1',
111 'secTok': '',
112 'signIn': 'Sign in',
113 'timeStmp': '',
114 'service': 'youtube',
115 'uilel': '3',
116 'hl': 'en_US',
117 }
118
119 login_data = urlencode_postdata(login_form_strs)
120
121 req = sanitized_Request(self._LOGIN_URL, login_data)
122 login_results = self._download_webpage(
123 req, None,
124 note='Logging in', errnote='unable to log in', fatal=False)
125 if login_results is False:
126 return False
127
128 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
129 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
130
131 # Two-Factor
132 # TODO add SMS and phone call support - these require making a request and then prompting the user
133
134 if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
135 tfa_code = self._get_tfa_info('2-step verification code')
136
137 if not tfa_code:
138 self._downloader.report_warning(
139 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
140 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
141 return False
142
143 tfa_code = remove_start(tfa_code, 'G-')
144
145 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
146
147 tfa_form_strs.update({
148 'Pin': tfa_code,
149 'TrustDevice': 'on',
150 })
151
152 tfa_data = urlencode_postdata(tfa_form_strs)
153
154 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
155 tfa_results = self._download_webpage(
156 tfa_req, None,
157 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
158
159 if tfa_results is False:
160 return False
161
162 if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
163 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
164 return False
165 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
166 self._downloader.report_warning('unable to log in - did the page structure change?')
167 return False
168 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
169 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
170 return False
171
172 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
173 self._downloader.report_warning('unable to log in: bad username or password')
174 return False
175 return True
176
177 def _real_initialize(self):
178 if self._downloader is None:
179 return
180 self._set_language()
181 if not self._login():
182 return
183
184
185 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
186 # Extract entries from page with "Load more" button
187 def _entries(self, page, playlist_id):
188 more_widget_html = content_html = page
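# The first iteration parses the page we were given; subsequent iterations
# parse the 'content_html' fragments returned by the "Load more" URL below.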
189 for page_num in itertools.count(1):
190 for entry in self._process_page(content_html):
191 yield entry
192
193 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
194 if not mobj:
195 break
196
197 more = self._download_json(
198 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
199 'Downloading page #%s' % page_num,
200 transform_source=uppercase_escape)
201 content_html = more['content_html']
202 if not content_html.strip():
203 # Some webpages show a "Load more" button but they don't
204 # have more videos
205 break
206 more_widget_html = more['load_more_widget_html']
207
208
209 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
210 def _process_page(self, content):
211 for video_id, video_title in self.extract_videos_from_page(content):
212 yield self.url_result(video_id, 'Youtube', video_id, video_title)
213
214 def extract_videos_from_page(self, page):
215 ids_in_page = []
216 titles_in_page = []
217 for mobj in re.finditer(self._VIDEO_RE, page):
218 # The link with index 0 is not the first video of the playlist (not sure if still actual)
219 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
220 continue
221 video_id = mobj.group('id')
222 video_title = unescapeHTML(mobj.group('title'))
223 if video_title:
224 video_title = video_title.strip()
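# De-duplicate: keep only the first occurrence of each video id, but
# backfill its title if the earlier occurrence had none.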
225 try:
226 idx = ids_in_page.index(video_id)
227 if video_title and not titles_in_page[idx]:
228 titles_in_page[idx] = video_title
229 except ValueError:
230 ids_in_page.append(video_id)
231 titles_in_page.append(video_title)
232 return zip(ids_in_page, titles_in_page)
233
234
235 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
236 def _process_page(self, content):
237 for playlist_id in orderedSet(re.findall(
238 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
239 content)):
240 yield self.url_result(
241 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
242
243 def _real_extract(self, url):
244 playlist_id = self._match_id(url)
245 webpage = self._download_webpage(url, playlist_id)
246 title = self._og_search_title(webpage, fatal=False)
247 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
248
249
250 class YoutubeIE(YoutubeBaseInfoExtractor):
251 IE_DESC = 'YouTube.com'
252 _VALID_URL = r"""(?x)^
253 (
254 (?:https?://|//) # http(s):// or protocol-independent URL
255 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
256 (?:www\.)?deturl\.com/www\.youtube\.com/|
257 (?:www\.)?pwnyoutube\.com/|
258 (?:www\.)?yourepeat\.com/|
259 tube\.majestyc\.net/|
260 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
261 (?:.*?\#/)? # handle anchor (#/) redirect urls
262 (?: # the various things that can precede the ID:
263 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
264 |(?: # or the v= param in all its forms
265 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
266 (?:\?|\#!?) # the params delimiter ? or # or #!
267 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
268 v=
269 )
270 ))
271 |(?:
272 youtu\.be| # just youtu.be/xxxx
273 vid\.plus # or vid.plus/xxxx
274 )/
275 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
276 )
277 )? # all until now is optional -> you can pass the naked ID
278 ([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
279 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
280 (?(1).+)? # if we found the ID, everything can follow
281 $"""
282 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
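# Hardcoded itag -> format metadata. The negative 'preference' values appear
# to be there so that DASH/HLS/3D variants rank below the default muxed
# formats when formats are sorted.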
283 _formats = {
284 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
285 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
286 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
287 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
288 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
289 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
290 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
291 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
292 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
293 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
294 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
295 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
296 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
297 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
298 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
299 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
300 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
301 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
302
303
304 # 3D videos
305 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
306 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
307 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
308 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
309 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
310 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
311 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
312
313 # Apple HTTP Live Streaming
314 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
315 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
316 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
317 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
318 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
319 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
320 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
321 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
322
323 # DASH mp4 video
324 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
325 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
326 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
327 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
328 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
329 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
330 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
331 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
333 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
334 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
335
336 # Dash mp4 audio
337 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
338 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
339 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
340
341 # Dash webm
342 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
343 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
344 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
345 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
346 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
347 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
348 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
349 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
350 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
351 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
352 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
354 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
355 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
356 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
357 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
358 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
359 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
360 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
361 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
362 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
363 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
364
365 # Dash webm audio
366 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
367 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
368
369 # Dash webm audio with opus inside
370 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
371 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
372 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
373
374 # RTMP (unnamed)
375 '_rtmp': {'protocol': 'rtmp'},
376 }
377 _SUBTITLE_FORMATS = ('ttml', 'vtt')
378
379 IE_NAME = 'youtube'
380 _TESTS = [
381 {
382 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
383 'info_dict': {
384 'id': 'BaW_jenozKc',
385 'ext': 'mp4',
386 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
387 'uploader': 'Philipp Hagemeister',
388 'uploader_id': 'phihag',
389 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
390 'upload_date': '20121002',
391 'license': 'Standard YouTube License',
392 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
393 'categories': ['Science & Technology'],
394 'tags': ['youtube-dl'],
395 'like_count': int,
396 'dislike_count': int,
397 'start_time': 1,
398 'end_time': 9,
399 }
400 },
401 {
402 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
403 'note': 'Test generic use_cipher_signature video (#897)',
404 'info_dict': {
405 'id': 'UxxajLWwzqY',
406 'ext': 'mp4',
407 'upload_date': '20120506',
408 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
409 'alt_title': 'I Love It (feat. Charli XCX)',
410 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
411 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
412 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
413 'iconic ep', 'iconic', 'love', 'it'],
414 'uploader': 'Icona Pop',
415 'uploader_id': 'IconaPop',
416 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
417 'license': 'Standard YouTube License',
418 'creator': 'Icona Pop',
419 }
420 },
421 {
422 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
423 'note': 'Test VEVO video with age protection (#956)',
424 'info_dict': {
425 'id': '07FYdnEawAQ',
426 'ext': 'mp4',
427 'upload_date': '20130703',
428 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
429 'alt_title': 'Tunnel Vision',
430 'description': 'md5:64249768eec3bc4276236606ea996373',
431 'uploader': 'justintimberlakeVEVO',
432 'uploader_id': 'justintimberlakeVEVO',
433 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
434 'license': 'Standard YouTube License',
435 'creator': 'Justin Timberlake',
436 'age_limit': 18,
437 }
438 },
439 {
440 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
441 'note': 'Embed-only video (#1746)',
442 'info_dict': {
443 'id': 'yZIXLfi8CZQ',
444 'ext': 'mp4',
445 'upload_date': '20120608',
446 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
447 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
448 'uploader': 'SET India',
449 'uploader_id': 'setindia',
450 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
451 'license': 'Standard YouTube License',
452 'age_limit': 18,
453 }
454 },
455 {
456 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
457 'note': 'Use the first video ID in the URL',
458 'info_dict': {
459 'id': 'BaW_jenozKc',
460 'ext': 'mp4',
461 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
462 'uploader': 'Philipp Hagemeister',
463 'uploader_id': 'phihag',
464 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
465 'upload_date': '20121002',
466 'license': 'Standard YouTube License',
467 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
468 'categories': ['Science & Technology'],
469 'tags': ['youtube-dl'],
470 'like_count': int,
471 'dislike_count': int,
472 },
473 'params': {
474 'skip_download': True,
475 },
476 },
477 {
478 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
479 'note': '256k DASH audio (format 141) via DASH manifest',
480 'info_dict': {
481 'id': 'a9LDPn-MO4I',
482 'ext': 'm4a',
483 'upload_date': '20121002',
484 'uploader_id': '8KVIDEO',
485 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
486 'description': '',
487 'uploader': '8KVIDEO',
488 'license': 'Standard YouTube License',
489 'title': 'UHDTV TEST 8K VIDEO.mp4'
490 },
491 'params': {
492 'youtube_include_dash_manifest': True,
493 'format': '141',
494 },
495 },
496 # DASH manifest with encrypted signature
497 {
498 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
499 'info_dict': {
500 'id': 'IB3lcPjvWLA',
501 'ext': 'm4a',
502 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
503 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
504 'uploader': 'AfrojackVEVO',
505 'uploader_id': 'AfrojackVEVO',
506 'upload_date': '20131011',
507 'license': 'Standard YouTube License',
508 },
509 'params': {
510 'youtube_include_dash_manifest': True,
511 'format': '141',
512 },
513 },
514 # JS player signature function name containing $
515 {
516 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
517 'info_dict': {
518 'id': 'nfWlot6h_JM',
519 'ext': 'm4a',
520 'title': 'Taylor Swift - Shake It Off',
521 'alt_title': 'Shake It Off',
522 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
523 'uploader': 'TaylorSwiftVEVO',
524 'uploader_id': 'TaylorSwiftVEVO',
525 'upload_date': '20140818',
526 'license': 'Standard YouTube License',
527 'creator': 'Taylor Swift',
528 },
529 'params': {
530 'youtube_include_dash_manifest': True,
531 'format': '141',
532 },
533 },
534 # Controversy video
535 {
536 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
537 'info_dict': {
538 'id': 'T4XJQO3qol8',
539 'ext': 'mp4',
540 'upload_date': '20100909',
541 'uploader': 'The Amazing Atheist',
542 'uploader_id': 'TheAmazingAtheist',
543 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
544 'license': 'Standard YouTube License',
545 'title': 'Burning Everyone\'s Koran',
546 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
547 }
548 },
549 # Normal age-gate video (No vevo, embed allowed)
550 {
551 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
552 'info_dict': {
553 'id': 'HtVdAasjOgU',
554 'ext': 'mp4',
555 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
556 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
557 'uploader': 'The Witcher',
558 'uploader_id': 'WitcherGame',
559 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
560 'upload_date': '20140605',
561 'license': 'Standard YouTube License',
562 'age_limit': 18,
563 },
564 },
565 # Age-gate video with encrypted signature
566 {
567 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
568 'info_dict': {
569 'id': '6kLq3WMV1nU',
570 'ext': 'mp4',
571 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
572 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
573 'uploader': 'LloydVEVO',
574 'uploader_id': 'LloydVEVO',
575 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
576 'upload_date': '20110629',
577 'license': 'Standard YouTube License',
578 'age_limit': 18,
579 },
580 },
581 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
582 {
583 'url': '__2ABJjxzNo',
584 'info_dict': {
585 'id': '__2ABJjxzNo',
586 'ext': 'mp4',
587 'upload_date': '20100430',
588 'uploader_id': 'deadmau5',
589 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
590 'creator': 'deadmau5',
591 'description': 'md5:12c56784b8032162bb936a5f76d55360',
592 'uploader': 'deadmau5',
593 'license': 'Standard YouTube License',
594 'title': 'Deadmau5 - Some Chords (HD)',
595 'alt_title': 'Some Chords',
596 },
597 'expected_warnings': [
598 'DASH manifest missing',
599 ]
600 },
601 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
602 {
603 'url': 'lqQg6PlCWgI',
604 'info_dict': {
605 'id': 'lqQg6PlCWgI',
606 'ext': 'mp4',
607 'upload_date': '20150827',
608 'uploader_id': 'olympic',
609 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
610 'license': 'Standard YouTube License',
611 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
612 'uploader': 'Olympics',
613 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
614 },
615 'params': {
616 'skip_download': 'requires avconv',
617 }
618 },
619 # Non-square pixels
620 {
621 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
622 'info_dict': {
623 'id': '_b-2C3KPAM0',
624 'ext': 'mp4',
625 'stretched_ratio': 16 / 9.,
626 'upload_date': '20110310',
627 'uploader_id': 'AllenMeow',
628 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
629 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
630 'uploader': '孫艾倫',
631 'license': 'Standard YouTube License',
632 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
633 },
634 },
635 # url_encoded_fmt_stream_map is empty string
636 {
637 'url': 'qEJwOuvDf7I',
638 'info_dict': {
639 'id': 'qEJwOuvDf7I',
640 'ext': 'webm',
641 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
642 'description': '',
643 'upload_date': '20150404',
644 'uploader_id': 'spbelect',
645 'uploader': 'Наблюдатели Петербурга',
646 },
647 'params': {
648 'skip_download': 'requires avconv',
649 },
650 'skip': 'This live event has ended.',
651 },
652 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
653 {
654 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
655 'info_dict': {
656 'id': 'FIl7x6_3R5Y',
657 'ext': 'mp4',
658 'title': 'md5:7b81415841e02ecd4313668cde88737a',
659 'description': 'md5:116377fd2963b81ec4ce64b542173306',
660 'upload_date': '20150625',
661 'uploader_id': 'dorappi2000',
662 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
663 'uploader': 'dorappi2000',
664 'license': 'Standard YouTube License',
665 'formats': 'mincount:33',
666 },
667 },
668 # DASH manifest with segment_list
669 {
670 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
671 'md5': '8ce563a1d667b599d21064e982ab9e31',
672 'info_dict': {
673 'id': 'CsmdDsKjzN8',
674 'ext': 'mp4',
675 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
676 'uploader': 'Airtek',
677 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
678 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
679 'license': 'Standard YouTube License',
680 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
681 },
682 'params': {
683 'youtube_include_dash_manifest': True,
684 'format': '135', # bestvideo
685 }
686 },
687 {
688 # Multifeed videos (multiple cameras), URL is for Main Camera
689 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
690 'info_dict': {
691 'id': 'jqWvoWXjCVs',
692 'title': 'teamPGP: Rocket League Noob Stream',
693 'description': 'md5:dc7872fb300e143831327f1bae3af010',
694 },
695 'playlist': [{
696 'info_dict': {
697 'id': 'jqWvoWXjCVs',
698 'ext': 'mp4',
699 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
700 'description': 'md5:dc7872fb300e143831327f1bae3af010',
701 'upload_date': '20150721',
702 'uploader': 'Beer Games Beer',
703 'uploader_id': 'beergamesbeer',
704 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
705 'license': 'Standard YouTube License',
706 },
707 }, {
708 'info_dict': {
709 'id': '6h8e8xoXJzg',
710 'ext': 'mp4',
711 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
712 'description': 'md5:dc7872fb300e143831327f1bae3af010',
713 'upload_date': '20150721',
714 'uploader': 'Beer Games Beer',
715 'uploader_id': 'beergamesbeer',
716 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
717 'license': 'Standard YouTube License',
718 },
719 }, {
720 'info_dict': {
721 'id': 'PUOgX5z9xZw',
722 'ext': 'mp4',
723 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
724 'description': 'md5:dc7872fb300e143831327f1bae3af010',
725 'upload_date': '20150721',
726 'uploader': 'Beer Games Beer',
727 'uploader_id': 'beergamesbeer',
728 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
729 'license': 'Standard YouTube License',
730 },
731 }, {
732 'info_dict': {
733 'id': 'teuwxikvS5k',
734 'ext': 'mp4',
735 'title': 'teamPGP: Rocket League Noob Stream (zim)',
736 'description': 'md5:dc7872fb300e143831327f1bae3af010',
737 'upload_date': '20150721',
738 'uploader': 'Beer Games Beer',
739 'uploader_id': 'beergamesbeer',
740 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
741 'license': 'Standard YouTube License',
742 },
743 }],
744 'params': {
745 'skip_download': True,
746 },
747 },
748 {
749 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
750 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
751 'info_dict': {
752 'id': 'gVfLd0zydlo',
753 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
754 },
755 'playlist_count': 2,
756 },
757 {
758 'url': 'http://vid.plus/FlRa-iH7PGw',
759 'only_matching': True,
760 },
761 {
762 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
763 # Also tests cut-off URL expansion in video description (see
764 # https://github.com/rg3/youtube-dl/issues/1892,
765 # https://github.com/rg3/youtube-dl/issues/8164)
766 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
767 'info_dict': {
768 'id': 'lsguqyKfVQg',
769 'ext': 'mp4',
770 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
771 'alt_title': 'Dark Walk',
772 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
773 'upload_date': '20151119',
774 'uploader_id': 'IronSoulElf',
775 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
776 'uploader': 'IronSoulElf',
777 'license': 'Standard YouTube License',
778 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
779 },
780 'params': {
781 'skip_download': True,
782 },
783 },
784 {
785 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
786 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
787 'only_matching': True,
788 },
789 {
790 # Video with yt:stretch=17:0
791 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
792 'info_dict': {
793 'id': 'Q39EVAstoRM',
794 'ext': 'mp4',
795 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
796 'description': 'md5:ee18a25c350637c8faff806845bddee9',
797 'upload_date': '20151107',
798 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
799 'uploader': 'CH GAMER DROID',
800 },
801 'params': {
802 'skip_download': True,
803 },
804 },
805 {
806 # Video licensed under Creative Commons
807 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
808 'info_dict': {
809 'id': 'M4gD1WSo5mA',
810 'ext': 'mp4',
811 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
812 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
813 'upload_date': '20150127',
814 'uploader_id': 'BerkmanCenter',
815 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
816 'uploader': 'BerkmanCenter',
817 'license': 'Creative Commons Attribution license (reuse allowed)',
818 },
819 'params': {
820 'skip_download': True,
821 },
822 },
823 {
824 # Channel-like uploader_url
825 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
826 'info_dict': {
827 'id': 'eQcmzGIKrzg',
828 'ext': 'mp4',
829 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
830 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
831 'upload_date': '20151119',
832 'uploader': 'Bernie 2016',
833 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
834 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
835 'license': 'Creative Commons Attribution license (reuse allowed)',
836 },
837 'params': {
838 'skip_download': True,
839 },
840 },
841 {
842 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
843 'only_matching': True,
844 }
845 ]
846
847 def __init__(self, *args, **kwargs):
848 super(YoutubeIE, self).__init__(*args, **kwargs)
849 self._player_cache = {}
850
851 def report_video_info_webpage_download(self, video_id):
852 """Report attempt to download video info webpage."""
853 self.to_screen('%s: Downloading video info webpage' % video_id)
854
855 def report_information_extraction(self, video_id):
856 """Report attempt to extract video information."""
857 self.to_screen('%s: Extracting video information' % video_id)
858
859 def report_unavailable_format(self, video_id, format):
860 """Report that the requested format is not available."""
861 self.to_screen('%s: Format %s not available' % (video_id, format))
862
863 def report_rtmp_download(self):
864 """Indicate the download will use the RTMP protocol."""
865 self.to_screen('RTMP download detected')
866
867 def _signature_cache_id(self, example_sig):
868 """ Return a string representation of a signature """
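# e.g. a signature shaped like 'xxxxx.yyy.zzzzzzz' yields the cache id '5.3.7'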
869 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
870
871 def _extract_signature_function(self, video_id, player_url, example_sig):
872 id_m = re.match(
873 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
874 player_url)
875 if not id_m:
876 raise ExtractorError('Cannot identify player %r' % player_url)
877 player_type = id_m.group('ext')
878 player_id = id_m.group('id')
879
880 # Read from filesystem cache
881 func_id = '%s_%s_%s' % (
882 player_type, player_id, self._signature_cache_id(example_sig))
883 assert os.path.basename(func_id) == func_id
884
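# A cached spec is simply a list of source indices, so the deciphering
# function reduces to a reordering/selection of the signature's characters.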
885 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
886 if cache_spec is not None:
887 return lambda s: ''.join(s[i] for i in cache_spec)
888
889 download_note = (
890 'Downloading player %s' % player_url
891 if self._downloader.params.get('verbose') else
892 'Downloading %s player %s' % (player_type, player_id)
893 )
894 if player_type == 'js':
895 code = self._download_webpage(
896 player_url, video_id,
897 note=download_note,
898 errnote='Download of %s failed' % player_url)
899 res = self._parse_sig_js(code)
900 elif player_type == 'swf':
901 urlh = self._request_webpage(
902 player_url, video_id,
903 note=download_note,
904 errnote='Download of %s failed' % player_url)
905 code = urlh.read()
906 res = self._parse_sig_swf(code)
907 else:
908 assert False, 'Invalid player type %r' % player_type
909
910 test_string = ''.join(map(compat_chr, range(len(example_sig))))
911 cache_res = res(test_string)
912 cache_spec = [ord(c) for c in cache_res]
913
914 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
915 return res
916
917 def _print_sig_code(self, func, example_sig):
918 def gen_sig_code(idxs):
919 def _genslice(start, end, step):
920 starts = '' if start == 0 else str(start)
921 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
922 steps = '' if step == 1 else (':%d' % step)
923 return 's[%s%s%s]' % (starts, ends, steps)
924
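# Walk the index list and collapse runs with a constant step of +/-1 into
# slice expressions, emitting single s[i] terms for isolated indices.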
925 step = None
926 # Quell pyflakes warnings - start will be set when step is set
927 start = '(Never used)'
928 for i, prev in zip(idxs[1:], idxs[:-1]):
929 if step is not None:
930 if i - prev == step:
931 continue
932 yield _genslice(start, prev, step)
933 step = None
934 continue
935 if i - prev in [-1, 1]:
936 step = i - prev
937 start = prev
938 continue
939 else:
940 yield 's[%d]' % prev
941 if step is None:
942 yield 's[%d]' % i
943 else:
944 yield _genslice(start, i, step)
945
946 test_string = ''.join(map(compat_chr, range(len(example_sig))))
947 cache_res = func(test_string)
948 cache_spec = [ord(c) for c in cache_res]
949 expr_code = ' + '.join(gen_sig_code(cache_spec))
950 signature_id_tuple = '(%s)' % (
951 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
952 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
953 ' return %s\n') % (signature_id_tuple, expr_code)
954 self.to_screen('Extracted signature function:\n' + code)
955
956 def _parse_sig_js(self, jscode):
957 funcname = self._search_regex(
958 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
959 'Initial JS player signature function name')
960
961 jsi = JSInterpreter(jscode)
962 initial_function = jsi.extract_function(funcname)
963 return lambda s: initial_function([s])
964
965 def _parse_sig_swf(self, file_contents):
966 swfi = SWFInterpreter(file_contents)
967 TARGET_CLASSNAME = 'SignatureDecipher'
968 searched_class = swfi.extract_class(TARGET_CLASSNAME)
969 initial_function = swfi.extract_function(searched_class, 'decipher')
970 return lambda s: initial_function([s])
971
972 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
973 """Turn the encrypted s field into a working signature"""
974
975 if player_url is None:
976 raise ExtractorError('Cannot decrypt signature without player_url')
977
978 if player_url.startswith('//'):
979 player_url = 'https:' + player_url
980 try:
981 player_id = (player_url, self._signature_cache_id(s))
982 if player_id not in self._player_cache:
983 func = self._extract_signature_function(
984 video_id, player_url, s
985 )
986 self._player_cache[player_id] = func
987 func = self._player_cache[player_id]
988 if self._downloader.params.get('youtube_print_sig_code'):
989 self._print_sig_code(func, s)
990 return func(s)
991 except Exception as e:
992 tb = traceback.format_exc()
993 raise ExtractorError(
994 'Signature extraction failed: ' + tb, cause=e)
995
996 def _get_subtitles(self, video_id, webpage):
997 try:
998 subs_doc = self._download_xml(
999 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1000 video_id, note=False)
1001 except ExtractorError as err:
1002 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1003 return {}
1004
1005 sub_lang_list = {}
1006 for track in subs_doc.findall('track'):
1007 lang = track.attrib['lang_code']
1008 if lang in sub_lang_list:
1009 continue
1010 sub_formats = []
1011 for ext in self._SUBTITLE_FORMATS:
1012 params = compat_urllib_parse_urlencode({
1013 'lang': lang,
1014 'v': video_id,
1015 'fmt': ext,
1016 'name': track.attrib['name'].encode('utf-8'),
1017 })
1018 sub_formats.append({
1019 'url': 'https://www.youtube.com/api/timedtext?' + params,
1020 'ext': ext,
1021 })
1022 sub_lang_list[lang] = sub_formats
1023 if not sub_lang_list:
1024 self._downloader.report_warning('video doesn\'t have subtitles')
1025 return {}
1026 return sub_lang_list
1027
1028 def _get_ytplayer_config(self, video_id, webpage):
1029 patterns = (
1030 # User data may contain arbitrary character sequences that can break
1031 # JSON extraction with regex, e.g. when '};' is contained the second
1032 # regex won't capture the whole JSON. Work around this by trying the more
1033 # specific regex first; proper quoted string handling, to be implemented
1034 # in the future, will replace this workaround (see
1035 # https://github.com/rg3/youtube-dl/issues/7468,
1036 # https://github.com/rg3/youtube-dl/pull/7599)
1037 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1038 r';ytplayer\.config\s*=\s*({.+?});',
1039 )
1040 config = self._search_regex(
1041 patterns, webpage, 'ytplayer.config', default=None)
1042 if config:
1043 return self._parse_json(
1044 uppercase_escape(config), video_id, fatal=False)
1045
1046 def _get_automatic_captions(self, video_id, webpage):
1047 """We need the webpage for getting the captions url, pass it as an
1048 argument to speed up the process."""
1049 self.to_screen('%s: Looking for automatic captions' % video_id)
1050 player_config = self._get_ytplayer_config(video_id, webpage)
1051 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1052 if not player_config:
1053 self._downloader.report_warning(err_msg)
1054 return {}
1055 try:
1056 args = player_config['args']
1057 caption_url = args.get('ttsurl')
1058 if caption_url:
1059 timestamp = args['timestamp']
1060 # We get the available subtitles
1061 list_params = compat_urllib_parse_urlencode({
1062 'type': 'list',
1063 'tlangs': 1,
1064 'asrs': 1,
1065 })
1066 list_url = caption_url + '&' + list_params
1067 caption_list = self._download_xml(list_url, video_id)
1068 original_lang_node = caption_list.find('track')
1069 if original_lang_node is None:
1070 self._downloader.report_warning('Video doesn\'t have automatic captions')
1071 return {}
1072 original_lang = original_lang_node.attrib['lang_code']
1073 caption_kind = original_lang_node.attrib.get('kind', '')
1074
1075 sub_lang_list = {}
1076 for lang_node in caption_list.findall('target'):
1077 sub_lang = lang_node.attrib['lang_code']
1078 sub_formats = []
1079 for ext in self._SUBTITLE_FORMATS:
1080 params = compat_urllib_parse_urlencode({
1081 'lang': original_lang,
1082 'tlang': sub_lang,
1083 'fmt': ext,
1084 'ts': timestamp,
1085 'kind': caption_kind,
1086 })
1087 sub_formats.append({
1088 'url': caption_url + '&' + params,
1089 'ext': ext,
1090 })
1091 sub_lang_list[sub_lang] = sub_formats
1092 return sub_lang_list
1093
1094 # Some videos don't provide ttsurl but rather caption_tracks and
1095 # caption_translation_languages (e.g. 20LmZk1hakA)
1096 caption_tracks = args['caption_tracks']
1097 caption_translation_languages = args['caption_translation_languages']
1098 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1099 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1100 caption_qs = compat_parse_qs(parsed_caption_url.query)
1101
1102 sub_lang_list = {}
1103 for lang in caption_translation_languages.split(','):
1104 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1105 sub_lang = lang_qs.get('lc', [None])[0]
1106 if not sub_lang:
1107 continue
1108 sub_formats = []
1109 for ext in self._SUBTITLE_FORMATS:
1110 caption_qs.update({
1111 'tlang': [sub_lang],
1112 'fmt': [ext],
1113 })
1114 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1115 query=compat_urllib_parse_urlencode(caption_qs, True)))
1116 sub_formats.append({
1117 'url': sub_url,
1118 'ext': ext,
1119 })
1120 sub_lang_list[sub_lang] = sub_formats
1121 return sub_lang_list
1122 # An extractor error can be raised by the download process if there are
1123 # no automatic captions but there are subtitles
1124 except (KeyError, ExtractorError):
1125 self._downloader.report_warning(err_msg)
1126 return {}
1127
1128 def _mark_watched(self, video_id, video_info):
1129 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1130 if not playback_url:
1131 return
1132 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1133 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1134
1135 # cpn generation algorithm is reverse engineered from base.js.
1136 # In fact it works even with dummy cpn.
1137 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
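# Each of the 16 cpn characters is a random byte masked down to 6 bits,
# i.e. an index into the 64-character alphabet above.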
1138 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1139
1140 qs.update({
1141 'ver': ['2'],
1142 'cpn': [cpn],
1143 })
1144 playback_url = compat_urlparse.urlunparse(
1145 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1146
1147 self._download_webpage(
1148 playback_url, video_id, 'Marking watched',
1149 'Unable to mark watched', fatal=False)
1150
1151 @classmethod
1152 def extract_id(cls, url):
1153 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1154 if mobj is None:
1155 raise ExtractorError('Invalid URL: %s' % url)
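# Group 2 of _VALID_URL captures the 11-character video id (group 1 is the
# optional URL prefix preceding it).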
1156 video_id = mobj.group(2)
1157 return video_id
1158
1159 def _extract_from_m3u8(self, manifest_url, video_id):
1160 url_map = {}
1161
1162 def _get_urls(_manifest):
1163 lines = _manifest.split('\n')
1164 urls = filter(lambda l: l and not l.startswith('#'),
1165 lines)
1166 return urls
1167 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1168 formats_urls = _get_urls(manifest)
1169 for format_url in formats_urls:
1170 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1171 url_map[itag] = format_url
1172 return url_map
1173
1174 def _extract_annotations(self, video_id):
1175 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1176 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1177
1178 def _real_extract(self, url):
1179 url, smuggled_data = unsmuggle_url(url, {})
1180
1181 proto = (
1182 'http' if self._downloader.params.get('prefer_insecure', False)
1183 else 'https')
1184
1185 start_time = None
1186 end_time = None
1187 parsed_url = compat_urllib_parse_urlparse(url)
1188 for component in [parsed_url.fragment, parsed_url.query]:
1189 query = compat_parse_qs(component)
1190 if start_time is None and 't' in query:
1191 start_time = parse_duration(query['t'][0])
1192 if start_time is None and 'start' in query:
1193 start_time = parse_duration(query['start'][0])
1194 if end_time is None and 'end' in query:
1195 end_time = parse_duration(query['end'][0])
1196
1197 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1198 mobj = re.search(self._NEXT_URL_RE, url)
1199 if mobj:
1200 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1201 video_id = self.extract_id(url)
1202
1203 # Get video webpage
1204 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1205 video_webpage = self._download_webpage(url, video_id)
1206
1207 # Attempt to extract SWF player URL
1208 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1209 if mobj is not None:
1210 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1211 else:
1212 player_url = None
1213
1214 dash_mpds = []
1215
1216 def add_dash_mpd(video_info):
1217 dash_mpd = video_info.get('dashmpd')
1218 if dash_mpd and dash_mpd[0] not in dash_mpds:
1219 dash_mpds.append(dash_mpd[0])
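# Collect every distinct dashmpd URL encountered, so that itags from all of
# the referenced DASH manifests can be combined further below.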
1220
1221 # Get video info
1222 embed_webpage = None
1223 is_live = None
1224 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1225 age_gate = True
1226 # We simulate access to the video from www.youtube.com/v/{video_id};
1227 # this page can be viewed without logging in to YouTube
1228 url = proto + '://www.youtube.com/embed/%s' % video_id
1229 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1230 data = compat_urllib_parse_urlencode({
1231 'video_id': video_id,
1232 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1233 'sts': self._search_regex(
1234 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1235 })
1236 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1237 video_info_webpage = self._download_webpage(
1238 video_info_url, video_id,
1239 note='Refetching age-gated info webpage',
1240 errnote='unable to download video info webpage')
1241 video_info = compat_parse_qs(video_info_webpage)
1242 add_dash_mpd(video_info)
1243 else:
1244 age_gate = False
1245 video_info = None
1246 # Try looking directly into the video webpage
1247 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1248 if ytplayer_config:
1249 args = ytplayer_config['args']
1250 if args.get('url_encoded_fmt_stream_map'):
1251 # Convert to the same format returned by compat_parse_qs
1252 video_info = dict((k, [v]) for k, v in args.items())
1253 add_dash_mpd(video_info)
1254 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1255 is_live = True
1256 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1257 # We also try looking in get_video_info since it may contain a different dashmpd
1258 # URL that points to a DASH manifest with a possibly different itag set (some itags
1259 # are missing from the DASH manifest pointed to by the webpage's dashmpd, others from
1260 # the DASH manifest pointed to by get_video_info's dashmpd).
1261 # The general idea is to take the union of the itags of both DASH manifests (for an
1262 # example of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1263 self.report_video_info_webpage_download(video_id)
1264 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1265 video_info_url = (
1266 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1267 % (proto, video_id, el_type))
1268 video_info_webpage = self._download_webpage(
1269 video_info_url,
1270 video_id, note=False,
1271 errnote='unable to download video info webpage')
1272 get_video_info = compat_parse_qs(video_info_webpage)
1273 if get_video_info.get('use_cipher_signature') != ['True']:
1274 add_dash_mpd(get_video_info)
1275 if not video_info:
1276 video_info = get_video_info
1277 if 'token' in get_video_info:
1278 # Different get_video_info requests may report different results, e.g.
1279 # some may report video unavailability, but some may serve it without
1280 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1281 # the original webpage as well as el=info and el=embedded get_video_info
1282 # requests report video unavailability due to geo restriction while
1283 # el=detailpage succeeds and returns valid data). This is probably
1284 # due to YouTube measures against IP ranges of hosting providers.
1285 # Work around this by preferring the first successful video_info that contains
1286 # the token, if no such video_info has been found yet.
1287 if 'token' not in video_info:
1288 video_info = get_video_info
1289 break
1290 if 'token' not in video_info:
1291 if 'reason' in video_info:
1292 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1293 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1294 if regions_allowed:
1295 raise ExtractorError('YouTube said: This video is available in %s only' % (
1296 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1297 expected=True)
1298 raise ExtractorError(
1299 'YouTube said: %s' % video_info['reason'][0],
1300 expected=True, video_id=video_id)
1301 else:
1302 raise ExtractorError(
1303 '"token" parameter not in video info for unknown reason',
1304 video_id=video_id)
1305
1306 # title
1307 if 'title' in video_info:
1308 video_title = video_info['title'][0]
1309 else:
1310 self._downloader.report_warning('Unable to extract video title')
1311 video_title = '_'
1312
1313 # description
1314 video_description = get_element_by_id("eow-description", video_webpage)
1315 if video_description:
1316 video_description = re.sub(r'''(?x)
1317 <a\s+
1318 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1319 (?:title|href)="([^"]+)"\s+
1320 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1321 class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
1322 [^<]+\.{3}\s*
1323 </a>
1324 ''', r'\1', video_description)
1325 video_description = clean_html(video_description)
1326 else:
1327 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1328 if fd_mobj:
1329 video_description = unescapeHTML(fd_mobj.group(1))
1330 else:
1331 video_description = ''
1332
1333 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1334 if not self._downloader.params.get('noplaylist'):
1335 entries = []
1336 feed_ids = []
1337 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1338 for feed in multifeed_metadata_list.split(','):
1339 # Unquote should take place before split on comma (,) since textual
1340 # fields may contain comma as well (see
1341 # https://github.com/rg3/youtube-dl/issues/8536)
1342 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1343 entries.append({
1344 '_type': 'url_transparent',
1345 'ie_key': 'Youtube',
1346 'url': smuggle_url(
1347 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1348 {'force_singlefeed': True}),
1349 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1350 })
1351 feed_ids.append(feed_data['id'][0])
1352 self.to_screen(
1353 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1354 % (', '.join(feed_ids), video_id))
1355 return self.playlist_result(entries, video_id, video_title, video_description)
1356 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1357
1358 if 'view_count' in video_info:
1359 view_count = int(video_info['view_count'][0])
1360 else:
1361 view_count = None
1362
1363 # Check for "rental" videos
1364 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1365 raise ExtractorError('"rental" videos not supported')
1366
1367 # Start extracting information
1368 self.report_information_extraction(video_id)
1369
1370 # uploader
1371 if 'author' not in video_info:
1372 raise ExtractorError('Unable to extract uploader name')
1373 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1374
1375 # uploader_id
1376 video_uploader_id = None
1377 video_uploader_url = None
1378 mobj = re.search(
1379 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1380 video_webpage)
1381 if mobj is not None:
1382 video_uploader_id = mobj.group('uploader_id')
1383 video_uploader_url = mobj.group('uploader_url')
1384 else:
1385 self._downloader.report_warning('unable to extract uploader nickname')
1386
1387 # thumbnail image
1388 # We try first to get a high quality image:
1389 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1390 video_webpage, re.DOTALL)
1391 if m_thumb is not None:
1392 video_thumbnail = m_thumb.group(1)
1393 elif 'thumbnail_url' not in video_info:
1394 self._downloader.report_warning('unable to extract video thumbnail')
1395 video_thumbnail = None
1396 else: # don't panic if we can't find it
1397 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1398
1399 # upload date
1400 upload_date = self._html_search_meta(
1401 'datePublished', video_webpage, 'upload date', default=None)
1402 if not upload_date:
1403 upload_date = self._search_regex(
1404 [r'(?s)id="eow-date.*?>(.*?)</span>',
1405 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1406 video_webpage, 'upload date', default=None)
1407 if upload_date:
1408 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1409 upload_date = unified_strdate(upload_date)
1410
1411 video_license = self._html_search_regex(
1412 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1413 video_webpage, 'license', default=None)
1414
1415 m_music = re.search(
1416 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1417 video_webpage)
1418 if m_music:
1419 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1420 video_creator = clean_html(m_music.group('creator'))
1421 else:
1422 video_alt_title = video_creator = None
1423
1424 m_cat_container = self._search_regex(
1425 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1426 video_webpage, 'categories', default=None)
1427 if m_cat_container:
1428 category = self._html_search_regex(
1429 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1430 default=None)
1431 video_categories = None if category is None else [category]
1432 else:
1433 video_categories = None
1434
1435 video_tags = [
1436 unescapeHTML(m.group('content'))
1437 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1438
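# Like/dislike counts are rendered as comma-grouped digits inside the sentiment
# button markup; pull them out of the matching yt-uix-button-content span.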
1439 def _extract_count(count_name):
1440 return str_to_int(self._search_regex(
1441 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1442 % re.escape(count_name),
1443 video_webpage, count_name, default=None))
1444
1445 like_count = _extract_count('like')
1446 dislike_count = _extract_count('dislike')
1447
1448 # subtitles
1449 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1450 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1451
1452 if 'length_seconds' not in video_info:
1453 self._downloader.report_warning('unable to extract video duration')
1454 video_duration = None
1455 else:
1456 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1457
1458 # annotations
1459 video_annotations = None
1460 if self._downloader.params.get('writeannotations', False):
1461 video_annotations = self._extract_annotations(video_id)
1462
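# Turn an itag -> URL mapping (e.g. obtained from the HLS manifest) into format
# dicts, merging in the static per-itag metadata from self._formats.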
1463 def _map_to_format_list(urlmap):
1464 formats = []
1465 for itag, video_real_url in urlmap.items():
1466 dct = {
1467 'format_id': itag,
1468 'url': video_real_url,
1469 'player_url': player_url,
1470 }
1471 if itag in self._formats:
1472 dct.update(self._formats[itag])
1473 formats.append(dct)
1474 return formats
1475
1476 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1477 self.report_rtmp_download()
1478 formats = [{
1479 'format_id': '_rtmp',
1480 'protocol': 'rtmp',
1481 'url': video_info['conn'][0],
1482 'player_url': player_url,
1483 }]
1484 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1485 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1486 if 'rtmpe%3Dyes' in encoded_url_map:
1487 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
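# fmt_list entries look like "itag/WIDTHxHEIGHT/...", so record the declared
# resolution per itag to merge into the matching format below.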
1488 formats_spec = {}
1489 fmt_list = video_info.get('fmt_list', [''])[0]
1490 if fmt_list:
1491 for fmt in fmt_list.split(','):
1492 spec = fmt.split('/')
1493 if len(spec) > 1:
1494 width_height = spec[1].split('x')
1495 if len(width_height) == 2:
1496 formats_spec[spec[0]] = {
1497 'resolution': spec[1],
1498 'width': int_or_none(width_height[0]),
1499 'height': int_or_none(width_height[1]),
1500 }
1501 formats = []
1502 for url_data_str in encoded_url_map.split(','):
1503 url_data = compat_parse_qs(url_data_str)
1504 if 'itag' not in url_data or 'url' not in url_data:
1505 continue
1506 format_id = url_data['itag'][0]
1507 url = url_data['url'][0]
1508
1509 if 'sig' in url_data:
1510 url += '&signature=' + url_data['sig'][0]
1511 elif 's' in url_data:
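# 's' carries an encrypted signature; the player JS (or SWF) is needed so that
# _decrypt_signature can reproduce the player's scrambling.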
1512 encrypted_sig = url_data['s'][0]
1513 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1514
1515 jsplayer_url_json = self._search_regex(
1516 ASSETS_RE,
1517 embed_webpage if age_gate else video_webpage,
1518 'JS player URL (1)', default=None)
1519 if not jsplayer_url_json and not age_gate:
1520 # We need the embed website after all
1521 if embed_webpage is None:
1522 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1523 embed_webpage = self._download_webpage(
1524 embed_url, video_id, 'Downloading embed webpage')
1525 jsplayer_url_json = self._search_regex(
1526 ASSETS_RE, embed_webpage, 'JS player URL')
1527
1528 player_url = json.loads(jsplayer_url_json)
1529 if player_url is None:
1530 player_url_json = self._search_regex(
1531 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1532 video_webpage, 'age gate player URL')
1533 player_url = json.loads(player_url_json)
1534
1535 if self._downloader.params.get('verbose'):
1536 if player_url is None:
1537 player_version = 'unknown'
1538 player_desc = 'unknown'
1539 else:
1540 if player_url.endswith('swf'):
1541 player_version = self._search_regex(
1542 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1543 'flash player', fatal=False)
1544 player_desc = 'flash player %s' % player_version
1545 else:
1546 player_version = self._search_regex(
1547 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1548 player_url,
1549 'html5 player', fatal=False)
1550 player_desc = 'html5 player %s' % player_version
1551
1552 parts_sizes = self._signature_cache_id(encrypted_sig)
1553 self.to_screen('{%s} signature length %s, %s' %
1554 (format_id, parts_sizes, player_desc))
1555
1556 signature = self._decrypt_signature(
1557 encrypted_sig, video_id, player_url, age_gate)
1558 url += '&signature=' + signature
1559 if 'ratebypass' not in url:
1560 url += '&ratebypass=yes'
1561
1562 dct = {
1563 'format_id': format_id,
1564 'url': url,
1565 'player_url': player_url,
1566 }
1567 if format_id in self._formats:
1568 dct.update(self._formats[format_id])
1569 if format_id in formats_spec:
1570 dct.update(formats_spec[format_id])
1571
1572 # Some itags are not included in the DASH manifest, so the corresponding formats
1573 # would lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1574 # Try to extract the metadata from the url_encoded_fmt_stream_map entry instead.
1575 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1576 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1577
1578 more_fields = {
1579 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1580 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1581 'width': width,
1582 'height': height,
1583 'fps': int_or_none(url_data.get('fps', [None])[0]),
1584 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1585 }
1586 for key, value in more_fields.items():
1587 if value:
1588 dct[key] = value
1589 type_ = url_data.get('type', [None])[0]
1590 if type_:
1591 type_split = type_.split(';')
1592 kind_ext = type_split[0].split('/')
1593 if len(kind_ext) == 2:
1594 kind, _ = kind_ext
1595 dct['ext'] = mimetype2ext(type_split[0])
1596 if kind in ('audio', 'video'):
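# Parse the codecs= parameter of the MIME type: two codecs mean muxed video+audio,
# a single codec is audio-only or video-only depending on the kind.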
1597 codecs = None
1598 for mobj in re.finditer(
1599 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1600 if mobj.group('key') == 'codecs':
1601 codecs = mobj.group('val')
1602 break
1603 if codecs:
1604 codecs = codecs.split(',')
1605 if len(codecs) == 2:
1606 acodec, vcodec = codecs[1], codecs[0]
1607 else:
1608 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1609 dct.update({
1610 'acodec': acodec,
1611 'vcodec': vcodec,
1612 })
1613 formats.append(dct)
1614 elif video_info.get('hlsvp'):
1615 manifest_url = video_info['hlsvp'][0]
1616 url_map = self._extract_from_m3u8(manifest_url, video_id)
1617 formats = _map_to_format_list(url_map)
1618 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1619 for a_format in formats:
1620 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1621 else:
1622 unavailable_message = self._html_search_regex(
1623 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1624 video_webpage, 'unavailable message', default=None)
1625 if unavailable_message:
1626 raise ExtractorError(unavailable_message, expected=True)
1627 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1628
1629 # Look for the DASH manifest
1630 if self._downloader.params.get('youtube_include_dash_manifest', True):
1631 dash_mpd_fatal = True
1632 for mpd_url in dash_mpds:
1633 dash_formats = {}
1634 try:
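# Signed DASH manifest URLs carry the encrypted signature in an /s/<sig> path
# segment; decrypt it and substitute a /signature/<decrypted> segment.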
1635 def decrypt_sig(mobj):
1636 s = mobj.group(1)
1637 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1638 return '/signature/%s' % dec_s
1639
1640 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1641
1642 for df in self._extract_mpd_formats(
1643 mpd_url, video_id, fatal=dash_mpd_fatal,
1644 formats_dict=self._formats):
1645 # Do not overwrite DASH format found in some previous DASH manifest
1646 if df['format_id'] not in dash_formats:
1647 dash_formats[df['format_id']] = df
1648 # Additional DASH manifests may end up in HTTP Error 403, therefore
1649 # allow them to fail without a bug report message if some previous
1650 # DASH manifest already succeeded. This is a temporary workaround to
1651 # reduce the burst of bug reports until we figure out the reason and
1652 # whether it can be fixed at all.
1653 dash_mpd_fatal = False
1654 except (ExtractorError, KeyError) as e:
1655 self.report_warning(
1656 'Skipping DASH manifest: %r' % e, video_id)
1657 if dash_formats:
1658 # Remove the formats found through non-DASH extraction; they
1659 # contain less info and it may be wrong, because we use
1660 # fixed values (for example the resolution). See
1661 # https://github.com/rg3/youtube-dl/issues/5774 for an
1662 # example.
1663 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1664 formats.extend(dash_formats.values())
1665
1666 # Check for malformed aspect ratio
1667 stretched_m = re.search(
1668 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1669 video_webpage)
1670 if stretched_m:
1671 w = float(stretched_m.group('w'))
1672 h = float(stretched_m.group('h'))
1673 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1674 # We will only process correct ratios.
1675 if w > 0 and h > 0:
1676 ratio = w / h
1677 for f in formats:
1678 if f.get('vcodec') != 'none':
1679 f['stretched_ratio'] = ratio
1680
1681 self._sort_formats(formats)
1682
1683 self.mark_watched(video_id, video_info)
1684
1685 return {
1686 'id': video_id,
1687 'uploader': video_uploader,
1688 'uploader_id': video_uploader_id,
1689 'uploader_url': video_uploader_url,
1690 'upload_date': upload_date,
1691 'license': video_license,
1692 'creator': video_creator,
1693 'title': video_title,
1694 'alt_title': video_alt_title,
1695 'thumbnail': video_thumbnail,
1696 'description': video_description,
1697 'categories': video_categories,
1698 'tags': video_tags,
1699 'subtitles': video_subtitles,
1700 'automatic_captions': automatic_captions,
1701 'duration': video_duration,
1702 'age_limit': 18 if age_gate else 0,
1703 'annotations': video_annotations,
1704 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1705 'view_count': view_count,
1706 'like_count': like_count,
1707 'dislike_count': dislike_count,
1708 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1709 'formats': formats,
1710 'is_live': is_live,
1711 'start_time': start_time,
1712 'end_time': end_time,
1713 }
1714
1715
1716 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1717 IE_DESC = 'YouTube.com playlists'
1718 _VALID_URL = r"""(?x)(?:
1719 (?:https?://)?
1720 (?:\w+\.)?
1721 youtube\.com/
1722 (?:
1723 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1724 \? (?:.*?[&;])*? (?:p|a|list)=
1725 | p/
1726 )
1727 (
1728 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1729 # Top tracks, they can also include dots
1730 |(?:MC)[\w\.]*
1731 )
1732 .*
1733 |
1734 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1735 )"""
1736 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1737 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1738 IE_NAME = 'youtube:playlist'
1739 _TESTS = [{
1740 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1741 'info_dict': {
1742 'title': 'ytdl test PL',
1743 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1744 },
1745 'playlist_count': 3,
1746 }, {
1747 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1748 'info_dict': {
1749 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1750 'title': 'YDL_Empty_List',
1751 },
1752 'playlist_count': 0,
1753 }, {
1754 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1755 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1756 'info_dict': {
1757 'title': '29C3: Not my department',
1758 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1759 },
1760 'playlist_count': 95,
1761 }, {
1762 'note': 'issue #673',
1763 'url': 'PLBB231211A4F62143',
1764 'info_dict': {
1765 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1766 'id': 'PLBB231211A4F62143',
1767 },
1768 'playlist_mincount': 26,
1769 }, {
1770 'note': 'Large playlist',
1771 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1772 'info_dict': {
1773 'title': 'Uploads from Cauchemar',
1774 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1775 },
1776 'playlist_mincount': 799,
1777 }, {
1778 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1779 'info_dict': {
1780 'title': 'YDL_safe_search',
1781 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1782 },
1783 'playlist_count': 2,
1784 }, {
1785 'note': 'embedded',
1786 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1787 'playlist_count': 4,
1788 'info_dict': {
1789 'title': 'JODA15',
1790 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1791 }
1792 }, {
1793 'note': 'Embedded SWF player',
1794 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1795 'playlist_count': 4,
1796 'info_dict': {
1797 'title': 'JODA7',
1798 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1799 }
1800 }, {
1801 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1802 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1803 'info_dict': {
1804 'title': 'Uploads from Interstellar Movie',
1805 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1806 },
1807 'playlist_mincount': 21,
1808 }]
1809
1810 def _real_initialize(self):
1811 self._login()
1812
1813 def _extract_mix(self, playlist_id):
1814 # The mixes are generated from a single video;
1815 # the id of the playlist is just 'RD' + video_id
1816 url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
1817 webpage = self._download_webpage(
1818 url, playlist_id, 'Downloading Youtube mix')
1819 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1820 title_span = (
1821 search_title('playlist-title') or
1822 search_title('title long-title') or
1823 search_title('title'))
1824 title = clean_html(title_span)
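# Every mix entry links back to the same list= parameter; collect the unique
# video ids in page order.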
1825 ids = orderedSet(re.findall(
1826 r'''(?xs)data-video-username=".*?".*?
1827 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1828 webpage))
1829 url_results = self._ids_to_results(ids)
1830
1831 return self.playlist_result(url_results, playlist_id, title)
1832
1833 def _extract_playlist(self, playlist_id):
1834 url = self._TEMPLATE_URL % playlist_id
1835 page = self._download_webpage(url, playlist_id)
1836
1837 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1838 match = match.strip()
1839 # Check if the playlist exists or is private
1840 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1841 raise ExtractorError(
1842 'The playlist doesn\'t exist or is private, use --username or '
1843 '--netrc to access it.',
1844 expected=True)
1845 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1846 raise ExtractorError(
1847 'Invalid parameters. Maybe URL is incorrect.',
1848 expected=True)
1849 elif re.match(r'[^<]*Choose your language[^<]*', match):
1850 continue
1851 else:
1852 self.report_warning('Youtube gives an alert message: ' + match)
1853
1854 playlist_title = self._html_search_regex(
1855 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1856 page, 'title')
1857
1858 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1859
1860 def _check_download_just_video(self, url, playlist_id):
1861 # Check if it's a video-specific URL
1862 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1863 if 'v' in query_dict:
1864 video_id = query_dict['v'][0]
1865 if self._downloader.params.get('noplaylist'):
1866 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1867 return self.url_result(video_id, 'Youtube', video_id=video_id)
1868 else:
1869 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1870
1871 def _real_extract(self, url):
1872 # Extract playlist id
1873 mobj = re.match(self._VALID_URL, url)
1874 if mobj is None:
1875 raise ExtractorError('Invalid URL: %s' % url)
1876 playlist_id = mobj.group(1) or mobj.group(2)
1877
1878 video = self._check_download_just_video(url, playlist_id)
1879 if video:
1880 return video
1881
1882 if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
1883 # Mixes require a custom extraction process
1884 return self._extract_mix(playlist_id)
1885
1886 return self._extract_playlist(playlist_id)
1887
1888
1889 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
1890 IE_DESC = 'YouTube.com channels'
1891 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
1892 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
1893 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
1894 IE_NAME = 'youtube:channel'
1895 _TESTS = [{
1896 'note': 'paginated channel',
1897 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
1898 'playlist_mincount': 91,
1899 'info_dict': {
1900 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
1901 'title': 'Uploads from lex will',
1902 }
1903 }, {
1904 'note': 'Age restricted channel',
1905 # from https://www.youtube.com/user/DeusExOfficial
1906 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
1907 'playlist_mincount': 64,
1908 'info_dict': {
1909 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
1910 'title': 'Uploads from Deus Ex',
1911 },
1912 }]
1913
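# Defer to the more specific playlists/live extractors when their URLs match.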
1914 @classmethod
1915 def suitable(cls, url):
1916 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
1917 else super(YoutubeChannelIE, cls).suitable(url))
1918
1919 def _real_extract(self, url):
1920 channel_id = self._match_id(url)
1921
1922 url = self._TEMPLATE_URL % channel_id
1923
1924 # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
1925 # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL,
1926 # otherwise fall back on channel by page extraction.
1927 channel_page = self._download_webpage(
1928 url + '?view=57', channel_id,
1929 'Downloading channel page', fatal=False)
1930 if channel_page is False:
1931 channel_playlist_id = False
1932 else:
1933 channel_playlist_id = self._html_search_meta(
1934 'channelId', channel_page, 'channel id', default=None)
1935 if not channel_playlist_id:
1936 channel_playlist_id = self._search_regex(
1937 r'data-(?:channel-external-|yt)id="([^"]+)"',
1938 channel_page, 'channel id', default=None)
1939 if channel_playlist_id and channel_playlist_id.startswith('UC'):
1940 playlist_id = 'UU' + channel_playlist_id[2:]
1941 return self.url_result(
1942 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
1943
1944 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
1945 autogenerated = re.search(r'''(?x)
1946 class="[^"]*?(?:
1947 channel-header-autogenerated-label|
1948 yt-channel-title-autogenerated
1949 )[^"]*"''', channel_page) is not None
1950
1951 if autogenerated:
1952 # The videos are contained in a single page;
1953 # the ajax pages can't be used, as they are empty
1954 entries = [
1955 self.url_result(
1956 video_id, 'Youtube', video_id=video_id,
1957 video_title=video_title)
1958 for video_id, video_title in self.extract_videos_from_page(channel_page)]
1959 return self.playlist_result(entries, channel_id)
1960
1961 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
1962
1963
1964 class YoutubeUserIE(YoutubeChannelIE):
1965 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
1966 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
1967 _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
1968 IE_NAME = 'youtube:user'
1969
1970 _TESTS = [{
1971 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
1972 'playlist_mincount': 320,
1973 'info_dict': {
1974 'title': 'TheLinuxFoundation',
1975 }
1976 }, {
1977 'url': 'ytuser:phihag',
1978 'only_matching': True,
1979 }]
1980
1981 @classmethod
1982 def suitable(cls, url):
1983 # Don't return True if the url can be extracted with another youtube
1984 # extractor; the regex is too permissive and it would match otherwise.
1985 other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
1986 if any(ie.suitable(url) for ie in other_ies):
1987 return False
1988 else:
1989 return super(YoutubeUserIE, cls).suitable(url)
1990
1991
1992 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
1993 IE_DESC = 'YouTube.com live streams'
1994 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
1995 IE_NAME = 'youtube:live'
1996
1997 _TESTS = [{
1998 'url': 'http://www.youtube.com/user/TheYoungTurks/live',
1999 'info_dict': {
2000 'id': 'a48o2S1cPoo',
2001 'ext': 'mp4',
2002 'title': 'The Young Turks - Live Main Show',
2003 'uploader': 'The Young Turks',
2004 'uploader_id': 'TheYoungTurks',
2005 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2006 'upload_date': '20150715',
2007 'license': 'Standard YouTube License',
2008 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2009 'categories': ['News & Politics'],
2010 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2011 'like_count': int,
2012 'dislike_count': int,
2013 },
2014 'params': {
2015 'skip_download': True,
2016 },
2017 }, {
2018 'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2019 'only_matching': True,
2020 }]
2021
2022 def _real_extract(self, url):
2023 mobj = re.match(self._VALID_URL, url)
2024 channel_id = mobj.group('id')
2025 base_url = mobj.group('base_url')
2026 webpage = self._download_webpage(url, channel_id, fatal=False)
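# If the /live page resolves to a concrete broadcast (og:type video plus an
# 11-character videoId meta tag), hand it over to the regular video extractor;
# otherwise fall back to extracting the channel/user base URL.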
2027 if webpage:
2028 page_type = self._og_search_property(
2029 'type', webpage, 'page type', default=None)
2030 video_id = self._html_search_meta(
2031 'videoId', webpage, 'video id', default=None)
2032 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2033 return self.url_result(video_id, YoutubeIE.ie_key())
2034 return self.url_result(base_url)
2035
2036
2037 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2038 IE_DESC = 'YouTube.com user/channel playlists'
2039 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2040 IE_NAME = 'youtube:playlists'
2041
2042 _TESTS = [{
2043 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
2044 'playlist_mincount': 4,
2045 'info_dict': {
2046 'id': 'ThirstForScience',
2047 'title': 'Thirst for Science',
2048 },
2049 }, {
2050 # with "Load more" button
2051 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2052 'playlist_mincount': 70,
2053 'info_dict': {
2054 'id': 'igorkle1',
2055 'title': 'Игорь Клейнер',
2056 },
2057 }, {
2058 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2059 'playlist_mincount': 17,
2060 'info_dict': {
2061 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2062 'title': 'Chem Player',
2063 },
2064 }]
2065
2066
2067 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2068 IE_DESC = 'YouTube.com searches'
2069 # there doesn't appear to be a real limit; for example, if you search for
2070 # 'python' you get more than 8,000,000 results
2071 _MAX_RESULTS = float('inf')
2072 IE_NAME = 'youtube:search'
2073 _SEARCH_KEY = 'ytsearch'
2074 _EXTRA_QUERY_ARGS = {}
2075 _TESTS = []
2076
2077 def _get_n_results(self, query, n):
2078 """Get a specified number of results for a query"""
2079
2080 videos = []
2081 limit = n
2082
2083 for pagenum in itertools.count(1):
2084 url_query = {
2085 'search_query': query.encode('utf-8'),
2086 'page': pagenum,
2087 'spf': 'navigate',
2088 }
2089 url_query.update(self._EXTRA_QUERY_ARGS)
2090 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2091 data = self._download_json(
2092 result_url, video_id='query "%s"' % query,
2093 note='Downloading page %s' % pagenum,
2094 errnote='Unable to download API page')
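# With spf=navigate the response is a JSON array of page fragments; the rendered
# search results HTML lives in the second element's body.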
2095 html_content = data[1]['body']['content']
2096
2097 if 'class="search-message' in html_content:
2098 raise ExtractorError(
2099 '[youtube] No video results', expected=True)
2100
2101 new_videos = self._ids_to_results(orderedSet(re.findall(
2102 r'href="/watch\?v=(.{11})', html_content)))
2103 videos += new_videos
2104 if not new_videos or len(videos) > limit:
2105 break
2106
2107 if len(videos) > n:
2108 videos = videos[:n]
2109 return self.playlist_result(videos, query)
2110
2111
2112 class YoutubeSearchDateIE(YoutubeSearchIE):
2113 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2114 _SEARCH_KEY = 'ytsearchdate'
2115 IE_DESC = 'YouTube.com searches, newest videos first'
2116 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2117
2118
2119 class YoutubeSearchURLIE(InfoExtractor):
2120 IE_DESC = 'YouTube.com search URLs'
2121 IE_NAME = 'youtube:search_url'
2122 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2123 _TESTS = [{
2124 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2125 'playlist_mincount': 5,
2126 'info_dict': {
2127 'title': 'youtube-dl test video',
2128 }
2129 }, {
2130 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2131 'only_matching': True,
2132 }]
2133
2134 def _real_extract(self, url):
2135 mobj = re.match(self._VALID_URL, url)
2136 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2137
2138 webpage = self._download_webpage(url, query)
2139 result_code = self._search_regex(
2140 r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
2141
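# Each result is rendered as an <h3 class="...yt-lockup-title..."> block; pull
# the watch URL and title out of every block.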
2142 part_codes = re.findall(
2143 r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
2144 entries = []
2145 for part_code in part_codes:
2146 part_title = self._html_search_regex(
2147 [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
2148 part_url_snippet = self._html_search_regex(
2149 r'(?s)href="([^"]+)"', part_code, 'item URL')
2150 part_url = compat_urlparse.urljoin(
2151 'https://www.youtube.com/', part_url_snippet)
2152 entries.append({
2153 '_type': 'url',
2154 'url': part_url,
2155 'title': part_title,
2156 })
2157
2158 return {
2159 '_type': 'playlist',
2160 'entries': entries,
2161 'title': query,
2162 }
2163
2164
2165 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2166 IE_DESC = 'YouTube.com (multi-season) shows'
2167 _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
2168 IE_NAME = 'youtube:show'
2169 _TESTS = [{
2170 'url': 'https://www.youtube.com/show/airdisasters',
2171 'playlist_mincount': 5,
2172 'info_dict': {
2173 'id': 'airdisasters',
2174 'title': 'Air Disasters',
2175 }
2176 }]
2177
2178 def _real_extract(self, url):
2179 playlist_id = self._match_id(url)
2180 return super(YoutubeShowIE, self)._real_extract(
2181 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2182
2183
2184 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2185 """
2186 Base class for feed extractors
2187 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2188 """
2189 _LOGIN_REQUIRED = True
2190
2191 @property
2192 def IE_NAME(self):
2193 return 'youtube:%s' % self._FEED_NAME
2194
2195 def _real_initialize(self):
2196 self._login()
2197
2198 def _real_extract(self, url):
2199 page = self._download_webpage(
2200 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2201
2202 # The extraction process is the same as for playlists, but the regex
2203 # for the video ids doesn't contain an index
2204 ids = []
2205 more_widget_html = content_html = page
2206 for page_num in itertools.count(1):
2207 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2208
2209 # The 'recommended' feed has an infinite 'load more', and each new portion returns
2210 # the same videos in a (sometimes) slightly different order, so we check
2211 # for uniqueness and break once a portion yields no new videos
2212 new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
2213 if not new_ids:
2214 break
2215
2216 ids.extend(new_ids)
2217
2218 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2219 if not mobj:
2220 break
2221
2222 more = self._download_json(
2223 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2224 'Downloading page #%s' % page_num,
2225 transform_source=uppercase_escape)
2226 content_html = more['content_html']
2227 more_widget_html = more['load_more_widget_html']
2228
2229 return self.playlist_result(
2230 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2231
2232
2233 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2234 IE_NAME = 'youtube:watchlater'
2235 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2236 _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2237
2238 _TESTS = [{
2239 'url': 'https://www.youtube.com/playlist?list=WL',
2240 'only_matching': True,
2241 }, {
2242 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2243 'only_matching': True,
2244 }]
2245
2246 def _real_extract(self, url):
2247 video = self._check_download_just_video(url, 'WL')
2248 if video:
2249 return video
2250 return self._extract_playlist('WL')
2251
2252
2253 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2254 IE_NAME = 'youtube:favorites'
2255 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2256 _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2257 _LOGIN_REQUIRED = True
2258
2259 def _real_extract(self, url):
2260 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2261 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2262 return self.url_result(playlist_id, 'YoutubePlaylist')
2263
2264
2265 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2266 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2267 _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2268 _FEED_NAME = 'recommended'
2269 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2270
2271
2272 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2273 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2274 _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2275 _FEED_NAME = 'subscriptions'
2276 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2277
2278
2279 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2280 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2281 _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
2282 _FEED_NAME = 'history'
2283 _PLAYLIST_TITLE = 'Youtube History'
2284
2285
2286 class YoutubeTruncatedURLIE(InfoExtractor):
2287 IE_NAME = 'youtube:truncated_url'
2288 IE_DESC = False # Do not list
2289 _VALID_URL = r'''(?x)
2290 (?:https?://)?
2291 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2292 (?:watch\?(?:
2293 feature=[a-z_]+|
2294 annotation_id=annotation_[^&]+|
2295 x-yt-cl=[0-9]+|
2296 hl=[^&]*|
2297 t=[0-9]+
2298 )?
2299 |
2300 attribution_link\?a=[^&]+
2301 )
2302 $
2303 '''
2304
2305 _TESTS = [{
2306 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
2307 'only_matching': True,
2308 }, {
2309 'url': 'http://www.youtube.com/watch?',
2310 'only_matching': True,
2311 }, {
2312 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2313 'only_matching': True,
2314 }, {
2315 'url': 'https://www.youtube.com/watch?feature=foo',
2316 'only_matching': True,
2317 }, {
2318 'url': 'https://www.youtube.com/watch?hl=en-GB',
2319 'only_matching': True,
2320 }, {
2321 'url': 'https://www.youtube.com/watch?t=2372',
2322 'only_matching': True,
2323 }]
2324
2325 def _real_extract(self, url):
2326 raise ExtractorError(
2327 'Did you forget to quote the URL? Remember that & is a meta '
2328 'character in most shells, so you want to put the URL in quotes, '
2329 'like youtube-dl '
2330 '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2331 'or simply youtube-dl BaW_jenozKc.',
2332 expected=True)
2333
2334
2335 class YoutubeTruncatedIDIE(InfoExtractor):
2336 IE_NAME = 'youtube:truncated_id'
2337 IE_DESC = False # Do not list
2338 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2339
2340 _TESTS = [{
2341 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2342 'only_matching': True,
2343 }]
2344
2345 def _real_extract(self, url):
2346 video_id = self._match_id(url)
2347 raise ExtractorError(
2348 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2349 expected=True)