1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse_unquote,
21 compat_urllib_parse_unquote_plus,
22 compat_urllib_parse_urlencode,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 error_to_compat_str,
30 ExtractorError,
31 float_or_none,
32 get_element_by_attribute,
33 get_element_by_id,
34 int_or_none,
35 mimetype2ext,
36 orderedSet,
37 parse_duration,
38 remove_quotes,
39 remove_start,
40 sanitized_Request,
41 smuggle_url,
42 str_to_int,
43 unescapeHTML,
44 unified_strdate,
45 unsmuggle_url,
46 uppercase_escape,
47 urlencode_postdata,
48 ISO3166Utils,
49 )
50
51
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _PASSWORD_CHALLENGE_URL = 'https://accounts.google.com/signin/challenge/sl/password'
57 _NETRC_MACHINE = 'youtube'
58 # If True, an error will be raised if no login info is provided
59 _LOGIN_REQUIRED = False
60
61 def _set_language(self):
62 self._set_cookie(
63 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
64 # YouTube sets the expire time to about two months
65 expire_time=time.time() + 2 * 30 * 24 * 3600)
66
67 def _ids_to_results(self, ids):
68 return [
69 self.url_result(vid_id, 'Youtube', video_id=vid_id)
70 for vid_id in ids]
71
72 def _login(self):
73 """
74 Attempt to log in to YouTube.
75 True is returned if successful or skipped.
76 False is returned if login failed.
77
78 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
79 """
80 (username, password) = self._get_login_info()
81 # No authentication to be performed
82 if username is None:
83 if self._LOGIN_REQUIRED:
84 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
85 return True
86
87 login_page = self._download_webpage(
88 self._LOGIN_URL, None,
89 note='Downloading login page',
90 errnote='unable to fetch login page', fatal=False)
91 if login_page is False:
92 return
93
94 login_form = self._hidden_inputs(login_page)
95
96 login_form.update({
97 'checkConnection': 'youtube',
98 'Email': username,
99 'Passwd': password,
100 })
101
102 login_results = self._download_webpage(
103 self._PASSWORD_CHALLENGE_URL, None,
104 note='Logging in', errnote='unable to log in', fatal=False,
105 data=urlencode_postdata(login_form))
106 if login_results is False:
107 return False
108
109 error_msg = self._html_search_regex(
110 r'<[^>]+id="errormsg_0_Passwd"[^>]*>([^<]+)<',
111 login_results, 'error message', default=None)
112 if error_msg:
113 raise ExtractorError('Unable to login: %s' % error_msg, expected=True)
114
115 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
116 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
117
118 # Two-Factor
119 # TODO add SMS and phone call support - these require making a request and then prompting the user
120
121 if re.search(r'(?i)<form[^>]+id="challenge"', login_results) is not None:
122 tfa_code = self._get_tfa_info('2-step verification code')
123
124 if not tfa_code:
125 self._downloader.report_warning(
126 'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
127 ' (Note that only TOTP (Google Authenticator App) codes work at this time.)')
128 return False
129
130 tfa_code = remove_start(tfa_code, 'G-')
131
132 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
133
134 tfa_form_strs.update({
135 'Pin': tfa_code,
136 'TrustDevice': 'on',
137 })
138
139 tfa_data = urlencode_postdata(tfa_form_strs)
140
141 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
142 tfa_results = self._download_webpage(
143 tfa_req, None,
144 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
145
146 if tfa_results is False:
147 return False
148
149 if re.search(r'(?i)<form[^>]+id="challenge"', tfa_results) is not None:
150 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
151 return False
152 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', tfa_results) is not None:
153 self._downloader.report_warning('unable to log in - did the page structure change?')
154 return False
155 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
156 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
157 return False
158
159 if re.search(r'(?i)<form[^>]+id="gaia_loginform"', login_results) is not None:
160 self._downloader.report_warning('unable to log in: bad username or password')
161 return False
162 return True
163
164 def _real_initialize(self):
165 if self._downloader is None:
166 return
167 self._set_language()
168 if not self._login():
169 return
170
171
172 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
173 # Extract entries from page with "Load more" button
174 def _entries(self, page, playlist_id):
175 more_widget_html = content_html = page
176 for page_num in itertools.count(1):
177 for entry in self._process_page(content_html):
178 yield entry
179
180 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
181 if not mobj:
182 break
183
184 more = self._download_json(
185 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
186 'Downloading page #%s' % page_num,
187 transform_source=uppercase_escape)
188 content_html = more['content_html']
189 if not content_html.strip():
190 # Some webpages show a "Load more" button but they don't
191 # have more videos
192 break
193 more_widget_html = more['load_more_widget_html']
194
195
196 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
197 def _process_page(self, content):
198 for video_id, video_title in self.extract_videos_from_page(content):
199 yield self.url_result(video_id, 'Youtube', video_id, video_title)
200
201 def extract_videos_from_page(self, page):
202 ids_in_page = []
203 titles_in_page = []
204 for mobj in re.finditer(self._VIDEO_RE, page):
205 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
206 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
207 continue
208 video_id = mobj.group('id')
209 video_title = unescapeHTML(mobj.group('title'))
210 if video_title:
211 video_title = video_title.strip()
212 try:
213 idx = ids_in_page.index(video_id)
214 if video_title and not titles_in_page[idx]:
215 titles_in_page[idx] = video_title
216 except ValueError:
217 ids_in_page.append(video_id)
218 titles_in_page.append(video_title)
219 return zip(ids_in_page, titles_in_page)
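# Note on the dedup above (a sketch of the observed behaviour, not an extra guarantee):
# a video id can show up more than once in the page markup, sometimes without a usable
# title. The try/except keeps only the first occurrence of each id and backfills its
# title from a later match, so seeing id 'abc' first with an empty title and later with
# title 'Foo' still yields a single ('abc', 'Foo') pair in the result.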
220
221
222 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
223 def _process_page(self, content):
224 for playlist_id in orderedSet(re.findall(
225 r'<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*><a[^>]+href="/?playlist\?list=([0-9A-Za-z-_]{10,})"',
226 content)):
227 yield self.url_result(
228 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
229
230 def _real_extract(self, url):
231 playlist_id = self._match_id(url)
232 webpage = self._download_webpage(url, playlist_id)
233 title = self._og_search_title(webpage, fatal=False)
234 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
235
236
237 class YoutubeIE(YoutubeBaseInfoExtractor):
238 IE_DESC = 'YouTube.com'
239 _VALID_URL = r"""(?x)^
240 (
241 (?:https?://|//) # http(s):// or protocol-independent URL
242 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
243 (?:www\.)?deturl\.com/www\.youtube\.com/|
244 (?:www\.)?pwnyoutube\.com/|
245 (?:www\.)?yourepeat\.com/|
246 tube\.majestyc\.net/|
247 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
248 (?:.*?\#/)? # handle anchor (#/) redirect urls
249 (?: # the various things that can precede the ID:
250 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
251 |(?: # or the v= param in all its forms
252 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
253 (?:\?|\#!?) # the params delimiter ? or # or #!
254 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
255 v=
256 )
257 ))
258 |(?:
259 youtu\.be| # just youtu.be/xxxx
260 vid\.plus| # or vid.plus/xxxx
261 zwearz\.com/watch| # or zwearz.com/watch/xxxx
262 )/
263 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
264 )
265 )? # all until now is optional -> you can pass the naked ID
266 ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
267 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
268 (?(1).+)? # if we found the ID, everything can follow
269 $"""
270 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
271 _formats = {
272 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
273 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
274 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
275 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
276 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
277 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
278 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
279 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
280 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
281 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
282 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
283 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
284 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
285 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
286 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
287 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
288 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
289 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
290
291
292 # 3D videos
293 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
294 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
295 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
296 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
297 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
298 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
299 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
300
301 # Apple HTTP Live Streaming
302 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
303 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
304 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
305 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
306 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
307 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
308 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
309 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
310
311 # DASH mp4 video
312 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
313 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
314 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
315 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
316 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
317 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
318 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
319 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
320 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
321 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
322 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
323
324 # Dash mp4 audio
325 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
326 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
327 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
328 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
329 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'preference': -50, 'container': 'm4a_dash'},
330
331 # Dash webm
332 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
333 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
334 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
335 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
336 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
337 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
338 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
339 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
340 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
341 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
342 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
343 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
344 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
345 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
346 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
347 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
348 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
349 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
350 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
351 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
352 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
354
355 # Dash webm audio
356 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
357 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
358
359 # Dash webm audio with opus inside
360 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
361 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
362 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
363
364 # RTMP (unnamed)
365 '_rtmp': {'protocol': 'rtmp'},
366 }
367 _SUBTITLE_FORMATS = ('ttml', 'vtt')
368
369 IE_NAME = 'youtube'
370 _TESTS = [
371 {
372 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
373 'info_dict': {
374 'id': 'BaW_jenozKc',
375 'ext': 'mp4',
376 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
377 'uploader': 'Philipp Hagemeister',
378 'uploader_id': 'phihag',
379 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
380 'upload_date': '20121002',
381 'license': 'Standard YouTube License',
382 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
383 'categories': ['Science & Technology'],
384 'tags': ['youtube-dl'],
385 'like_count': int,
386 'dislike_count': int,
387 'start_time': 1,
388 'end_time': 9,
389 }
390 },
391 {
392 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
393 'note': 'Test generic use_cipher_signature video (#897)',
394 'info_dict': {
395 'id': 'UxxajLWwzqY',
396 'ext': 'mp4',
397 'upload_date': '20120506',
398 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
399 'alt_title': 'I Love It (feat. Charli XCX)',
400 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
401 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
402 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
403 'iconic ep', 'iconic', 'love', 'it'],
404 'uploader': 'Icona Pop',
405 'uploader_id': 'IconaPop',
406 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IconaPop',
407 'license': 'Standard YouTube License',
408 'creator': 'Icona Pop',
409 }
410 },
411 {
412 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
413 'note': 'Test VEVO video with age protection (#956)',
414 'info_dict': {
415 'id': '07FYdnEawAQ',
416 'ext': 'mp4',
417 'upload_date': '20130703',
418 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
419 'alt_title': 'Tunnel Vision',
420 'description': 'md5:64249768eec3bc4276236606ea996373',
421 'uploader': 'justintimberlakeVEVO',
422 'uploader_id': 'justintimberlakeVEVO',
423 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/justintimberlakeVEVO',
424 'license': 'Standard YouTube License',
425 'creator': 'Justin Timberlake',
426 'age_limit': 18,
427 }
428 },
429 {
430 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
431 'note': 'Embed-only video (#1746)',
432 'info_dict': {
433 'id': 'yZIXLfi8CZQ',
434 'ext': 'mp4',
435 'upload_date': '20120608',
436 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
437 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
438 'uploader': 'SET India',
439 'uploader_id': 'setindia',
440 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/setindia',
441 'license': 'Standard YouTube License',
442 'age_limit': 18,
443 }
444 },
445 {
446 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
447 'note': 'Use the first video ID in the URL',
448 'info_dict': {
449 'id': 'BaW_jenozKc',
450 'ext': 'mp4',
451 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
452 'uploader': 'Philipp Hagemeister',
453 'uploader_id': 'phihag',
454 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/phihag',
455 'upload_date': '20121002',
456 'license': 'Standard YouTube License',
457 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
458 'categories': ['Science & Technology'],
459 'tags': ['youtube-dl'],
460 'like_count': int,
461 'dislike_count': int,
462 },
463 'params': {
464 'skip_download': True,
465 },
466 },
467 {
468 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
469 'note': '256k DASH audio (format 141) via DASH manifest',
470 'info_dict': {
471 'id': 'a9LDPn-MO4I',
472 'ext': 'm4a',
473 'upload_date': '20121002',
474 'uploader_id': '8KVIDEO',
475 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
476 'description': '',
477 'uploader': '8KVIDEO',
478 'license': 'Standard YouTube License',
479 'title': 'UHDTV TEST 8K VIDEO.mp4'
480 },
481 'params': {
482 'youtube_include_dash_manifest': True,
483 'format': '141',
484 },
485 'skip': 'format 141 not served anymore',
486 },
487 # DASH manifest with encrypted signature
488 {
489 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
490 'info_dict': {
491 'id': 'IB3lcPjvWLA',
492 'ext': 'm4a',
493 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
494 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
495 'uploader': 'AfrojackVEVO',
496 'uploader_id': 'AfrojackVEVO',
497 'upload_date': '20131011',
498 'license': 'Standard YouTube License',
499 },
500 'params': {
501 'youtube_include_dash_manifest': True,
502 'format': '141/bestaudio[ext=m4a]',
503 },
504 },
505 # JS player signature function name containing $
506 {
507 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
508 'info_dict': {
509 'id': 'nfWlot6h_JM',
510 'ext': 'm4a',
511 'title': 'Taylor Swift - Shake It Off',
512 'alt_title': 'Shake It Off',
513 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
514 'uploader': 'TaylorSwiftVEVO',
515 'uploader_id': 'TaylorSwiftVEVO',
516 'upload_date': '20140818',
517 'license': 'Standard YouTube License',
518 'creator': 'Taylor Swift',
519 },
520 'params': {
521 'youtube_include_dash_manifest': True,
522 'format': '141/bestaudio[ext=m4a]',
523 },
524 },
525 # Controversy video
526 {
527 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
528 'info_dict': {
529 'id': 'T4XJQO3qol8',
530 'ext': 'mp4',
531 'upload_date': '20100909',
532 'uploader': 'The Amazing Atheist',
533 'uploader_id': 'TheAmazingAtheist',
534 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheAmazingAtheist',
535 'license': 'Standard YouTube License',
536 'title': 'Burning Everyone\'s Koran',
537 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
538 }
539 },
540 # Normal age-gate video (No vevo, embed allowed)
541 {
542 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
543 'info_dict': {
544 'id': 'HtVdAasjOgU',
545 'ext': 'mp4',
546 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
547 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
548 'uploader': 'The Witcher',
549 'uploader_id': 'WitcherGame',
550 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
551 'upload_date': '20140605',
552 'license': 'Standard YouTube License',
553 'age_limit': 18,
554 },
555 },
556 # Age-gate video with encrypted signature
557 {
558 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
559 'info_dict': {
560 'id': '6kLq3WMV1nU',
561 'ext': 'mp4',
562 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
563 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
564 'uploader': 'LloydVEVO',
565 'uploader_id': 'LloydVEVO',
566 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/LloydVEVO',
567 'upload_date': '20110629',
568 'license': 'Standard YouTube License',
569 'age_limit': 18,
570 },
571 },
572 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
573 {
574 'url': '__2ABJjxzNo',
575 'info_dict': {
576 'id': '__2ABJjxzNo',
577 'ext': 'mp4',
578 'upload_date': '20100430',
579 'uploader_id': 'deadmau5',
580 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/deadmau5',
581 'creator': 'deadmau5',
582 'description': 'md5:12c56784b8032162bb936a5f76d55360',
583 'uploader': 'deadmau5',
584 'license': 'Standard YouTube License',
585 'title': 'Deadmau5 - Some Chords (HD)',
586 'alt_title': 'Some Chords',
587 },
588 'expected_warnings': [
589 'DASH manifest missing',
590 ]
591 },
592 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
593 {
594 'url': 'lqQg6PlCWgI',
595 'info_dict': {
596 'id': 'lqQg6PlCWgI',
597 'ext': 'mp4',
598 'upload_date': '20150827',
599 'uploader_id': 'olympic',
600 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/olympic',
601 'license': 'Standard YouTube License',
602 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
603 'uploader': 'Olympic',
604 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
605 },
606 'params': {
607 'skip_download': 'requires avconv',
608 }
609 },
610 # Non-square pixels
611 {
612 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
613 'info_dict': {
614 'id': '_b-2C3KPAM0',
615 'ext': 'mp4',
616 'stretched_ratio': 16 / 9.,
617 'upload_date': '20110310',
618 'uploader_id': 'AllenMeow',
619 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
620 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
621 'uploader': '孫艾倫',
622 'license': 'Standard YouTube License',
623 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
624 },
625 },
626 # url_encoded_fmt_stream_map is empty string
627 {
628 'url': 'qEJwOuvDf7I',
629 'info_dict': {
630 'id': 'qEJwOuvDf7I',
631 'ext': 'webm',
632 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
633 'description': '',
634 'upload_date': '20150404',
635 'uploader_id': 'spbelect',
636 'uploader': 'Наблюдатели Петербурга',
637 },
638 'params': {
639 'skip_download': 'requires avconv',
640 },
641 'skip': 'This live event has ended.',
642 },
643 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
644 {
645 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
646 'info_dict': {
647 'id': 'FIl7x6_3R5Y',
648 'ext': 'mp4',
649 'title': 'md5:7b81415841e02ecd4313668cde88737a',
650 'description': 'md5:116377fd2963b81ec4ce64b542173306',
651 'upload_date': '20150625',
652 'uploader_id': 'dorappi2000',
653 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
654 'uploader': 'dorappi2000',
655 'license': 'Standard YouTube License',
656 'formats': 'mincount:32',
657 },
658 },
659 # DASH manifest with segment_list
660 {
661 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
662 'md5': '8ce563a1d667b599d21064e982ab9e31',
663 'info_dict': {
664 'id': 'CsmdDsKjzN8',
665 'ext': 'mp4',
666 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
667 'uploader': 'Airtek',
668 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
669 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
670 'license': 'Standard YouTube License',
671 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
672 },
673 'params': {
674 'youtube_include_dash_manifest': True,
675 'format': '135', # bestvideo
676 },
677 'skip': 'This live event has ended.',
678 },
679 {
680 # Multifeed videos (multiple cameras), URL is for Main Camera
681 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
682 'info_dict': {
683 'id': 'jqWvoWXjCVs',
684 'title': 'teamPGP: Rocket League Noob Stream',
685 'description': 'md5:dc7872fb300e143831327f1bae3af010',
686 },
687 'playlist': [{
688 'info_dict': {
689 'id': 'jqWvoWXjCVs',
690 'ext': 'mp4',
691 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
692 'description': 'md5:dc7872fb300e143831327f1bae3af010',
693 'upload_date': '20150721',
694 'uploader': 'Beer Games Beer',
695 'uploader_id': 'beergamesbeer',
696 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
697 'license': 'Standard YouTube License',
698 },
699 }, {
700 'info_dict': {
701 'id': '6h8e8xoXJzg',
702 'ext': 'mp4',
703 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
704 'description': 'md5:dc7872fb300e143831327f1bae3af010',
705 'upload_date': '20150721',
706 'uploader': 'Beer Games Beer',
707 'uploader_id': 'beergamesbeer',
708 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
709 'license': 'Standard YouTube License',
710 },
711 }, {
712 'info_dict': {
713 'id': 'PUOgX5z9xZw',
714 'ext': 'mp4',
715 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
716 'description': 'md5:dc7872fb300e143831327f1bae3af010',
717 'upload_date': '20150721',
718 'uploader': 'Beer Games Beer',
719 'uploader_id': 'beergamesbeer',
720 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
721 'license': 'Standard YouTube License',
722 },
723 }, {
724 'info_dict': {
725 'id': 'teuwxikvS5k',
726 'ext': 'mp4',
727 'title': 'teamPGP: Rocket League Noob Stream (zim)',
728 'description': 'md5:dc7872fb300e143831327f1bae3af010',
729 'upload_date': '20150721',
730 'uploader': 'Beer Games Beer',
731 'uploader_id': 'beergamesbeer',
732 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/beergamesbeer',
733 'license': 'Standard YouTube License',
734 },
735 }],
736 'params': {
737 'skip_download': True,
738 },
739 },
740 {
741 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
742 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
743 'info_dict': {
744 'id': 'gVfLd0zydlo',
745 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
746 },
747 'playlist_count': 2,
748 'skip': 'Not multifeed anymore',
749 },
750 {
751 'url': 'http://vid.plus/FlRa-iH7PGw',
752 'only_matching': True,
753 },
754 {
755 'url': 'http://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
756 'only_matching': True,
757 },
758 {
759 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
760 # Also tests cut-off URL expansion in video description (see
761 # https://github.com/rg3/youtube-dl/issues/1892,
762 # https://github.com/rg3/youtube-dl/issues/8164)
763 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
764 'info_dict': {
765 'id': 'lsguqyKfVQg',
766 'ext': 'mp4',
767 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
768 'alt_title': 'Dark Walk',
769 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
770 'upload_date': '20151119',
771 'uploader_id': 'IronSoulElf',
772 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
773 'uploader': 'IronSoulElf',
774 'license': 'Standard YouTube License',
775 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
776 },
777 'params': {
778 'skip_download': True,
779 },
780 },
781 {
782 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
783 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
784 'only_matching': True,
785 },
786 {
787 # Video with yt:stretch=17:0
788 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
789 'info_dict': {
790 'id': 'Q39EVAstoRM',
791 'ext': 'mp4',
792 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
793 'description': 'md5:ee18a25c350637c8faff806845bddee9',
794 'upload_date': '20151107',
795 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
796 'uploader': 'CH GAMER DROID',
797 },
798 'params': {
799 'skip_download': True,
800 },
801 'skip': 'This video does not exist.',
802 },
803 {
804 # Video licensed under Creative Commons
805 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
806 'info_dict': {
807 'id': 'M4gD1WSo5mA',
808 'ext': 'mp4',
809 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
810 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
811 'upload_date': '20150127',
812 'uploader_id': 'BerkmanCenter',
813 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
814 'uploader': 'BerkmanCenter',
815 'license': 'Creative Commons Attribution license (reuse allowed)',
816 },
817 'params': {
818 'skip_download': True,
819 },
820 },
821 {
822 # Channel-like uploader_url
823 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
824 'info_dict': {
825 'id': 'eQcmzGIKrzg',
826 'ext': 'mp4',
827 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
828 'description': 'md5:dda0d780d5a6e120758d1711d062a867',
829 'upload_date': '20151119',
830 'uploader': 'Bernie 2016',
831 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
832 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
833 'license': 'Creative Commons Attribution license (reuse allowed)',
834 },
835 'params': {
836 'skip_download': True,
837 },
838 },
839 {
840 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
841 'only_matching': True,
842 },
843 {
844 # YouTube Red paid video (https://github.com/rg3/youtube-dl/issues/10059)
845 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
846 'only_matching': True,
847 }
848 ]
849
850 def __init__(self, *args, **kwargs):
851 super(YoutubeIE, self).__init__(*args, **kwargs)
852 self._player_cache = {}
853
854 def report_video_info_webpage_download(self, video_id):
855 """Report attempt to download video info webpage."""
856 self.to_screen('%s: Downloading video info webpage' % video_id)
857
858 def report_information_extraction(self, video_id):
859 """Report attempt to extract video information."""
860 self.to_screen('%s: Extracting video information' % video_id)
861
862 def report_unavailable_format(self, video_id, format):
863 """Report extracted video URL."""
864 self.to_screen('%s: Format %s not available' % (video_id, format))
865
866 def report_rtmp_download(self):
867 """Indicate the download will use the RTMP protocol."""
868 self.to_screen('RTMP download detected')
869
870 def _signature_cache_id(self, example_sig):
871 """ Return a string representation of a signature """
872 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
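# For illustration: a signature like 'ABCDE.FGH.IJKLMNOP' is cached under the id
# '5.3.8' -- only the lengths of the dot-separated parts matter here, since the
# cached transformation below is just an index permutation looked up by layout.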
873
874 def _extract_signature_function(self, video_id, player_url, example_sig):
875 id_m = re.match(
876 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
877 player_url)
878 if not id_m:
879 raise ExtractorError('Cannot identify player %r' % player_url)
880 player_type = id_m.group('ext')
881 player_id = id_m.group('id')
882
883 # Read from filesystem cache
884 func_id = '%s_%s_%s' % (
885 player_type, player_id, self._signature_cache_id(example_sig))
886 assert os.path.basename(func_id) == func_id
887
888 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
889 if cache_spec is not None:
890 return lambda s: ''.join(s[i] for i in cache_spec)
891
892 download_note = (
893 'Downloading player %s' % player_url
894 if self._downloader.params.get('verbose') else
895 'Downloading %s player %s' % (player_type, player_id)
896 )
897 if player_type == 'js':
898 code = self._download_webpage(
899 player_url, video_id,
900 note=download_note,
901 errnote='Download of %s failed' % player_url)
902 res = self._parse_sig_js(code)
903 elif player_type == 'swf':
904 urlh = self._request_webpage(
905 player_url, video_id,
906 note=download_note,
907 errnote='Download of %s failed' % player_url)
908 code = urlh.read()
909 res = self._parse_sig_swf(code)
910 else:
911 assert False, 'Invalid player type %r' % player_type
912
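# How the caching below works (a sketch, based on the code in this method): the
# extracted function is applied to a test string '\x00\x01\x02...' whose characters
# encode their own positions; ord() of each output character then records which
# input index ended up at each output position, so the transformation can later be
# replayed as a plain index lookup (''.join(s[i] for i in cache_spec)) without
# re-downloading the player. This assumes the player function only reorders or
# drops characters, which is all the cached spec can represent.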
913 test_string = ''.join(map(compat_chr, range(len(example_sig))))
914 cache_res = res(test_string)
915 cache_spec = [ord(c) for c in cache_res]
916
917 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
918 return res
919
920 def _print_sig_code(self, func, example_sig):
921 def gen_sig_code(idxs):
922 def _genslice(start, end, step):
923 starts = '' if start == 0 else str(start)
924 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
925 steps = '' if step == 1 else (':%d' % step)
926 return 's[%s%s%s]' % (starts, ends, steps)
927
928 step = None
929 # Quell pyflakes warnings - start will be set when step is set
930 start = '(Never used)'
931 for i, prev in zip(idxs[1:], idxs[:-1]):
932 if step is not None:
933 if i - prev == step:
934 continue
935 yield _genslice(start, prev, step)
936 step = None
937 continue
938 if i - prev in [-1, 1]:
939 step = i - prev
940 start = prev
941 continue
942 else:
943 yield 's[%d]' % prev
944 if step is None:
945 yield 's[%d]' % i
946 else:
947 yield _genslice(start, i, step)
948
949 test_string = ''.join(map(compat_chr, range(len(example_sig))))
950 cache_res = func(test_string)
951 cache_spec = [ord(c) for c in cache_res]
952 expr_code = ' + '.join(gen_sig_code(cache_spec))
953 signature_id_tuple = '(%s)' % (
954 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
955 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
956 ' return %s\n') % (signature_id_tuple, expr_code)
957 self.to_screen('Extracted signature function:\n' + code)
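# When the youtube_print_sig_code option is set, the method above prints something
# roughly like the following (made-up indices, shown only to illustrate the format):
#   if tuple(len(p) for p in s.split('.')) == (8, 40):
#       return s[3] + s[10:25] + s[0]
# i.e. the deciphering step expressed as plain Python slicing, presumably so it can
# be pasted into a hard-coded signature routine.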
958
959 def _parse_sig_js(self, jscode):
960 funcname = self._search_regex(
961 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
962 'Initial JS player signature function name')
963
964 jsi = JSInterpreter(jscode)
965 initial_function = jsi.extract_function(funcname)
966 return lambda s: initial_function([s])
967
968 def _parse_sig_swf(self, file_contents):
969 swfi = SWFInterpreter(file_contents)
970 TARGET_CLASSNAME = 'SignatureDecipher'
971 searched_class = swfi.extract_class(TARGET_CLASSNAME)
972 initial_function = swfi.extract_function(searched_class, 'decipher')
973 return lambda s: initial_function([s])
974
975 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
976 """Turn the encrypted s field into a working signature"""
977
978 if player_url is None:
979 raise ExtractorError('Cannot decrypt signature without player_url')
980
981 if player_url.startswith('//'):
982 player_url = 'https:' + player_url
983 try:
984 player_id = (player_url, self._signature_cache_id(s))
985 if player_id not in self._player_cache:
986 func = self._extract_signature_function(
987 video_id, player_url, s
988 )
989 self._player_cache[player_id] = func
990 func = self._player_cache[player_id]
991 if self._downloader.params.get('youtube_print_sig_code'):
992 self._print_sig_code(func, s)
993 return func(s)
994 except Exception as e:
995 tb = traceback.format_exc()
996 raise ExtractorError(
997 'Signature extraction failed: ' + tb, cause=e)
998
999 def _get_subtitles(self, video_id, webpage):
1000 try:
1001 subs_doc = self._download_xml(
1002 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
1003 video_id, note=False)
1004 except ExtractorError as err:
1005 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
1006 return {}
1007
1008 sub_lang_list = {}
1009 for track in subs_doc.findall('track'):
1010 lang = track.attrib['lang_code']
1011 if lang in sub_lang_list:
1012 continue
1013 sub_formats = []
1014 for ext in self._SUBTITLE_FORMATS:
1015 params = compat_urllib_parse_urlencode({
1016 'lang': lang,
1017 'v': video_id,
1018 'fmt': ext,
1019 'name': track.attrib['name'].encode('utf-8'),
1020 })
1021 sub_formats.append({
1022 'url': 'https://www.youtube.com/api/timedtext?' + params,
1023 'ext': ext,
1024 })
1025 sub_lang_list[lang] = sub_formats
1026 if not sub_lang_list:
1027 self._downloader.report_warning('video doesn\'t have subtitles')
1028 return {}
1029 return sub_lang_list
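# For reference, the per-language entries built above look roughly like (illustrative
# values, parameter order depends on urlencode):
#   {'en': [{'url': 'https://www.youtube.com/api/timedtext?lang=en&v=<id>&fmt=ttml&name=...', 'ext': 'ttml'},
#           {'url': 'https://www.youtube.com/api/timedtext?lang=en&v=<id>&fmt=vtt&name=...', 'ext': 'vtt'}]}
# i.e. one timedtext URL per entry in _SUBTITLE_FORMATS.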
1030
1031 def _get_ytplayer_config(self, video_id, webpage):
1032 patterns = (
1033 # User data may contain arbitrary character sequences that can break
1034 # JSON extraction with a regex, e.g. when '};' appears inside a value the
1035 # second regex won't capture the whole JSON. Work around this by trying the
1036 # more specific regex first; proper quoted-string handling, to be
1037 # implemented in the future, will replace this workaround (see
1038 # https://github.com/rg3/youtube-dl/issues/7468,
1039 # https://github.com/rg3/youtube-dl/pull/7599)
1040 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
1041 r';ytplayer\.config\s*=\s*({.+?});',
1042 )
1043 config = self._search_regex(
1044 patterns, webpage, 'ytplayer.config', default=None)
1045 if config:
1046 return self._parse_json(
1047 uppercase_escape(config), video_id, fatal=False)
1048
1049 def _get_automatic_captions(self, video_id, webpage):
1050 """We need the webpage for getting the captions url, pass it as an
1051 argument to speed up the process."""
1052 self.to_screen('%s: Looking for automatic captions' % video_id)
1053 player_config = self._get_ytplayer_config(video_id, webpage)
1054 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1055 if not player_config:
1056 self._downloader.report_warning(err_msg)
1057 return {}
1058 try:
1059 args = player_config['args']
1060 caption_url = args.get('ttsurl')
1061 if caption_url:
1062 timestamp = args['timestamp']
1063 # We get the available subtitles
1064 list_params = compat_urllib_parse_urlencode({
1065 'type': 'list',
1066 'tlangs': 1,
1067 'asrs': 1,
1068 })
1069 list_url = caption_url + '&' + list_params
1070 caption_list = self._download_xml(list_url, video_id)
1071 original_lang_node = caption_list.find('track')
1072 if original_lang_node is None:
1073 self._downloader.report_warning('Video doesn\'t have automatic captions')
1074 return {}
1075 original_lang = original_lang_node.attrib['lang_code']
1076 caption_kind = original_lang_node.attrib.get('kind', '')
1077
1078 sub_lang_list = {}
1079 for lang_node in caption_list.findall('target'):
1080 sub_lang = lang_node.attrib['lang_code']
1081 sub_formats = []
1082 for ext in self._SUBTITLE_FORMATS:
1083 params = compat_urllib_parse_urlencode({
1084 'lang': original_lang,
1085 'tlang': sub_lang,
1086 'fmt': ext,
1087 'ts': timestamp,
1088 'kind': caption_kind,
1089 })
1090 sub_formats.append({
1091 'url': caption_url + '&' + params,
1092 'ext': ext,
1093 })
1094 sub_lang_list[sub_lang] = sub_formats
1095 return sub_lang_list
1096
1097 # Some videos don't provide ttsurl but rather caption_tracks and
1098 # caption_translation_languages (e.g. 20LmZk1hakA)
1099 caption_tracks = args['caption_tracks']
1100 caption_translation_languages = args['caption_translation_languages']
1101 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1102 parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
1103 caption_qs = compat_parse_qs(parsed_caption_url.query)
1104
1105 sub_lang_list = {}
1106 for lang in caption_translation_languages.split(','):
1107 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1108 sub_lang = lang_qs.get('lc', [None])[0]
1109 if not sub_lang:
1110 continue
1111 sub_formats = []
1112 for ext in self._SUBTITLE_FORMATS:
1113 caption_qs.update({
1114 'tlang': [sub_lang],
1115 'fmt': [ext],
1116 })
1117 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1118 query=compat_urllib_parse_urlencode(caption_qs, True)))
1119 sub_formats.append({
1120 'url': sub_url,
1121 'ext': ext,
1122 })
1123 sub_lang_list[sub_lang] = sub_formats
1124 return sub_lang_list
1125 # An extractor error can be raised by the download process if there are
1126 # no automatic captions but there are subtitles
1127 except (KeyError, ExtractorError):
1128 self._downloader.report_warning(err_msg)
1129 return {}
1130
1131 def _mark_watched(self, video_id, video_info):
1132 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1133 if not playback_url:
1134 return
1135 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1136 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1137
1138 # cpn generation algorithm is reverse engineered from base.js.
1139 # In fact it works even with dummy cpn.
1140 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1141 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
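# A minimal sketch of what the two lines above produce: 16 characters drawn from a
# 64-symbol alphabet, e.g. 'tK3mZ9_qY-bW4xPn' (made up). random.randint(0, 256) & 63
# simply masks the value into the 0-63 range, and per the comment above any such
# 16-character token appears to be accepted.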
1142
1143 qs.update({
1144 'ver': ['2'],
1145 'cpn': [cpn],
1146 })
1147 playback_url = compat_urlparse.urlunparse(
1148 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1149
1150 self._download_webpage(
1151 playback_url, video_id, 'Marking watched',
1152 'Unable to mark watched', fatal=False)
1153
1154 @classmethod
1155 def extract_id(cls, url):
1156 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1157 if mobj is None:
1158 raise ExtractorError('Invalid URL: %s' % url)
1159 video_id = mobj.group(2)
1160 return video_id
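# For illustration, each of these yields 'BaW_jenozKc' (the bare 11-character id
# captured by group 2 of _VALID_URL):
#   extract_id('https://www.youtube.com/watch?v=BaW_jenozKc&t=1s')
#   extract_id('https://youtu.be/BaW_jenozKc')
#   extract_id('BaW_jenozKc')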
1161
1162 def _extract_from_m3u8(self, manifest_url, video_id):
1163 url_map = {}
1164
1165 def _get_urls(_manifest):
1166 lines = _manifest.split('\n')
1167 urls = filter(lambda l: l and not l.startswith('#'),
1168 lines)
1169 return urls
1170 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1171 formats_urls = _get_urls(manifest)
1172 for format_url in formats_urls:
1173 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1174 url_map[itag] = format_url
1175 return url_map
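# The resulting map is keyed by the itag parsed out of each variant URL, roughly
#   {'93': 'https://.../itag/93/...', '95': 'https://.../itag/95/...'}
# (illustrative URLs); the itag keys line up with the entries in self._formats above.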
1176
1177 def _extract_annotations(self, video_id):
1178 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1179 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1180
1181 def _real_extract(self, url):
1182 url, smuggled_data = unsmuggle_url(url, {})
1183
1184 proto = (
1185 'http' if self._downloader.params.get('prefer_insecure', False)
1186 else 'https')
1187
1188 start_time = None
1189 end_time = None
1190 parsed_url = compat_urllib_parse_urlparse(url)
1191 for component in [parsed_url.fragment, parsed_url.query]:
1192 query = compat_parse_qs(component)
1193 if start_time is None and 't' in query:
1194 start_time = parse_duration(query['t'][0])
1195 if start_time is None and 'start' in query:
1196 start_time = parse_duration(query['start'][0])
1197 if end_time is None and 'end' in query:
1198 end_time = parse_duration(query['end'][0])
1199
1200 # Extract original video URL from URL with redirection, like age verification, using next_url parameter
1201 mobj = re.search(self._NEXT_URL_RE, url)
1202 if mobj:
1203 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1204 video_id = self.extract_id(url)
1205
1206 # Get video webpage
1207 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1208 video_webpage = self._download_webpage(url, video_id)
1209
1210 # Attempt to extract SWF player URL
1211 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1212 if mobj is not None:
1213 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1214 else:
1215 player_url = None
1216
1217 dash_mpds = []
1218
1219 def add_dash_mpd(video_info):
1220 dash_mpd = video_info.get('dashmpd')
1221 if dash_mpd and dash_mpd[0] not in dash_mpds:
1222 dash_mpds.append(dash_mpd[0])
1223
1224 # Get video info
1225 embed_webpage = None
1226 is_live = None
1227 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1228 age_gate = True
1229 # We simulate access to the video from www.youtube.com/v/{video_id};
1230 # this page can be viewed without logging in to YouTube
1231 url = proto + '://www.youtube.com/embed/%s' % video_id
1232 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1233 data = compat_urllib_parse_urlencode({
1234 'video_id': video_id,
1235 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1236 'sts': self._search_regex(
1237 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1238 })
1239 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1240 video_info_webpage = self._download_webpage(
1241 video_info_url, video_id,
1242 note='Refetching age-gated info webpage',
1243 errnote='unable to download video info webpage')
1244 video_info = compat_parse_qs(video_info_webpage)
1245 add_dash_mpd(video_info)
1246 else:
1247 age_gate = False
1248 video_info = None
1249 # Try looking directly into the video webpage
1250 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1251 if ytplayer_config:
1252 args = ytplayer_config['args']
1253 if args.get('url_encoded_fmt_stream_map'):
1254 # Convert to the same format returned by compat_parse_qs
1255 video_info = dict((k, [v]) for k, v in args.items())
1256 add_dash_mpd(video_info)
1257 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1258 is_live = True
1259 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1260 # We also try looking in get_video_info since it may contain a different dashmpd
1261 # URL that points to a DASH manifest with a possibly different itag set (some itags
1262 # are missing from the DASH manifest pointed to by the webpage's dashmpd, and some
1263 # from the manifest pointed to by get_video_info's dashmpd).
1264 # The general idea is to take the union of the itags of both DASH manifests (for an
1265 # example of a video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
1266 self.report_video_info_webpage_download(video_id)
1267 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1268 video_info_url = (
1269 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1270 % (proto, video_id, el_type))
1271 video_info_webpage = self._download_webpage(
1272 video_info_url,
1273 video_id, note=False,
1274 errnote='unable to download video info webpage')
1275 get_video_info = compat_parse_qs(video_info_webpage)
1276 if get_video_info.get('use_cipher_signature') != ['True']:
1277 add_dash_mpd(get_video_info)
1278 if not video_info:
1279 video_info = get_video_info
1280 if 'token' in get_video_info:
1281 # Different get_video_info requests may report different results, e.g.
1282 # some may report video unavailability, but some may serve it without
1283 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1284 # the original webpage as well as el=info and el=embedded get_video_info
1285 # requests report video unavailability due to geo restriction while
1286 # el=detailpage succeeds and returns valid data). This is probably
1287 # due to YouTube measures against IP ranges of hosting providers.
1288 # Work around this by preferring the first successful video_info containing
1289 # the token if no such video_info has been found yet.
1290 if 'token' not in video_info:
1291 video_info = get_video_info
1292 break
1293 if 'token' not in video_info:
1294 if 'reason' in video_info:
1295 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1296 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1297 if regions_allowed:
1298 raise ExtractorError('YouTube said: This video is available in %s only' % (
1299 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1300 expected=True)
1301 raise ExtractorError(
1302 'YouTube said: %s' % video_info['reason'][0],
1303 expected=True, video_id=video_id)
1304 else:
1305 raise ExtractorError(
1306 '"token" parameter not in video info for unknown reason',
1307 video_id=video_id)
1308
1309 # title
1310 if 'title' in video_info:
1311 video_title = video_info['title'][0]
1312 else:
1313 self._downloader.report_warning('Unable to extract video title')
1314 video_title = '_'
1315
1316 # description
1317 video_description = get_element_by_id("eow-description", video_webpage)
1318 if video_description:
1319 video_description = re.sub(r'''(?x)
1320 <a\s+
1321 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1322 (?:title|href)="([^"]+)"\s+
1323 (?:[a-zA-Z-]+="[^"]*"\s+)*?
1324 class="[^"]*"[^>]*>
1325 [^<]+\.{3}\s*
1326 </a>
1327 ''', r'\1', video_description)
1328 video_description = clean_html(video_description)
1329 else:
1330 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1331 if fd_mobj:
1332 video_description = unescapeHTML(fd_mobj.group(1))
1333 else:
1334 video_description = ''
1335
1336 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1337 if not self._downloader.params.get('noplaylist'):
1338 entries = []
1339 feed_ids = []
1340 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1341 for feed in multifeed_metadata_list.split(','):
1342 # Unquoting should take place before splitting on comma (,) since textual
1343 # fields may contain commas as well (see
1344 # https://github.com/rg3/youtube-dl/issues/8536)
1345 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1346 entries.append({
1347 '_type': 'url_transparent',
1348 'ie_key': 'Youtube',
1349 'url': smuggle_url(
1350 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1351 {'force_singlefeed': True}),
1352 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1353 })
1354 feed_ids.append(feed_data['id'][0])
1355 self.to_screen(
1356 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1357 % (', '.join(feed_ids), video_id))
1358 return self.playlist_result(entries, video_id, video_title, video_description)
1359 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1360
1361 if 'view_count' in video_info:
1362 view_count = int(video_info['view_count'][0])
1363 else:
1364 view_count = None
1365
1366 # Check for "rental" videos
1367 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1368 raise ExtractorError('"rental" videos not supported')
1369
1370 # Start extracting information
1371 self.report_information_extraction(video_id)
1372
1373 # uploader
1374 if 'author' not in video_info:
1375 raise ExtractorError('Unable to extract uploader name')
1376 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1377
1378 # uploader_id
1379 video_uploader_id = None
1380 video_uploader_url = None
1381 mobj = re.search(
1382 r'<link itemprop="url" href="(?P<uploader_url>https?://www.youtube.com/(?:user|channel)/(?P<uploader_id>[^"]+))">',
1383 video_webpage)
1384 if mobj is not None:
1385 video_uploader_id = mobj.group('uploader_id')
1386 video_uploader_url = mobj.group('uploader_url')
1387 else:
1388 self._downloader.report_warning('unable to extract uploader nickname')
1389
1390 # thumbnail image
1391 # We try first to get a high quality image:
1392 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1393 video_webpage, re.DOTALL)
1394 if m_thumb is not None:
1395 video_thumbnail = m_thumb.group(1)
1396 elif 'thumbnail_url' not in video_info:
1397 self._downloader.report_warning('unable to extract video thumbnail')
1398 video_thumbnail = None
1399 else: # don't panic if we can't find it
1400 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1401
1402 # upload date
1403 upload_date = self._html_search_meta(
1404 'datePublished', video_webpage, 'upload date', default=None)
1405 if not upload_date:
1406 upload_date = self._search_regex(
1407 [r'(?s)id="eow-date.*?>(.*?)</span>',
1408 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1409 video_webpage, 'upload date', default=None)
1410 if upload_date:
1411 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1412 upload_date = unified_strdate(upload_date)
1413
1414 video_license = self._html_search_regex(
1415 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1416 video_webpage, 'license', default=None)
1417
1418 m_music = re.search(
1419 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1420 video_webpage)
1421 if m_music:
1422 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1423 video_creator = clean_html(m_music.group('creator'))
1424 else:
1425 video_alt_title = video_creator = None
1426
1427 m_cat_container = self._search_regex(
1428 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1429 video_webpage, 'categories', default=None)
1430 if m_cat_container:
1431 category = self._html_search_regex(
1432 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1433 default=None)
1434 video_categories = None if category is None else [category]
1435 else:
1436 video_categories = None
1437
1438 video_tags = [
1439 unescapeHTML(m.group('content'))
1440 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1441
1442 def _extract_count(count_name):
1443 return str_to_int(self._search_regex(
1444 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1445 % re.escape(count_name),
1446 video_webpage, count_name, default=None))
1447
1448 like_count = _extract_count('like')
1449 dislike_count = _extract_count('dislike')
1450
1451 # subtitles
1452 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1453 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1454
1455 if 'length_seconds' not in video_info:
1456 self._downloader.report_warning('unable to extract video duration')
1457 video_duration = None
1458 else:
1459 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1460
1461 # annotations
1462 video_annotations = None
1463 if self._downloader.params.get('writeannotations', False):
1464 video_annotations = self._extract_annotations(video_id)
1465
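# _map_to_format_list (below) turns a mapping of itag strings to direct
# media URLs, e.g. {'22': 'https://...'} (illustrative), into format dicts,
# enriching known itags with metadata from self._formats; it is used by the
# HLS ('hlsvp') branch further down.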
1466 def _map_to_format_list(urlmap):
1467 formats = []
1468 for itag, video_real_url in urlmap.items():
1469 dct = {
1470 'format_id': itag,
1471 'url': video_real_url,
1472 'player_url': player_url,
1473 }
1474 if itag in self._formats:
1475 dct.update(self._formats[itag])
1476 formats.append(dct)
1477 return formats
1478
1479 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1480 self.report_rtmp_download()
1481 formats = [{
1482 'format_id': '_rtmp',
1483 'protocol': 'rtmp',
1484 'url': video_info['conn'][0],
1485 'player_url': player_url,
1486 }]
1487 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1488 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
1489 if 'rtmpe%3Dyes' in encoded_url_map:
1490 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
1491 formats_spec = {}
1492 fmt_list = video_info.get('fmt_list', [''])[0]
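# fmt_list is a comma-separated list of slash-separated specs whose first
# two fields are the itag and the resolution (roughly of the shape
# '22/1280x720/...,18/640x360/...'); only those two fields are used here.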
1493 if fmt_list:
1494 for fmt in fmt_list.split(','):
1495 spec = fmt.split('/')
1496 if len(spec) > 1:
1497 width_height = spec[1].split('x')
1498 if len(width_height) == 2:
1499 formats_spec[spec[0]] = {
1500 'resolution': spec[1],
1501 'width': int_or_none(width_height[0]),
1502 'height': int_or_none(width_height[1]),
1503 }
1504 formats = []
1505 for url_data_str in encoded_url_map.split(','):
1506 url_data = compat_parse_qs(url_data_str)
1507 if 'itag' not in url_data or 'url' not in url_data:
1508 continue
1509 format_id = url_data['itag'][0]
1510 url = url_data['url'][0]
1511
1512 if 'sig' in url_data:
1513 url += '&signature=' + url_data['sig'][0]
1514 elif 's' in url_data:
1515 encrypted_sig = url_data['s'][0]
1516 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
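# The watch/embed page exposes the JS player URL as "assets": {"js": ...};
# that player script is what _decrypt_signature relies on to de-scramble
# the 's' parameter into a usable signature.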
1517
1518 jsplayer_url_json = self._search_regex(
1519 ASSETS_RE,
1520 embed_webpage if age_gate else video_webpage,
1521 'JS player URL (1)', default=None)
1522 if not jsplayer_url_json and not age_gate:
1523 # We need the embed website after all
1524 if embed_webpage is None:
1525 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1526 embed_webpage = self._download_webpage(
1527 embed_url, video_id, 'Downloading embed webpage')
1528 jsplayer_url_json = self._search_regex(
1529 ASSETS_RE, embed_webpage, 'JS player URL')
1530
1531 player_url = json.loads(jsplayer_url_json)
1532 if player_url is None:
1533 player_url_json = self._search_regex(
1534 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1535 video_webpage, 'age gate player URL')
1536 player_url = json.loads(player_url_json)
1537
1538 if self._downloader.params.get('verbose'):
1539 if player_url is None:
1540 player_version = 'unknown'
1541 player_desc = 'unknown'
1542 else:
1543 if player_url.endswith('swf'):
1544 player_version = self._search_regex(
1545 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1546 'flash player', fatal=False)
1547 player_desc = 'flash player %s' % player_version
1548 else:
1549 player_version = self._search_regex(
1550 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1551 player_url,
1552 'html5 player', fatal=False)
1553 player_desc = 'html5 player %s' % player_version
1554
1555 parts_sizes = self._signature_cache_id(encrypted_sig)
1556 self.to_screen('{%s} signature length %s, %s' %
1557 (format_id, parts_sizes, player_desc))
1558
1559 signature = self._decrypt_signature(
1560 encrypted_sig, video_id, player_url, age_gate)
1561 url += '&signature=' + signature
1562 if 'ratebypass' not in url:
1563 url += '&ratebypass=yes'
1564
1565 dct = {
1566 'format_id': format_id,
1567 'url': url,
1568 'player_url': player_url,
1569 }
1570 if format_id in self._formats:
1571 dct.update(self._formats[format_id])
1572 if format_id in formats_spec:
1573 dct.update(formats_spec[format_id])
1574
1575 # Some itags are not included in DASH manifest thus corresponding formats will
1576 # lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1577 # Trying to extract metadata from url_encoded_fmt_stream_map entry.
1578 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1579 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1580
1581 more_fields = {
1582 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1583 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1584 'width': width,
1585 'height': height,
1586 'fps': int_or_none(url_data.get('fps', [None])[0]),
1587 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1588 }
1589 for key, value in more_fields.items():
1590 if value:
1591 dct[key] = value
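# The 'type' field is a MIME type plus optional parameters, typically
# something like 'video/mp4; codecs="avc1.4d401f, mp4a.40.2"'
# (illustrative); the codecs parameter is parsed below into acodec/vcodec.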
1592 type_ = url_data.get('type', [None])[0]
1593 if type_:
1594 type_split = type_.split(';')
1595 kind_ext = type_split[0].split('/')
1596 if len(kind_ext) == 2:
1597 kind, _ = kind_ext
1598 dct['ext'] = mimetype2ext(type_split[0])
1599 if kind in ('audio', 'video'):
1600 codecs = None
1601 for mobj in re.finditer(
1602 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1603 if mobj.group('key') == 'codecs':
1604 codecs = mobj.group('val')
1605 break
1606 if codecs:
1607 codecs = codecs.split(',')
1608 if len(codecs) == 2:
1609 acodec, vcodec = codecs[1], codecs[0]
1610 else:
1611 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1612 dct.update({
1613 'acodec': acodec,
1614 'vcodec': vcodec,
1615 })
1616 formats.append(dct)
1617 elif video_info.get('hlsvp'):
1618 manifest_url = video_info['hlsvp'][0]
1619 url_map = self._extract_from_m3u8(manifest_url, video_id)
1620 formats = _map_to_format_list(url_map)
1621 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1622 for a_format in formats:
1623 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1624 else:
1625 unavailable_message = self._html_search_regex(
1626 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1627 video_webpage, 'unavailable message', default=None)
1628 if unavailable_message:
1629 raise ExtractorError(unavailable_message, expected=True)
1630 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1631
1632 # Look for the DASH manifest
1633 if self._downloader.params.get('youtube_include_dash_manifest', True):
1634 dash_mpd_fatal = True
1635 for mpd_url in dash_mpds:
1636 dash_formats = {}
1637 try:
1638 def decrypt_sig(mobj):
1639 s = mobj.group(1)
1640 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1641 return '/signature/%s' % dec_s
1642
1643 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
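# DASH manifest URLs may carry an encrypted signature as a /s/... path
# component; it is decrypted with the same routine used for the regular
# formats and re-inserted as /signature/<decrypted signature>.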
1644
1645 for df in self._extract_mpd_formats(
1646 mpd_url, video_id, fatal=dash_mpd_fatal,
1647 formats_dict=self._formats):
1648 # Do not overwrite DASH format found in some previous DASH manifest
1649 if df['format_id'] not in dash_formats:
1650 dash_formats[df['format_id']] = df
1651 # Additional DASH manifests may end up in HTTP Error 403 therefore
1652 # allow them to fail without bug report message if we already have
1653 # some DASH manifest succeeded. This is temporary workaround to reduce
1654 # burst of bug reports until we figure out the reason and whether it
1655 # can be fixed at all.
1656 dash_mpd_fatal = False
1657 except (ExtractorError, KeyError) as e:
1658 self.report_warning(
1659 'Skipping DASH manifest: %r' % e, video_id)
1660 if dash_formats:
1661 # Remove the formats we found through non-DASH, they
1662 # contain less info and it can be wrong, because we use
1663 # fixed values (for example the resolution). See
1664 # https://github.com/rg3/youtube-dl/issues/5774 for an
1665 # example.
1666 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1667 formats.extend(dash_formats.values())
1668
1669 # Check for malformed aspect ratio
1670 stretched_m = re.search(
1671 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1672 video_webpage)
1673 if stretched_m:
1674 w = float(stretched_m.group('w'))
1675 h = float(stretched_m.group('h'))
1676 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1677 # We will only process correct ratios.
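# For example, yt:stretch=16:9 gives ratio = 16 / 9 (about 1.78), which is
# attached below as 'stretched_ratio' to every format with a video codec.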
1678 if w > 0 and h > 0:
1679 ratio = w / h
1680 for f in formats:
1681 if f.get('vcodec') != 'none':
1682 f['stretched_ratio'] = ratio
1683
1684 self._sort_formats(formats)
1685
1686 self.mark_watched(video_id, video_info)
1687
1688 return {
1689 'id': video_id,
1690 'uploader': video_uploader,
1691 'uploader_id': video_uploader_id,
1692 'uploader_url': video_uploader_url,
1693 'upload_date': upload_date,
1694 'license': video_license,
1695 'creator': video_creator,
1696 'title': video_title,
1697 'alt_title': video_alt_title,
1698 'thumbnail': video_thumbnail,
1699 'description': video_description,
1700 'categories': video_categories,
1701 'tags': video_tags,
1702 'subtitles': video_subtitles,
1703 'automatic_captions': automatic_captions,
1704 'duration': video_duration,
1705 'age_limit': 18 if age_gate else 0,
1706 'annotations': video_annotations,
1707 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1708 'view_count': view_count,
1709 'like_count': like_count,
1710 'dislike_count': dislike_count,
1711 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1712 'formats': formats,
1713 'is_live': is_live,
1714 'start_time': start_time,
1715 'end_time': end_time,
1716 }
1717
1718
1719 class YoutubeSharedVideoIE(InfoExtractor):
1720 _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P<id>[0-9A-Za-z_-]{11})'
1721 IE_NAME = 'youtube:shared'
1722
1723 _TEST = {
1724 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
1725 'info_dict': {
1726 'id': 'uPDB5I9wfp8',
1727 'ext': 'webm',
1728 'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
1729 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
1730 'upload_date': '20160219',
1731 'uploader': 'Pocoyo - Português (BR)',
1732 'uploader_id': 'PocoyoBrazil',
1733 },
1734 'add_ie': ['Youtube'],
1735 'params': {
1736 # There are already too many Youtube downloads
1737 'skip_download': True,
1738 },
1739 }
1740
1741 def _real_extract(self, url):
1742 video_id = self._match_id(url)
1743
1744 webpage = self._download_webpage(url, video_id)
1745
1746 real_video_id = self._html_search_meta(
1747 'videoId', webpage, 'YouTube video id', fatal=True)
1748
1749 return self.url_result(real_video_id, YoutubeIE.ie_key())
1750
1751
1752 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1753 IE_DESC = 'YouTube.com playlists'
1754 _VALID_URL = r"""(?x)(?:
1755 (?:https?://)?
1756 (?:\w+\.)?
1757 youtube\.com/
1758 (?:
1759 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1760 \? (?:.*?[&;])*? (?:p|a|list)=
1761 | p/
1762 )
1763 (
1764 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1765 # Top tracks, they can also include dots
1766 |(?:MC)[\w\.]*
1767 )
1768 .*
1769 |
1770 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1771 )"""
1772 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1773 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1774 IE_NAME = 'youtube:playlist'
1775 _TESTS = [{
1776 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1777 'info_dict': {
1778 'title': 'ytdl test PL',
1779 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1780 },
1781 'playlist_count': 3,
1782 }, {
1783 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1784 'info_dict': {
1785 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1786 'title': 'YDL_Empty_List',
1787 },
1788 'playlist_count': 0,
1789 }, {
1790 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1791 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1792 'info_dict': {
1793 'title': '29C3: Not my department',
1794 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1795 },
1796 'playlist_count': 95,
1797 }, {
1798 'note': 'issue #673',
1799 'url': 'PLBB231211A4F62143',
1800 'info_dict': {
1801 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1802 'id': 'PLBB231211A4F62143',
1803 },
1804 'playlist_mincount': 26,
1805 }, {
1806 'note': 'Large playlist',
1807 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1808 'info_dict': {
1809 'title': 'Uploads from Cauchemar',
1810 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1811 },
1812 'playlist_mincount': 799,
1813 }, {
1814 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1815 'info_dict': {
1816 'title': 'YDL_safe_search',
1817 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1818 },
1819 'playlist_count': 2,
1820 }, {
1821 'note': 'embedded',
1822 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1823 'playlist_count': 4,
1824 'info_dict': {
1825 'title': 'JODA15',
1826 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1827 }
1828 }, {
1829 'note': 'Embedded SWF player',
1830 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1831 'playlist_count': 4,
1832 'info_dict': {
1833 'title': 'JODA7',
1834 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1835 }
1836 }, {
1837 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1838 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1839 'info_dict': {
1840 'title': 'Uploads from Interstellar Movie',
1841 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1842 },
1843 'playlist_mincount': 21,
1844 }, {
1845 # Playlist URL that does not actually serve a playlist
1846 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
1847 'info_dict': {
1848 'id': 'FqZTN594JQw',
1849 'ext': 'webm',
1850 'title': "Smiley's People 01 detective, Adventure Series, Action",
1851 'uploader': 'STREEM',
1852 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
1853 'uploader_url': 're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
1854 'upload_date': '20150526',
1855 'license': 'Standard YouTube License',
1856 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
1857 'categories': ['People & Blogs'],
1858 'tags': list,
1859 'like_count': int,
1860 'dislike_count': int,
1861 },
1862 'params': {
1863 'skip_download': True,
1864 },
1865 'add_ie': [YoutubeIE.ie_key()],
1866 }]
1867
1868 def _real_initialize(self):
1869 self._login()
1870
1871 def _extract_mix(self, playlist_id):
1872 # The mixes are generated from a single video
1873 # the id of the playlist is just 'RD' + video_id
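# (i.e. a mix URL carries list=RD<11-character video id>, so the seed
# video id can be recovered from the last 11 characters of playlist_id)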
1874 ids = []
1875 last_id = playlist_id[-11:]
1876 for n in itertools.count(1):
1877 url = 'https://youtube.com/watch?v=%s&list=%s' % (last_id, playlist_id)
1878 webpage = self._download_webpage(
1879 url, playlist_id, 'Downloading page {0} of Youtube mix'.format(n))
1880 new_ids = orderedSet(re.findall(
1881 r'''(?xs)data-video-username=".*?".*?
1882 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1883 webpage))
1884 # Fetch new pages until all the videos are repeated, it seems that
1885 # there are always 51 unique videos.
1886 new_ids = [_id for _id in new_ids if _id not in ids]
1887 if not new_ids:
1888 break
1889 ids.extend(new_ids)
1890 last_id = ids[-1]
1891
1892 url_results = self._ids_to_results(ids)
1893
1894 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1895 title_span = (
1896 search_title('playlist-title') or
1897 search_title('title long-title') or
1898 search_title('title'))
1899 title = clean_html(title_span)
1900
1901 return self.playlist_result(url_results, playlist_id, title)
1902
1903 def _extract_playlist(self, playlist_id):
1904 url = self._TEMPLATE_URL % playlist_id
1905 page = self._download_webpage(url, playlist_id)
1906
1907 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1908 match = match.strip()
1909 # Check if the playlist exists or is private
1910 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1911 raise ExtractorError(
1912 'The playlist doesn\'t exist or is private, use --username or '
1913 '--netrc to access it.',
1914 expected=True)
1915 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1916 raise ExtractorError(
1917 'Invalid parameters. Maybe URL is incorrect.',
1918 expected=True)
1919 elif re.match(r'[^<]*Choose your language[^<]*', match):
1920 continue
1921 else:
1922 self.report_warning('Youtube gives an alert message: ' + match)
1923
1924 playlist_title = self._html_search_regex(
1925 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1926 page, 'title', default=None)
1927
1928 has_videos = True
1929
1930 if not playlist_title:
1931 try:
1932 # Some playlist URLs don't actually serve a playlist (e.g.
1933 # https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4)
1934 next(self._entries(page, playlist_id))
1935 except StopIteration:
1936 has_videos = False
1937
1938 return has_videos, self.playlist_result(
1939 self._entries(page, playlist_id), playlist_id, playlist_title)
1940
1941 def _check_download_just_video(self, url, playlist_id):
1942 # Check if it's a video-specific URL
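# (i.e. a watch URL carrying both v= and list=; with --no-playlist only
# the single video is extracted, otherwise the playlist takes precedence)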
1943 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1944 if 'v' in query_dict:
1945 video_id = query_dict['v'][0]
1946 if self._downloader.params.get('noplaylist'):
1947 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1948 return video_id, self.url_result(video_id, 'Youtube', video_id=video_id)
1949 else:
1950 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1951 return video_id, None
1952 return None, None
1953
1954 def _real_extract(self, url):
1955 # Extract playlist id
1956 mobj = re.match(self._VALID_URL, url)
1957 if mobj is None:
1958 raise ExtractorError('Invalid URL: %s' % url)
1959 playlist_id = mobj.group(1) or mobj.group(2)
1960
1961 video_id, video = self._check_download_just_video(url, playlist_id)
1962 if video:
1963 return video
1964
1965 if playlist_id.startswith(('RD', 'UL', 'PU')):
1966 # Mixes require a custom extraction process
1967 return self._extract_mix(playlist_id)
1968
1969 has_videos, playlist = self._extract_playlist(playlist_id)
1970 if has_videos or not video_id:
1971 return playlist
1972
1973 # Some playlist URLs don't actually serve a playlist (see
1974 # https://github.com/rg3/youtube-dl/issues/10537).
1975 # Fallback to plain video extraction if there is a video id
1976 # along with playlist id.
1977 return self.url_result(video_id, 'Youtube', video_id=video_id)
1978
1979
1980 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
1981 IE_DESC = 'YouTube.com channels'
1982 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
1983 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
1984 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
1985 IE_NAME = 'youtube:channel'
1986 _TESTS = [{
1987 'note': 'paginated channel',
1988 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
1989 'playlist_mincount': 91,
1990 'info_dict': {
1991 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
1992 'title': 'Uploads from lex will',
1993 }
1994 }, {
1995 'note': 'Age restricted channel',
1996 # from https://www.youtube.com/user/DeusExOfficial
1997 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
1998 'playlist_mincount': 64,
1999 'info_dict': {
2000 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
2001 'title': 'Uploads from Deus Ex',
2002 },
2003 }]
2004
2005 @classmethod
2006 def suitable(cls, url):
2007 return (False if YoutubePlaylistsIE.suitable(url) or YoutubeLiveIE.suitable(url)
2008 else super(YoutubeChannelIE, cls).suitable(url))
2009
2010 def _build_template_url(self, url, channel_id):
2011 return self._TEMPLATE_URL % channel_id
2012
2013 def _real_extract(self, url):
2014 channel_id = self._match_id(url)
2015
2016 url = self._build_template_url(url, channel_id)
2017
2018 # Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
2019 # Workaround by extracting as a playlist if managed to obtain channel playlist URL
2020 # otherwise fallback on channel by page extraction
2021 channel_page = self._download_webpage(
2022 url + '?view=57', channel_id,
2023 'Downloading channel page', fatal=False)
2024 if channel_page is False:
2025 channel_playlist_id = False
2026 else:
2027 channel_playlist_id = self._html_search_meta(
2028 'channelId', channel_page, 'channel id', default=None)
2029 if not channel_playlist_id:
2030 channel_url = self._html_search_meta(
2031 ('al:ios:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad'),
2032 channel_page, 'channel url', default=None)
2033 if channel_url:
2034 channel_playlist_id = self._search_regex(
2035 r'vnd\.youtube://user/([0-9A-Za-z_-]+)',
2036 channel_url, 'channel id', default=None)
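# Channel ids start with 'UC'; the corresponding uploads playlist id is
# obtained by swapping that prefix for 'UU' (e.g. UCxxxx -> UUxxxx).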
2037 if channel_playlist_id and channel_playlist_id.startswith('UC'):
2038 playlist_id = 'UU' + channel_playlist_id[2:]
2039 return self.url_result(
2040 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
2041
2042 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
2043 autogenerated = re.search(r'''(?x)
2044 class="[^"]*?(?:
2045 channel-header-autogenerated-label|
2046 yt-channel-title-autogenerated
2047 )[^"]*"''', channel_page) is not None
2048
2049 if autogenerated:
2050 # The videos are contained in a single page
2051 # the ajax pages can't be used, they are empty
2052 entries = [
2053 self.url_result(
2054 video_id, 'Youtube', video_id=video_id,
2055 video_title=video_title)
2056 for video_id, video_title in self.extract_videos_from_page(channel_page)]
2057 return self.playlist_result(entries, channel_id)
2058
2059 try:
2060 next(self._entries(channel_page, channel_id))
2061 except StopIteration:
2062 alert_message = self._html_search_regex(
2063 r'(?s)<div[^>]+class=(["\']).*?\byt-alert-message\b.*?\1[^>]*>(?P<alert>[^<]+)</div>',
2064 channel_page, 'alert', default=None, group='alert')
2065 if alert_message:
2066 raise ExtractorError('Youtube said: %s' % alert_message, expected=True)
2067
2068 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
2069
2070
2071 class YoutubeUserIE(YoutubeChannelIE):
2072 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
2073 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:(?P<user>user|c)/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
2074 _TEMPLATE_URL = 'https://www.youtube.com/%s/%s/videos'
2075 IE_NAME = 'youtube:user'
2076
2077 _TESTS = [{
2078 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
2079 'playlist_mincount': 320,
2080 'info_dict': {
2081 'id': 'UUfX55Sx5hEFjoC3cNs6mCUQ',
2082 'title': 'Uploads from The Linux Foundation',
2083 }
2084 }, {
2085 # Only available via https://www.youtube.com/c/12minuteathlete/videos
2086 # but not https://www.youtube.com/user/12minuteathlete/videos
2087 'url': 'https://www.youtube.com/c/12minuteathlete/videos',
2088 'playlist_mincount': 249,
2089 'info_dict': {
2090 'id': 'UUVjM-zV6_opMDx7WYxnjZiQ',
2091 'title': 'Uploads from 12 Minute Athlete',
2092 }
2093 }, {
2094 'url': 'ytuser:phihag',
2095 'only_matching': True,
2096 }, {
2097 'url': 'https://www.youtube.com/c/gametrailers',
2098 'only_matching': True,
2099 }, {
2100 'url': 'https://www.youtube.com/gametrailers',
2101 'only_matching': True,
2102 }, {
2103 # This channel is not available.
2104 'url': 'https://www.youtube.com/user/kananishinoSMEJ/videos',
2105 'only_matching': True,
2106 }]
2107
2108 @classmethod
2109 def suitable(cls, url):
2110 # Don't return True if the URL can be extracted with another YouTube
2111 # extractor: the regex is too permissive and would match it.
2112 other_yt_ies = iter(klass for (name, klass) in globals().items() if name.startswith('Youtube') and name.endswith('IE') and klass is not cls)
2113 if any(ie.suitable(url) for ie in other_yt_ies):
2114 return False
2115 else:
2116 return super(YoutubeUserIE, cls).suitable(url)
2117
2118 def _build_template_url(self, url, channel_id):
2119 mobj = re.match(self._VALID_URL, url)
2120 return self._TEMPLATE_URL % (mobj.group('user') or 'user', mobj.group('id'))
2121
2122
2123 class YoutubeLiveIE(YoutubeBaseInfoExtractor):
2124 IE_DESC = 'YouTube.com live streams'
2125 _VALID_URL = r'(?P<base_url>https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+))/live'
2126 IE_NAME = 'youtube:live'
2127
2128 _TESTS = [{
2129 'url': 'http://www.youtube.com/user/TheYoungTurks/live',
2130 'info_dict': {
2131 'id': 'a48o2S1cPoo',
2132 'ext': 'mp4',
2133 'title': 'The Young Turks - Live Main Show',
2134 'uploader': 'The Young Turks',
2135 'uploader_id': 'TheYoungTurks',
2136 'uploader_url': 're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
2137 'upload_date': '20150715',
2138 'license': 'Standard YouTube License',
2139 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
2140 'categories': ['News & Politics'],
2141 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
2142 'like_count': int,
2143 'dislike_count': int,
2144 },
2145 'params': {
2146 'skip_download': True,
2147 },
2148 }, {
2149 'url': 'http://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
2150 'only_matching': True,
2151 }]
2152
2153 def _real_extract(self, url):
2154 mobj = re.match(self._VALID_URL, url)
2155 channel_id = mobj.group('id')
2156 base_url = mobj.group('base_url')
2157 webpage = self._download_webpage(url, channel_id, fatal=False)
2158 if webpage:
2159 page_type = self._og_search_property(
2160 'type', webpage, 'page type', default=None)
2161 video_id = self._html_search_meta(
2162 'videoId', webpage, 'video id', default=None)
2163 if page_type == 'video' and video_id and re.match(r'^[0-9A-Za-z_-]{11}$', video_id):
2164 return self.url_result(video_id, YoutubeIE.ie_key())
2165 return self.url_result(base_url)
2166
2167
2168 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
2169 IE_DESC = 'YouTube.com user/channel playlists'
2170 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
2171 IE_NAME = 'youtube:playlists'
2172
2173 _TESTS = [{
2174 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
2175 'playlist_mincount': 4,
2176 'info_dict': {
2177 'id': 'ThirstForScience',
2178 'title': 'Thirst for Science',
2179 },
2180 }, {
2181 # with "Load more" button
2182 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
2183 'playlist_mincount': 70,
2184 'info_dict': {
2185 'id': 'igorkle1',
2186 'title': 'Игорь Клейнер',
2187 },
2188 }, {
2189 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
2190 'playlist_mincount': 17,
2191 'info_dict': {
2192 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
2193 'title': 'Chem Player',
2194 },
2195 }]
2196
2197
2198 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
2199 IE_DESC = 'YouTube.com searches'
2200 # there doesn't appear to be a real limit, for example if you search for
2201 # 'python' you get more than 8,000,000 results
2202 _MAX_RESULTS = float('inf')
2203 IE_NAME = 'youtube:search'
2204 _SEARCH_KEY = 'ytsearch'
2205 _EXTRA_QUERY_ARGS = {}
2206 _TESTS = []
2207
2208 def _get_n_results(self, query, n):
2209 """Get a specified number of results for a query"""
2210
2211 videos = []
2212 limit = n
2213
2214 for pagenum in itertools.count(1):
2215 url_query = {
2216 'search_query': query.encode('utf-8'),
2217 'page': pagenum,
2218 'spf': 'navigate',
2219 }
2220 url_query.update(self._EXTRA_QUERY_ARGS)
2221 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
2222 data = self._download_json(
2223 result_url, video_id='query "%s"' % query,
2224 note='Downloading page %s' % pagenum,
2225 errnote='Unable to download API page')
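# With spf=navigate the endpoint answers with a JSON array; the rendered
# result markup sits in the second array element, under its
# 'body'/'content' keys, and is scraped below for /watch?v= links.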
2226 html_content = data[1]['body']['content']
2227
2228 if 'class="search-message' in html_content:
2229 raise ExtractorError(
2230 '[youtube] No video results', expected=True)
2231
2232 new_videos = self._ids_to_results(orderedSet(re.findall(
2233 r'href="/watch\?v=(.{11})', html_content)))
2234 videos += new_videos
2235 if not new_videos or len(videos) > limit:
2236 break
2237
2238 if len(videos) > n:
2239 videos = videos[:n]
2240 return self.playlist_result(videos, query)
2241
2242
2243 class YoutubeSearchDateIE(YoutubeSearchIE):
2244 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2245 _SEARCH_KEY = 'ytsearchdate'
2246 IE_DESC = 'YouTube.com searches, newest videos first'
2247 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2248
2249
2250 class YoutubeSearchURLIE(YoutubePlaylistBaseInfoExtractor):
2251 IE_DESC = 'YouTube.com search URLs'
2252 IE_NAME = 'youtube:search_url'
2253 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2254 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})(?:[^"]*"[^>]+\btitle="(?P<title>[^"]+))?'
2255 _TESTS = [{
2256 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2257 'playlist_mincount': 5,
2258 'info_dict': {
2259 'title': 'youtube-dl test video',
2260 }
2261 }, {
2262 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2263 'only_matching': True,
2264 }]
2265
2266 def _real_extract(self, url):
2267 mobj = re.match(self._VALID_URL, url)
2268 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2269 webpage = self._download_webpage(url, query)
2270 return self.playlist_result(self._process_page(webpage), playlist_title=query)
2271
2272
2273 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2274 IE_DESC = 'YouTube.com (multi-season) shows'
2275 _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
2276 IE_NAME = 'youtube:show'
2277 _TESTS = [{
2278 'url': 'https://www.youtube.com/show/airdisasters',
2279 'playlist_mincount': 5,
2280 'info_dict': {
2281 'id': 'airdisasters',
2282 'title': 'Air Disasters',
2283 }
2284 }]
2285
2286 def _real_extract(self, url):
2287 playlist_id = self._match_id(url)
2288 return super(YoutubeShowIE, self)._real_extract(
2289 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2290
2291
2292 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2293 """
2294 Base class for feed extractors
2295 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2296 """
2297 _LOGIN_REQUIRED = True
2298
2299 @property
2300 def IE_NAME(self):
2301 return 'youtube:%s' % self._FEED_NAME
2302
2303 def _real_initialize(self):
2304 self._login()
2305
2306 def _real_extract(self, url):
2307 page = self._download_webpage(
2308 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2309
2310 # The extraction process is the same as for playlists, but the regex
2311 # for the video ids doesn't contain an index
2312 ids = []
2313 more_widget_html = content_html = page
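# Feed pages are paginated through a "load more" widget: each JSON
# response carries the next chunk of markup ('content_html') plus a fresh
# widget ('load_more_widget_html') pointing at the following page.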
2314 for page_num in itertools.count(1):
2315 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2316
2317 # 'recommended' feed has infinite 'load more' and each new portion spins
2318 # the same videos in (sometimes) slightly different order, so we'll check
2319 # for unicity and break when portion has no new videos
2320 new_ids = [_id for _id in orderedSet(matches) if _id not in ids]
2321 if not new_ids:
2322 break
2323
2324 ids.extend(new_ids)
2325
2326 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2327 if not mobj:
2328 break
2329
2330 more = self._download_json(
2331 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2332 'Downloading page #%s' % page_num,
2333 transform_source=uppercase_escape)
2334 content_html = more['content_html']
2335 more_widget_html = more['load_more_widget_html']
2336
2337 return self.playlist_result(
2338 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2339
2340
2341 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2342 IE_NAME = 'youtube:watchlater'
2343 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2344 _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2345
2346 _TESTS = [{
2347 'url': 'https://www.youtube.com/playlist?list=WL',
2348 'only_matching': True,
2349 }, {
2350 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2351 'only_matching': True,
2352 }]
2353
2354 def _real_extract(self, url):
2355 _, video = self._check_download_just_video(url, 'WL')
2356 if video:
2357 return video
2358 _, playlist = self._extract_playlist('WL')
2359 return playlist
2360
2361
2362 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2363 IE_NAME = 'youtube:favorites'
2364 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2365 _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2366 _LOGIN_REQUIRED = True
2367
2368 def _real_extract(self, url):
2369 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2370 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2371 return self.url_result(playlist_id, 'YoutubePlaylist')
2372
2373
2374 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2375 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2376 _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2377 _FEED_NAME = 'recommended'
2378 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2379
2380
2381 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2382 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2383 _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2384 _FEED_NAME = 'subscriptions'
2385 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2386
2387
2388 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2389 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2390 _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
2391 _FEED_NAME = 'history'
2392 _PLAYLIST_TITLE = 'Youtube History'
2393
2394
2395 class YoutubeTruncatedURLIE(InfoExtractor):
2396 IE_NAME = 'youtube:truncated_url'
2397 IE_DESC = False # Do not list
2398 _VALID_URL = r'''(?x)
2399 (?:https?://)?
2400 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2401 (?:watch\?(?:
2402 feature=[a-z_]+|
2403 annotation_id=annotation_[^&]+|
2404 x-yt-cl=[0-9]+|
2405 hl=[^&]*|
2406 t=[0-9]+
2407 )?
2408 |
2409 attribution_link\?a=[^&]+
2410 )
2411 $
2412 '''
2413
2414 _TESTS = [{
2415 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
2416 'only_matching': True,
2417 }, {
2418 'url': 'http://www.youtube.com/watch?',
2419 'only_matching': True,
2420 }, {
2421 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2422 'only_matching': True,
2423 }, {
2424 'url': 'https://www.youtube.com/watch?feature=foo',
2425 'only_matching': True,
2426 }, {
2427 'url': 'https://www.youtube.com/watch?hl=en-GB',
2428 'only_matching': True,
2429 }, {
2430 'url': 'https://www.youtube.com/watch?t=2372',
2431 'only_matching': True,
2432 }]
2433
2434 def _real_extract(self, url):
2435 raise ExtractorError(
2436 'Did you forget to quote the URL? Remember that & is a meta '
2437 'character in most shells, so you want to put the URL in quotes, '
2438 'like youtube-dl '
2439 '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2440 ' or simply youtube-dl BaW_jenozKc .',
2441 expected=True)
2442
2443
2444 class YoutubeTruncatedIDIE(InfoExtractor):
2445 IE_NAME = 'youtube:truncated_id'
2446 IE_DESC = False # Do not list
2447 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2448
2449 _TESTS = [{
2450 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2451 'only_matching': True,
2452 }]
2453
2454 def _real_extract(self, url):
2455 video_id = self._match_id(url)
2456 raise ExtractorError(
2457 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2458 expected=True)