1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5
6 import itertools
7 import json
8 import os.path
9 import random
10 import re
11 import time
12 import traceback
13
14 from .common import InfoExtractor, SearchInfoExtractor
15 from ..jsinterp import JSInterpreter
16 from ..swfinterp import SWFInterpreter
17 from ..compat import (
18 compat_chr,
19 compat_parse_qs,
20 compat_urllib_parse,
21 compat_urllib_parse_unquote,
22 compat_urllib_parse_unquote_plus,
23 compat_urllib_parse_urlparse,
24 compat_urlparse,
25 compat_str,
26 )
27 from ..utils import (
28 clean_html,
29 encode_dict,
30 error_to_compat_str,
31 ExtractorError,
32 float_or_none,
33 get_element_by_attribute,
34 get_element_by_id,
35 int_or_none,
36 mimetype2ext,
37 orderedSet,
38 parse_duration,
39 remove_quotes,
40 remove_start,
41 sanitized_Request,
42 smuggle_url,
43 str_to_int,
44 unescapeHTML,
45 unified_strdate,
46 unsmuggle_url,
47 uppercase_escape,
48 ISO3166Utils,
49 )
50
51
52 class YoutubeBaseInfoExtractor(InfoExtractor):
53 """Provide base functions for Youtube extractors"""
54 _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
55 _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
56 _NETRC_MACHINE = 'youtube'
57 # If True it will raise an error if no login info is provided
58 _LOGIN_REQUIRED = False
59
60 def _set_language(self):
61 self._set_cookie(
62 '.youtube.com', 'PREF', 'f1=50000000&hl=en',
63 # YouTube sets the expire time to about two months
64 expire_time=time.time() + 2 * 30 * 24 * 3600)
65
66 def _ids_to_results(self, ids):
67 return [
68 self.url_result(vid_id, 'Youtube', video_id=vid_id)
69 for vid_id in ids]
70
71 def _login(self):
72 """
73 Attempt to log in to YouTube.
74 True is returned if successful or skipped.
75 False is returned if login failed.
76
77 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
78 """
79 (username, password) = self._get_login_info()
80 # No authentication to be performed
81 if username is None:
82 if self._LOGIN_REQUIRED:
83 raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
84 return True
85
86 login_page = self._download_webpage(
87 self._LOGIN_URL, None,
88 note='Downloading login page',
89 errnote='unable to fetch login page', fatal=False)
90 if login_page is False:
91 return
92
93 galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
94 login_page, 'Login GALX parameter')
95
96 # Log in
97 login_form_strs = {
98 'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
99 'Email': username,
100 'GALX': galx,
101 'Passwd': password,
102
103 'PersistentCookie': 'yes',
104 '_utf8': '霱',
105 'bgresponse': 'js_disabled',
106 'checkConnection': '',
107 'checkedDomains': 'youtube',
108 'dnConn': '',
109 'pstMsg': '0',
110 'rmShown': '1',
111 'secTok': '',
112 'signIn': 'Sign in',
113 'timeStmp': '',
114 'service': 'youtube',
115 'uilel': '3',
116 'hl': 'en_US',
117 }
118
119 login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
120
121 req = sanitized_Request(self._LOGIN_URL, login_data)
122 login_results = self._download_webpage(
123 req, None,
124 note='Logging in', errnote='unable to log in', fatal=False)
125 if login_results is False:
126 return False
127
128 if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
129 raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
130
131 # Two-Factor
132 # TODO add SMS and phone call support - these require making a request and then prompting the user
133
134 if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
135 tfa_code = self._get_tfa_info('2-step verification code')
136
137 if not tfa_code:
138 self._downloader.report_warning(
139 'Two-factor authentication required. Provide it either interactively or with --twofactor <code> '
140 '(Note that only TOTP (Google Authenticator App) codes work at this time.)')
141 return False
142
143 tfa_code = remove_start(tfa_code, 'G-')
144
145 tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
146
147 tfa_form_strs.update({
148 'Pin': tfa_code,
149 'TrustDevice': 'on',
150 })
151
152 tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
153
154 tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
155 tfa_results = self._download_webpage(
156 tfa_req, None,
157 note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
158
159 if tfa_results is False:
160 return False
161
162 if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
163 self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
164 return False
165 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
166 self._downloader.report_warning('unable to log in - did the page structure change?')
167 return False
168 if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
169 self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
170 return False
171
172 if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
173 self._downloader.report_warning('unable to log in: bad username or password')
174 return False
175 return True
176
177 def _real_initialize(self):
178 if self._downloader is None:
179 return
180 self._set_language()
181 if not self._login():
182 return
183
184
185 class YoutubeEntryListBaseInfoExtractor(YoutubeBaseInfoExtractor):
186 # Extract entries from page with "Load more" button
187 def _entries(self, page, playlist_id):
188 more_widget_html = content_html = page
189 for page_num in itertools.count(1):
190 for entry in self._process_page(content_html):
191 yield entry
192
193 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
194 if not mobj:
195 break
196
197 more = self._download_json(
198 'https://youtube.com/%s' % mobj.group('more'), playlist_id,
199 'Downloading page #%s' % page_num,
200 transform_source=uppercase_escape)
201 content_html = more['content_html']
202 if not content_html.strip():
203 # Some webpages show a "Load more" button but they don't
204 # have more videos
205 break
206 more_widget_html = more['load_more_widget_html']
207
208
209 class YoutubePlaylistBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
210 def _process_page(self, content):
211 for video_id, video_title in self.extract_videos_from_page(content):
212 yield self.url_result(video_id, 'Youtube', video_id, video_title)
213
214 def extract_videos_from_page(self, page):
215 ids_in_page = []
216 titles_in_page = []
217 for mobj in re.finditer(self._VIDEO_RE, page):
218 # The link with index 0 is not the first video of the playlist (not sure if this is still the case)
219 if 'index' in mobj.groupdict() and mobj.group('id') == '0':
220 continue
221 video_id = mobj.group('id')
222 video_title = unescapeHTML(mobj.group('title'))
223 if video_title:
224 video_title = video_title.strip()
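# list.index() raises ValueError for IDs not seen yet; use that to dedupe
# entries while still back-filling a missing title for an already-seen ID.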
225 try:
226 idx = ids_in_page.index(video_id)
227 if video_title and not titles_in_page[idx]:
228 titles_in_page[idx] = video_title
229 except ValueError:
230 ids_in_page.append(video_id)
231 titles_in_page.append(video_title)
232 return zip(ids_in_page, titles_in_page)
233
234
235 class YoutubePlaylistsBaseInfoExtractor(YoutubeEntryListBaseInfoExtractor):
236 def _process_page(self, content):
237 for playlist_id in orderedSet(re.findall(r'href="/?playlist\?list=([0-9A-Za-z-_]{10,})"', content)):
238 yield self.url_result(
239 'https://www.youtube.com/playlist?list=%s' % playlist_id, 'YoutubePlaylist')
240
241 def _real_extract(self, url):
242 playlist_id = self._match_id(url)
243 webpage = self._download_webpage(url, playlist_id)
244 title = self._og_search_title(webpage, fatal=False)
245 return self.playlist_result(self._entries(webpage, playlist_id), playlist_id, title)
246
247
248 class YoutubeIE(YoutubeBaseInfoExtractor):
249 IE_DESC = 'YouTube.com'
250 _VALID_URL = r"""(?x)^
251 (
252 (?:https?://|//) # http(s):// or protocol-independent URL
253 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
254 (?:www\.)?deturl\.com/www\.youtube\.com/|
255 (?:www\.)?pwnyoutube\.com/|
256 (?:www\.)?yourepeat\.com/|
257 tube\.majestyc\.net/|
258 youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
259 (?:.*?\#/)? # handle anchor (#/) redirect urls
260 (?: # the various things that can precede the ID:
261 (?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
262 |(?: # or the v= param in all its forms
263 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
264 (?:\?|\#!?) # the params delimiter ? or # or #!
265 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
266 v=
267 )
268 ))
269 |(?:
270 youtu\.be| # just youtu.be/xxxx
271 vid\.plus # or vid.plus/xxxx
272 )/
273 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
274 )
275 )? # all until now is optional -> you can pass the naked ID
276 ([0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
277 (?!.*?&list=) # combined list/video URLs are handled by the playlist IE
278 (?(1).+)? # if we found the ID, everything can follow
279 $"""
280 _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
281 _formats = {
282 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
283 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
284 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
285 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
286 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
287 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
288 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
289 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
290 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
291 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
292 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
293 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
294 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
295 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
296 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
297 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
298 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
299 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
300
301
302 # 3D videos
303 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
304 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
305 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
306 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
307 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
308 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
309 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
310
311 # Apple HTTP Live Streaming
312 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
313 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
314 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
315 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
316 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
317 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
318 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
319
320 # DASH mp4 video
321 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
322 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
323 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
324 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
325 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
326 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
327 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
328 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
329 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
330 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60, 'preference': -40},
331 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264', 'preference': -40},
332
333 # Dash mp4 audio
334 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
335 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
336 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
337
338 # Dash webm
339 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
340 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
341 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
342 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
343 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
344 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
345 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9', 'preference': -40},
346 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
347 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
348 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
349 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
350 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
351 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
352 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
353 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
354 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
355 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
356 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
357 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
358 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
359 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'preference': -40},
360 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60, 'preference': -40},
361
362 # Dash webm audio
363 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
364 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
365
366 # Dash webm audio with opus inside
367 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
368 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
369 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
370
371 # RTMP (unnamed)
372 '_rtmp': {'protocol': 'rtmp'},
373 }
374 _SUBTITLE_FORMATS = ('ttml', 'vtt')
375
376 IE_NAME = 'youtube'
377 _TESTS = [
378 {
379 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
380 'info_dict': {
381 'id': 'BaW_jenozKc',
382 'ext': 'mp4',
383 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
384 'uploader': 'Philipp Hagemeister',
385 'uploader_id': 'phihag',
386 'upload_date': '20121002',
387 'license': 'Standard YouTube License',
388 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
389 'categories': ['Science & Technology'],
390 'tags': ['youtube-dl'],
391 'like_count': int,
392 'dislike_count': int,
393 'start_time': 1,
394 'end_time': 9,
395 }
396 },
397 {
398 'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
399 'note': 'Test generic use_cipher_signature video (#897)',
400 'info_dict': {
401 'id': 'UxxajLWwzqY',
402 'ext': 'mp4',
403 'upload_date': '20120506',
404 'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
405 'alt_title': 'I Love It (feat. Charli XCX)',
406 'description': 'md5:f3ceb5ef83a08d95b9d146f973157cc8',
407 'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
408 'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
409 'iconic ep', 'iconic', 'love', 'it'],
410 'uploader': 'Icona Pop',
411 'uploader_id': 'IconaPop',
412 'license': 'Standard YouTube License',
413 'creator': 'Icona Pop',
414 }
415 },
416 {
417 'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
418 'note': 'Test VEVO video with age protection (#956)',
419 'info_dict': {
420 'id': '07FYdnEawAQ',
421 'ext': 'mp4',
422 'upload_date': '20130703',
423 'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
424 'alt_title': 'Tunnel Vision',
425 'description': 'md5:64249768eec3bc4276236606ea996373',
426 'uploader': 'justintimberlakeVEVO',
427 'uploader_id': 'justintimberlakeVEVO',
428 'license': 'Standard YouTube License',
429 'creator': 'Justin Timberlake',
430 'age_limit': 18,
431 }
432 },
433 {
434 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
435 'note': 'Embed-only video (#1746)',
436 'info_dict': {
437 'id': 'yZIXLfi8CZQ',
438 'ext': 'mp4',
439 'upload_date': '20120608',
440 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
441 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
442 'uploader': 'SET India',
443 'uploader_id': 'setindia',
444 'license': 'Standard YouTube License',
445 'age_limit': 18,
446 }
447 },
448 {
449 'url': 'http://www.youtube.com/watch?v=BaW_jenozKc&v=UxxajLWwzqY',
450 'note': 'Use the first video ID in the URL',
451 'info_dict': {
452 'id': 'BaW_jenozKc',
453 'ext': 'mp4',
454 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
455 'uploader': 'Philipp Hagemeister',
456 'uploader_id': 'phihag',
457 'upload_date': '20121002',
458 'license': 'Standard YouTube License',
459 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
460 'categories': ['Science & Technology'],
461 'tags': ['youtube-dl'],
462 'like_count': int,
463 'dislike_count': int,
464 },
465 'params': {
466 'skip_download': True,
467 },
468 },
469 {
470 'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
471 'note': '256k DASH audio (format 141) via DASH manifest',
472 'info_dict': {
473 'id': 'a9LDPn-MO4I',
474 'ext': 'm4a',
475 'upload_date': '20121002',
476 'uploader_id': '8KVIDEO',
477 'description': '',
478 'uploader': '8KVIDEO',
479 'license': 'Standard YouTube License',
480 'title': 'UHDTV TEST 8K VIDEO.mp4'
481 },
482 'params': {
483 'youtube_include_dash_manifest': True,
484 'format': '141',
485 },
486 },
487 # DASH manifest with encrypted signature
488 {
489 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
490 'info_dict': {
491 'id': 'IB3lcPjvWLA',
492 'ext': 'm4a',
493 'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
494 'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
495 'uploader': 'AfrojackVEVO',
496 'uploader_id': 'AfrojackVEVO',
497 'upload_date': '20131011',
498 'license': 'Standard YouTube License',
499 },
500 'params': {
501 'youtube_include_dash_manifest': True,
502 'format': '141',
503 },
504 },
505 # JS player signature function name containing $
506 {
507 'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
508 'info_dict': {
509 'id': 'nfWlot6h_JM',
510 'ext': 'm4a',
511 'title': 'Taylor Swift - Shake It Off',
512 'alt_title': 'Shake It Off',
513 'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
514 'uploader': 'TaylorSwiftVEVO',
515 'uploader_id': 'TaylorSwiftVEVO',
516 'upload_date': '20140818',
517 'license': 'Standard YouTube License',
518 'creator': 'Taylor Swift',
519 },
520 'params': {
521 'youtube_include_dash_manifest': True,
522 'format': '141',
523 },
524 },
525 # Controversy video
526 {
527 'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
528 'info_dict': {
529 'id': 'T4XJQO3qol8',
530 'ext': 'mp4',
531 'upload_date': '20100909',
532 'uploader': 'The Amazing Atheist',
533 'uploader_id': 'TheAmazingAtheist',
534 'license': 'Standard YouTube License',
535 'title': 'Burning Everyone\'s Koran',
536 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
537 }
538 },
539 # Normal age-gate video (No vevo, embed allowed)
540 {
541 'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
542 'info_dict': {
543 'id': 'HtVdAasjOgU',
544 'ext': 'mp4',
545 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
546 'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
547 'uploader': 'The Witcher',
548 'uploader_id': 'WitcherGame',
549 'upload_date': '20140605',
550 'license': 'Standard YouTube License',
551 'age_limit': 18,
552 },
553 },
554 # Age-gate video with encrypted signature
555 {
556 'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
557 'info_dict': {
558 'id': '6kLq3WMV1nU',
559 'ext': 'mp4',
560 'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
561 'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
562 'uploader': 'LloydVEVO',
563 'uploader_id': 'LloydVEVO',
564 'upload_date': '20110629',
565 'license': 'Standard YouTube License',
566 'age_limit': 18,
567 },
568 },
569 # video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
570 {
571 'url': '__2ABJjxzNo',
572 'info_dict': {
573 'id': '__2ABJjxzNo',
574 'ext': 'mp4',
575 'upload_date': '20100430',
576 'uploader_id': 'deadmau5',
577 'creator': 'deadmau5',
578 'description': 'md5:12c56784b8032162bb936a5f76d55360',
579 'uploader': 'deadmau5',
580 'license': 'Standard YouTube License',
581 'title': 'Deadmau5 - Some Chords (HD)',
582 'alt_title': 'Some Chords',
583 },
584 'expected_warnings': [
585 'DASH manifest missing',
586 ]
587 },
588 # Olympics (https://github.com/rg3/youtube-dl/issues/4431)
589 {
590 'url': 'lqQg6PlCWgI',
591 'info_dict': {
592 'id': 'lqQg6PlCWgI',
593 'ext': 'mp4',
594 'upload_date': '20150827',
595 'uploader_id': 'olympic',
596 'license': 'Standard YouTube License',
597 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
598 'uploader': 'Olympics',
599 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
600 },
601 'params': {
602 'skip_download': 'requires avconv',
603 }
604 },
605 # Non-square pixels
606 {
607 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
608 'info_dict': {
609 'id': '_b-2C3KPAM0',
610 'ext': 'mp4',
611 'stretched_ratio': 16 / 9.,
612 'upload_date': '20110310',
613 'uploader_id': 'AllenMeow',
614 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
615 'uploader': '孫艾倫',
616 'license': 'Standard YouTube License',
617 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
618 },
619 },
620 # url_encoded_fmt_stream_map is empty string
621 {
622 'url': 'qEJwOuvDf7I',
623 'info_dict': {
624 'id': 'qEJwOuvDf7I',
625 'ext': 'webm',
626 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
627 'description': '',
628 'upload_date': '20150404',
629 'uploader_id': 'spbelect',
630 'uploader': 'Наблюдатели Петербурга',
631 },
632 'params': {
633 'skip_download': 'requires avconv',
634 },
635 'skip': 'This live event has ended.',
636 },
637 # Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
638 {
639 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
640 'info_dict': {
641 'id': 'FIl7x6_3R5Y',
642 'ext': 'mp4',
643 'title': 'md5:7b81415841e02ecd4313668cde88737a',
644 'description': 'md5:116377fd2963b81ec4ce64b542173306',
645 'upload_date': '20150625',
646 'uploader_id': 'dorappi2000',
647 'uploader': 'dorappi2000',
648 'license': 'Standard YouTube License',
649 'formats': 'mincount:33',
650 },
651 },
652 # DASH manifest with segment_list
653 {
654 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
655 'md5': '8ce563a1d667b599d21064e982ab9e31',
656 'info_dict': {
657 'id': 'CsmdDsKjzN8',
658 'ext': 'mp4',
659 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
660 'uploader': 'Airtek',
661 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
662 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
663 'license': 'Standard YouTube License',
664 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
665 },
666 'params': {
667 'youtube_include_dash_manifest': True,
668 'format': '135', # bestvideo
669 }
670 },
671 {
672 # Multifeed videos (multiple cameras), URL is for Main Camera
673 'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
674 'info_dict': {
675 'id': 'jqWvoWXjCVs',
676 'title': 'teamPGP: Rocket League Noob Stream',
677 'description': 'md5:dc7872fb300e143831327f1bae3af010',
678 },
679 'playlist': [{
680 'info_dict': {
681 'id': 'jqWvoWXjCVs',
682 'ext': 'mp4',
683 'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
684 'description': 'md5:dc7872fb300e143831327f1bae3af010',
685 'upload_date': '20150721',
686 'uploader': 'Beer Games Beer',
687 'uploader_id': 'beergamesbeer',
688 'license': 'Standard YouTube License',
689 },
690 }, {
691 'info_dict': {
692 'id': '6h8e8xoXJzg',
693 'ext': 'mp4',
694 'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
695 'description': 'md5:dc7872fb300e143831327f1bae3af010',
696 'upload_date': '20150721',
697 'uploader': 'Beer Games Beer',
698 'uploader_id': 'beergamesbeer',
699 'license': 'Standard YouTube License',
700 },
701 }, {
702 'info_dict': {
703 'id': 'PUOgX5z9xZw',
704 'ext': 'mp4',
705 'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
706 'description': 'md5:dc7872fb300e143831327f1bae3af010',
707 'upload_date': '20150721',
708 'uploader': 'Beer Games Beer',
709 'uploader_id': 'beergamesbeer',
710 'license': 'Standard YouTube License',
711 },
712 }, {
713 'info_dict': {
714 'id': 'teuwxikvS5k',
715 'ext': 'mp4',
716 'title': 'teamPGP: Rocket League Noob Stream (zim)',
717 'description': 'md5:dc7872fb300e143831327f1bae3af010',
718 'upload_date': '20150721',
719 'uploader': 'Beer Games Beer',
720 'uploader_id': 'beergamesbeer',
721 'license': 'Standard YouTube License',
722 },
723 }],
724 'params': {
725 'skip_download': True,
726 },
727 },
728 {
729 # Multifeed video with comma in title (see https://github.com/rg3/youtube-dl/issues/8536)
730 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
731 'info_dict': {
732 'id': 'gVfLd0zydlo',
733 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
734 },
735 'playlist_count': 2,
736 },
737 {
738 'url': 'http://vid.plus/FlRa-iH7PGw',
739 'only_matching': True,
740 },
741 {
742 # Title with JS-like syntax "};" (see https://github.com/rg3/youtube-dl/issues/7468)
743 # Also tests cut-off URL expansion in video description (see
744 # https://github.com/rg3/youtube-dl/issues/1892,
745 # https://github.com/rg3/youtube-dl/issues/8164)
746 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
747 'info_dict': {
748 'id': 'lsguqyKfVQg',
749 'ext': 'mp4',
750 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
751 'alt_title': 'Dark Walk',
752 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
753 'upload_date': '20151119',
754 'uploader_id': 'IronSoulElf',
755 'uploader': 'IronSoulElf',
756 'license': 'Standard YouTube License',
757 'creator': 'Todd Haberman, Daniel Law Heath & Aaron Kaplan',
758 },
759 'params': {
760 'skip_download': True,
761 },
762 },
763 {
764 # Tags with '};' (see https://github.com/rg3/youtube-dl/issues/7468)
765 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
766 'only_matching': True,
767 },
768 {
769 # Video with yt:stretch=17:0
770 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
771 'info_dict': {
772 'id': 'Q39EVAstoRM',
773 'ext': 'mp4',
774 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
775 'description': 'md5:ee18a25c350637c8faff806845bddee9',
776 'upload_date': '20151107',
777 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
778 'uploader': 'CH GAMER DROID',
779 },
780 'params': {
781 'skip_download': True,
782 },
783 },
784 {
785 # Video licensed under Creative Commons
786 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
787 'info_dict': {
788 'id': 'M4gD1WSo5mA',
789 'ext': 'mp4',
790 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
791 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
792 'upload_date': '20150127',
793 'uploader_id': 'BerkmanCenter',
794 'uploader': 'BerkmanCenter',
795 'license': 'Creative Commons Attribution license (reuse allowed)',
796 },
797 'params': {
798 'skip_download': True,
799 },
800 },
801 {
802 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
803 'only_matching': True,
804 }
805 ]
806
807 def __init__(self, *args, **kwargs):
808 super(YoutubeIE, self).__init__(*args, **kwargs)
809 self._player_cache = {}
810
811 def report_video_info_webpage_download(self, video_id):
812 """Report attempt to download video info webpage."""
813 self.to_screen('%s: Downloading video info webpage' % video_id)
814
815 def report_information_extraction(self, video_id):
816 """Report attempt to extract video information."""
817 self.to_screen('%s: Extracting video information' % video_id)
818
819 def report_unavailable_format(self, video_id, format):
820 """Report extracted video URL."""
821 self.to_screen('%s: Format %s not available' % (video_id, format))
822
823 def report_rtmp_download(self):
824 """Indicate the download will use the RTMP protocol."""
825 self.to_screen('RTMP download detected')
826
827 def _signature_cache_id(self, example_sig):
828 """ Return a string representation of a signature """
829 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
830
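# Build the signature-deciphering function for the given player URL. The
# resulting character permutation is cached on disk ('youtube-sigfuncs'),
# keyed by player type, player id and the length layout of example_sig,
# so each player only has to be downloaded and parsed once.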
831 def _extract_signature_function(self, video_id, player_url, example_sig):
832 id_m = re.match(
833 r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
834 player_url)
835 if not id_m:
836 raise ExtractorError('Cannot identify player %r' % player_url)
837 player_type = id_m.group('ext')
838 player_id = id_m.group('id')
839
840 # Read from filesystem cache
841 func_id = '%s_%s_%s' % (
842 player_type, player_id, self._signature_cache_id(example_sig))
843 assert os.path.basename(func_id) == func_id
844
845 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
846 if cache_spec is not None:
847 return lambda s: ''.join(s[i] for i in cache_spec)
848
849 download_note = (
850 'Downloading player %s' % player_url
851 if self._downloader.params.get('verbose') else
852 'Downloading %s player %s' % (player_type, player_id)
853 )
854 if player_type == 'js':
855 code = self._download_webpage(
856 player_url, video_id,
857 note=download_note,
858 errnote='Download of %s failed' % player_url)
859 res = self._parse_sig_js(code)
860 elif player_type == 'swf':
861 urlh = self._request_webpage(
862 player_url, video_id,
863 note=download_note,
864 errnote='Download of %s failed' % player_url)
865 code = urlh.read()
866 res = self._parse_sig_swf(code)
867 else:
868 assert False, 'Invalid player type %r' % player_type
869
870 test_string = ''.join(map(compat_chr, range(len(example_sig))))
871 cache_res = res(test_string)
872 cache_spec = [ord(c) for c in cache_res]
873
874 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
875 return res
876
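# Debugging helper: prints ready-to-paste Python code for the deciphered
# signature transformation (used when youtube_print_sig_code is set).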
877 def _print_sig_code(self, func, example_sig):
878 def gen_sig_code(idxs):
879 def _genslice(start, end, step):
880 starts = '' if start == 0 else str(start)
881 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
882 steps = '' if step == 1 else (':%d' % step)
883 return 's[%s%s%s]' % (starts, ends, steps)
884
885 step = None
886 # Quell pyflakes warnings - start will be set when step is set
887 start = '(Never used)'
888 for i, prev in zip(idxs[1:], idxs[:-1]):
889 if step is not None:
890 if i - prev == step:
891 continue
892 yield _genslice(start, prev, step)
893 step = None
894 continue
895 if i - prev in [-1, 1]:
896 step = i - prev
897 start = prev
898 continue
899 else:
900 yield 's[%d]' % prev
901 if step is None:
902 yield 's[%d]' % i
903 else:
904 yield _genslice(start, i, step)
905
906 test_string = ''.join(map(compat_chr, range(len(example_sig))))
907 cache_res = func(test_string)
908 cache_spec = [ord(c) for c in cache_res]
909 expr_code = ' + '.join(gen_sig_code(cache_spec))
910 signature_id_tuple = '(%s)' % (
911 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
912 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
913 ' return %s\n') % (signature_id_tuple, expr_code)
914 self.to_screen('Extracted signature function:\n' + code)
915
916 def _parse_sig_js(self, jscode):
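"""Extract the signature-deciphering function from the HTML5 player JS."""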
917 funcname = self._search_regex(
918 r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
919 'Initial JS player signature function name')
920
921 jsi = JSInterpreter(jscode)
922 initial_function = jsi.extract_function(funcname)
923 return lambda s: initial_function([s])
924
925 def _parse_sig_swf(self, file_contents):
926 swfi = SWFInterpreter(file_contents)
927 TARGET_CLASSNAME = 'SignatureDecipher'
928 searched_class = swfi.extract_class(TARGET_CLASSNAME)
929 initial_function = swfi.extract_function(searched_class, 'decipher')
930 return lambda s: initial_function([s])
931
932 def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
933 """Turn the encrypted s field into a working signature"""
934
935 if player_url is None:
936 raise ExtractorError('Cannot decrypt signature without player_url')
937
938 if player_url.startswith('//'):
939 player_url = 'https:' + player_url
940 try:
941 player_id = (player_url, self._signature_cache_id(s))
942 if player_id not in self._player_cache:
943 func = self._extract_signature_function(
944 video_id, player_url, s
945 )
946 self._player_cache[player_id] = func
947 func = self._player_cache[player_id]
948 if self._downloader.params.get('youtube_print_sig_code'):
949 self._print_sig_code(func, s)
950 return func(s)
951 except Exception as e:
952 tb = traceback.format_exc()
953 raise ExtractorError(
954 'Signature extraction failed: ' + tb, cause=e)
955
956 def _get_subtitles(self, video_id, webpage):
957 try:
958 subs_doc = self._download_xml(
959 'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
960 video_id, note=False)
961 except ExtractorError as err:
962 self._downloader.report_warning('unable to download video subtitles: %s' % error_to_compat_str(err))
963 return {}
964
965 sub_lang_list = {}
966 for track in subs_doc.findall('track'):
967 lang = track.attrib['lang_code']
968 if lang in sub_lang_list:
969 continue
970 sub_formats = []
971 for ext in self._SUBTITLE_FORMATS:
972 params = compat_urllib_parse.urlencode({
973 'lang': lang,
974 'v': video_id,
975 'fmt': ext,
976 'name': track.attrib['name'].encode('utf-8'),
977 })
978 sub_formats.append({
979 'url': 'https://www.youtube.com/api/timedtext?' + params,
980 'ext': ext,
981 })
982 sub_lang_list[lang] = sub_formats
983 if not sub_lang_list:
984 self._downloader.report_warning('video doesn\'t have subtitles')
985 return {}
986 return sub_lang_list
987
988 def _get_ytplayer_config(self, video_id, webpage):
989 patterns = (
990 # User data may contain arbitrary character sequences that break
991 # regex-based JSON extraction, e.g. if it contains '};' the second
992 # regex won't capture the whole JSON. Work around this by trying the
993 # more specific regex first; proper quoted-string handling should
994 # eventually replace this workaround (see
995 # https://github.com/rg3/youtube-dl/issues/7468,
996 # https://github.com/rg3/youtube-dl/pull/7599)
997 r';ytplayer\.config\s*=\s*({.+?});ytplayer',
998 r';ytplayer\.config\s*=\s*({.+?});',
999 )
1000 config = self._search_regex(
1001 patterns, webpage, 'ytplayer.config', default=None)
1002 if config:
1003 return self._parse_json(
1004 uppercase_escape(config), video_id, fatal=False)
1005
1006 def _get_automatic_captions(self, video_id, webpage):
1007 """We need the webpage for getting the captions url, pass it as an
1008 argument to speed up the process."""
1009 self.to_screen('%s: Looking for automatic captions' % video_id)
1010 player_config = self._get_ytplayer_config(video_id, webpage)
1011 err_msg = 'Couldn\'t find automatic captions for %s' % video_id
1012 if not player_config:
1013 self._downloader.report_warning(err_msg)
1014 return {}
1015 try:
1016 args = player_config['args']
1017 caption_url = args.get('ttsurl')
1018 if caption_url:
1019 timestamp = args['timestamp']
1020 # We get the available subtitles
1021 list_params = compat_urllib_parse.urlencode({
1022 'type': 'list',
1023 'tlangs': 1,
1024 'asrs': 1,
1025 })
1026 list_url = caption_url + '&' + list_params
1027 caption_list = self._download_xml(list_url, video_id)
1028 original_lang_node = caption_list.find('track')
1029 if original_lang_node is None:
1030 self._downloader.report_warning('Video doesn\'t have automatic captions')
1031 return {}
1032 original_lang = original_lang_node.attrib['lang_code']
1033 caption_kind = original_lang_node.attrib.get('kind', '')
1034
1035 sub_lang_list = {}
1036 for lang_node in caption_list.findall('target'):
1037 sub_lang = lang_node.attrib['lang_code']
1038 sub_formats = []
1039 for ext in self._SUBTITLE_FORMATS:
1040 params = compat_urllib_parse.urlencode({
1041 'lang': original_lang,
1042 'tlang': sub_lang,
1043 'fmt': ext,
1044 'ts': timestamp,
1045 'kind': caption_kind,
1046 })
1047 sub_formats.append({
1048 'url': caption_url + '&' + params,
1049 'ext': ext,
1050 })
1051 sub_lang_list[sub_lang] = sub_formats
1052 return sub_lang_list
1053
1054 # Some videos don't provide ttsurl but rather caption_tracks and
1055 # caption_translation_languages (e.g. 20LmZk1hakA)
1056 caption_tracks = args['caption_tracks']
1057 caption_translation_languages = args['caption_translation_languages']
1058 caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
1059 parsed_caption_url = compat_urlparse.urlparse(caption_url)
1060 caption_qs = compat_parse_qs(parsed_caption_url.query)
1061
1062 sub_lang_list = {}
1063 for lang in caption_translation_languages.split(','):
1064 lang_qs = compat_parse_qs(compat_urllib_parse_unquote_plus(lang))
1065 sub_lang = lang_qs.get('lc', [None])[0]
1066 if not sub_lang:
1067 continue
1068 sub_formats = []
1069 for ext in self._SUBTITLE_FORMATS:
1070 caption_qs.update({
1071 'tlang': [sub_lang],
1072 'fmt': [ext],
1073 })
1074 sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
1075 query=compat_urllib_parse.urlencode(caption_qs, True)))
1076 sub_formats.append({
1077 'url': sub_url,
1078 'ext': ext,
1079 })
1080 sub_lang_list[sub_lang] = sub_formats
1081 return sub_lang_list
1082 # An ExtractorError can be raised by the download process if there are
1083 # no automatic captions but there are subtitles
1084 except (KeyError, ExtractorError):
1085 self._downloader.report_warning(err_msg)
1086 return {}
1087
1088 def _mark_watched(self, video_id, video_info):
1089 playback_url = video_info.get('videostats_playback_base_url', [None])[0]
1090 if not playback_url:
1091 return
1092 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1093 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1094
1095 # The cpn generation algorithm is reverse engineered from base.js.
1096 # In fact it works even with a dummy cpn.
1097 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1098 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1099
1100 qs.update({
1101 'ver': ['2'],
1102 'cpn': [cpn],
1103 })
1104 playback_url = compat_urlparse.urlunparse(
1105 parsed_playback_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
1106
1107 self._download_webpage(
1108 playback_url, video_id, 'Marking watched',
1109 'Unable to mark watched', fatal=False)
1110
1111 @classmethod
1112 def extract_id(cls, url):
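"""Return the 11-character video ID contained in url."""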
1113 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1114 if mobj is None:
1115 raise ExtractorError('Invalid URL: %s' % url)
1116 video_id = mobj.group(2)
1117 return video_id
1118
1119 def _extract_from_m3u8(self, manifest_url, video_id):
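"""Map itags to stream URLs listed in an HLS (m3u8) manifest."""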
1120 url_map = {}
1121
1122 def _get_urls(_manifest):
1123 lines = _manifest.split('\n')
1124 urls = filter(lambda l: l and not l.startswith('#'),
1125 lines)
1126 return urls
1127 manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
1128 formats_urls = _get_urls(manifest)
1129 for format_url in formats_urls:
1130 itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
1131 url_map[itag] = format_url
1132 return url_map
1133
1134 def _extract_annotations(self, video_id):
1135 url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
1136 return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
1137
1138 def _real_extract(self, url):
1139 url, smuggled_data = unsmuggle_url(url, {})
1140
1141 proto = (
1142 'http' if self._downloader.params.get('prefer_insecure', False)
1143 else 'https')
1144
1145 start_time = None
1146 end_time = None
1147 parsed_url = compat_urllib_parse_urlparse(url)
1148 for component in [parsed_url.fragment, parsed_url.query]:
1149 query = compat_parse_qs(component)
1150 if start_time is None and 't' in query:
1151 start_time = parse_duration(query['t'][0])
1152 if start_time is None and 'start' in query:
1153 start_time = parse_duration(query['start'][0])
1154 if end_time is None and 'end' in query:
1155 end_time = parse_duration(query['end'][0])
1156
1157 # Extract the original video URL from URLs that redirect (e.g. for age verification) using the next_url parameter
1158 mobj = re.search(self._NEXT_URL_RE, url)
1159 if mobj:
1160 url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
1161 video_id = self.extract_id(url)
1162
1163 # Get video webpage
1164 url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
1165 video_webpage = self._download_webpage(url, video_id)
1166
1167 # Attempt to extract SWF player URL
1168 mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
1169 if mobj is not None:
1170 player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
1171 else:
1172 player_url = None
1173
1174 dash_mpds = []
1175
1176 def add_dash_mpd(video_info):
1177 dash_mpd = video_info.get('dashmpd')
1178 if dash_mpd and dash_mpd[0] not in dash_mpds:
1179 dash_mpds.append(dash_mpd[0])
1180
1181 # Get video info
1182 embed_webpage = None
1183 is_live = None
1184 if re.search(r'player-age-gate-content">', video_webpage) is not None:
1185 age_gate = True
1186 # We simulate accessing the video from www.youtube.com/v/{video_id};
1187 # this page can be viewed without logging in to YouTube
1188 url = proto + '://www.youtube.com/embed/%s' % video_id
1189 embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
1190 data = compat_urllib_parse.urlencode({
1191 'video_id': video_id,
1192 'eurl': 'https://youtube.googleapis.com/v/' + video_id,
1193 'sts': self._search_regex(
1194 r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
1195 })
1196 video_info_url = proto + '://www.youtube.com/get_video_info?' + data
1197 video_info_webpage = self._download_webpage(
1198 video_info_url, video_id,
1199 note='Refetching age-gated info webpage',
1200 errnote='unable to download video info webpage')
1201 video_info = compat_parse_qs(video_info_webpage)
1202 add_dash_mpd(video_info)
1203 else:
1204 age_gate = False
1205 video_info = None
1206 # Try looking directly into the video webpage
1207 ytplayer_config = self._get_ytplayer_config(video_id, video_webpage)
1208 if ytplayer_config:
1209 args = ytplayer_config['args']
1210 if args.get('url_encoded_fmt_stream_map'):
1211 # Convert to the same format returned by compat_parse_qs
1212 video_info = dict((k, [v]) for k, v in args.items())
1213 add_dash_mpd(video_info)
1214 if args.get('livestream') == '1' or args.get('live_playback') == 1:
1215 is_live = True
1216 if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
1217 # We also look in get_video_info since it may contain a different dashmpd
1218 # URL pointing to a DASH manifest with a possibly different itag set (some itags
1219 # are missing from the DASH manifest referenced by the webpage's dashmpd, others
1220 # from the manifest referenced by get_video_info's dashmpd).
1221 # The general idea is to take the union of the itags of both DASH manifests (for an
1222 # example of a video with such manifest behavior see https://github.com/rg3/youtube-dl/issues/6093)
1223 self.report_video_info_webpage_download(video_id)
1224 for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
1225 video_info_url = (
1226 '%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
1227 % (proto, video_id, el_type))
1228 video_info_webpage = self._download_webpage(
1229 video_info_url,
1230 video_id, note=False,
1231 errnote='unable to download video info webpage')
1232 get_video_info = compat_parse_qs(video_info_webpage)
1233 if get_video_info.get('use_cipher_signature') != ['True']:
1234 add_dash_mpd(get_video_info)
1235 if not video_info:
1236 video_info = get_video_info
1237 if 'token' in get_video_info:
1238 # Different get_video_info requests may report different results, e.g.
1239 # some may report video unavailability, but some may serve it without
1240 # any complaint (see https://github.com/rg3/youtube-dl/issues/7362,
1241 # the original webpage as well as el=info and el=embedded get_video_info
1242 # requests report video unavailability due to geo restriction while
1243 # el=detailpage succeeds and returns valid data). This is probably
1244 # due to YouTube measures against IP ranges of hosting providers.
1245 # Work around this by preferring the first successful video_info containing
1246 # the token if no such video_info has been found yet.
1247 if 'token' not in video_info:
1248 video_info = get_video_info
1249 break
1250 if 'token' not in video_info:
1251 if 'reason' in video_info:
1252 if 'The uploader has not made this video available in your country.' in video_info['reason']:
1253 regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
1254 if regions_allowed:
1255 raise ExtractorError('YouTube said: This video is available in %s only' % (
1256 ', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
1257 expected=True)
1258 raise ExtractorError(
1259 'YouTube said: %s' % video_info['reason'][0],
1260 expected=True, video_id=video_id)
1261 else:
1262 raise ExtractorError(
1263 '"token" parameter not in video info for unknown reason',
1264 video_id=video_id)
1265
1266 # title
1267 if 'title' in video_info:
1268 video_title = video_info['title'][0]
1269 else:
1270 self._downloader.report_warning('Unable to extract video title')
1271 video_title = '_'
1272
1273 # description
1274 video_description = get_element_by_id("eow-description", video_webpage)
1275 if video_description:
1276 video_description = re.sub(r'''(?x)
1277 <a\s+
1278 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1279 (?:title|href)="([^"]+)"\s+
1280 (?:[a-zA-Z-]+="[^"]+"\s+)*?
1281 class="(?:yt-uix-redirect-link|yt-uix-sessionlink[^"]*)"[^>]*>
1282 [^<]+\.{3}\s*
1283 </a>
1284 ''', r'\1', video_description)
1285 video_description = clean_html(video_description)
1286 else:
1287 fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
1288 if fd_mobj:
1289 video_description = unescapeHTML(fd_mobj.group(1))
1290 else:
1291 video_description = ''
1292
1293 if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
1294 if not self._downloader.params.get('noplaylist'):
1295 entries = []
1296 feed_ids = []
1297 multifeed_metadata_list = video_info['multifeed_metadata_list'][0]
1298 for feed in multifeed_metadata_list.split(','):
1299 # Unquoting must happen before splitting on commas (,) since text
1300 # fields may themselves contain commas (see
1301 # https://github.com/rg3/youtube-dl/issues/8536)
1302 feed_data = compat_parse_qs(compat_urllib_parse_unquote_plus(feed))
1303 entries.append({
1304 '_type': 'url_transparent',
1305 'ie_key': 'Youtube',
1306 'url': smuggle_url(
1307 '%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
1308 {'force_singlefeed': True}),
1309 'title': '%s (%s)' % (video_title, feed_data['title'][0]),
1310 })
1311 feed_ids.append(feed_data['id'][0])
1312 self.to_screen(
1313 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
1314 % (', '.join(feed_ids), video_id))
1315 return self.playlist_result(entries, video_id, video_title, video_description)
1316 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1317
1318 if 'view_count' in video_info:
1319 view_count = int(video_info['view_count'][0])
1320 else:
1321 view_count = None
1322
1323 # Check for "rental" videos
1324 if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
1325 raise ExtractorError('"rental" videos not supported')
1326
1327 # Start extracting information
1328 self.report_information_extraction(video_id)
1329
1330 # uploader
1331 if 'author' not in video_info:
1332 raise ExtractorError('Unable to extract uploader name')
1333 video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
1334
1335 # uploader_id
1336 video_uploader_id = None
1337 mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
1338 if mobj is not None:
1339 video_uploader_id = mobj.group(1)
1340 else:
1341 self._downloader.report_warning('unable to extract uploader nickname')
1342
1343 # thumbnail image
1344 # We try first to get a high quality image:
1345 m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
1346 video_webpage, re.DOTALL)
1347 if m_thumb is not None:
1348 video_thumbnail = m_thumb.group(1)
1349 elif 'thumbnail_url' not in video_info:
1350 self._downloader.report_warning('unable to extract video thumbnail')
1351 video_thumbnail = None
1352 else: # don't panic if we can't find it
1353 video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
1354
1355 # upload date
1356 upload_date = self._html_search_meta(
1357 'datePublished', video_webpage, 'upload date', default=None)
1358 if not upload_date:
1359 upload_date = self._search_regex(
1360 [r'(?s)id="eow-date.*?>(.*?)</span>',
1361 r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
1362 video_webpage, 'upload date', default=None)
1363 if upload_date:
1364 upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
1365 upload_date = unified_strdate(upload_date)
1366
1367 video_license = self._html_search_regex(
1368 r'<h4[^>]+class="title"[^>]*>\s*License\s*</h4>\s*<ul[^>]*>\s*<li>(.+?)</li',
1369 video_webpage, 'license', default=None)
1370
1371 m_music = re.search(
1372 r'<h4[^>]+class="title"[^>]*>\s*Music\s*</h4>\s*<ul[^>]*>\s*<li>(?P<title>.+?) by (?P<creator>.+?)(?:\(.+?\))?</li',
1373 video_webpage)
1374 if m_music:
1375 video_alt_title = remove_quotes(unescapeHTML(m_music.group('title')))
1376 video_creator = clean_html(m_music.group('creator'))
1377 else:
1378 video_alt_title = video_creator = None
1379
1380 m_cat_container = self._search_regex(
1381 r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
1382 video_webpage, 'categories', default=None)
1383 if m_cat_container:
1384 category = self._html_search_regex(
1385 r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
1386 default=None)
1387 video_categories = None if category is None else [category]
1388 else:
1389 video_categories = None
1390
1391 video_tags = [
1392 unescapeHTML(m.group('content'))
1393 for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
1394
1395 def _extract_count(count_name):
1396 return str_to_int(self._search_regex(
1397 r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
1398 % re.escape(count_name),
1399 video_webpage, count_name, default=None))
1400
1401 like_count = _extract_count('like')
1402 dislike_count = _extract_count('dislike')
1403
1404 # subtitles
1405 video_subtitles = self.extract_subtitles(video_id, video_webpage)
1406 automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
1407
1408 if 'length_seconds' not in video_info:
1409 self._downloader.report_warning('unable to extract video duration')
1410 video_duration = None
1411 else:
1412 video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
1413
1414 # annotations
1415 video_annotations = None
1416 if self._downloader.params.get('writeannotations', False):
1417 video_annotations = self._extract_annotations(video_id)
1418
1419 def _map_to_format_list(urlmap):
1420 formats = []
1421 for itag, video_real_url in urlmap.items():
1422 dct = {
1423 'format_id': itag,
1424 'url': video_real_url,
1425 'player_url': player_url,
1426 }
1427 if itag in self._formats:
1428 dct.update(self._formats[itag])
1429 formats.append(dct)
1430 return formats
1431
1432 if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
1433 self.report_rtmp_download()
1434 formats = [{
1435 'format_id': '_rtmp',
1436 'protocol': 'rtmp',
1437 'url': video_info['conn'][0],
1438 'player_url': player_url,
1439 }]
1440 elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
1441 encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
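# encoded_url_map is a comma-separated list of URL-encoded stream descriptors;
# each entry carries fields such as itag, url, type, quality and either 'sig'
# (plain signature) or 's' (scrambled signature).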
1442 if 'rtmpe%3Dyes' in encoded_url_map:
1443 raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
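# fmt_list maps itags to resolutions; entries have the form 'itag/WIDTHxHEIGHT/...'
# (e.g. something like '22/1280x720/...', illustrative only); only the itag and
# resolution parts are used here.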
1444 formats_spec = {}
1445 fmt_list = video_info.get('fmt_list', [''])[0]
1446 if fmt_list:
1447 for fmt in fmt_list.split(','):
1448 spec = fmt.split('/')
1449 if len(spec) > 1:
1450 width_height = spec[1].split('x')
1451 if len(width_height) == 2:
1452 formats_spec[spec[0]] = {
1453 'resolution': spec[1],
1454 'width': int_or_none(width_height[0]),
1455 'height': int_or_none(width_height[1]),
1456 }
1457 formats = []
1458 for url_data_str in encoded_url_map.split(','):
1459 url_data = compat_parse_qs(url_data_str)
1460 if 'itag' not in url_data or 'url' not in url_data:
1461 continue
1462 format_id = url_data['itag'][0]
1463 url = url_data['url'][0]
1464
1465 if 'sig' in url_data:
1466 url += '&signature=' + url_data['sig'][0]
1467 elif 's' in url_data:
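# 's' holds a scrambled signature; it must be descrambled with the
# transformations from the JS (or Flash) player before being appended
# to the URL as 'signature'.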
1468 encrypted_sig = url_data['s'][0]
1469 ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
1470
1471 jsplayer_url_json = self._search_regex(
1472 ASSETS_RE,
1473 embed_webpage if age_gate else video_webpage,
1474 'JS player URL (1)', default=None)
1475 if not jsplayer_url_json and not age_gate:
1476 # We need the embed website after all
1477 if embed_webpage is None:
1478 embed_url = proto + '://www.youtube.com/embed/%s' % video_id
1479 embed_webpage = self._download_webpage(
1480 embed_url, video_id, 'Downloading embed webpage')
1481 jsplayer_url_json = self._search_regex(
1482 ASSETS_RE, embed_webpage, 'JS player URL')
1483
1484 player_url = json.loads(jsplayer_url_json)
1485 if player_url is None:
1486 player_url_json = self._search_regex(
1487 r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
1488 video_webpage, 'age gate player URL')
1489 player_url = json.loads(player_url_json)
1490
1491 if self._downloader.params.get('verbose'):
1492 if player_url is None:
1493 player_version = 'unknown'
1494 player_desc = 'unknown'
1495 else:
1496 if player_url.endswith('swf'):
1497 player_version = self._search_regex(
1498 r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
1499 'flash player', fatal=False)
1500 player_desc = 'flash player %s' % player_version
1501 else:
1502 player_version = self._search_regex(
1503 [r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js', r'(?:www|player)-([^/]+)/base\.js'],
1504 player_url,
1505 'html5 player', fatal=False)
1506 player_desc = 'html5 player %s' % player_version
1507
1508 parts_sizes = self._signature_cache_id(encrypted_sig)
1509 self.to_screen('{%s} signature length %s, %s' %
1510 (format_id, parts_sizes, player_desc))
1511
1512 signature = self._decrypt_signature(
1513 encrypted_sig, video_id, player_url, age_gate)
1514 url += '&signature=' + signature
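# ratebypass=yes is believed to tell the server not to throttle the
# download to playback speed (assumption based on observed behaviour,
# not documented by YouTube).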
1515 if 'ratebypass' not in url:
1516 url += '&ratebypass=yes'
1517
1518 dct = {
1519 'format_id': format_id,
1520 'url': url,
1521 'player_url': player_url,
1522 }
1523 if format_id in self._formats:
1524 dct.update(self._formats[format_id])
1525 if format_id in formats_spec:
1526 dct.update(formats_spec[format_id])
1527
1528 # Some itags are not included in the DASH manifest, so the corresponding formats
1529 # will lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
1530 # Try to extract metadata from the url_encoded_fmt_stream_map entry instead.
1531 mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
1532 width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
1533
1534 more_fields = {
1535 'filesize': int_or_none(url_data.get('clen', [None])[0]),
1536 'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
1537 'width': width,
1538 'height': height,
1539 'fps': int_or_none(url_data.get('fps', [None])[0]),
1540 'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
1541 }
1542 for key, value in more_fields.items():
1543 if value:
1544 dct[key] = value
1545 type_ = url_data.get('type', [None])[0]
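# 'type' looks like a MIME type with codec info, e.g.
# 'video/mp4; codecs="avc1.4d401e, mp4a.40.2"' (illustrative example);
# the extension and codecs are derived from it below.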
1546 if type_:
1547 type_split = type_.split(';')
1548 kind_ext = type_split[0].split('/')
1549 if len(kind_ext) == 2:
1550 kind, _ = kind_ext
1551 dct['ext'] = mimetype2ext(type_split[0])
1552 if kind in ('audio', 'video'):
1553 codecs = None
1554 for mobj in re.finditer(
1555 r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
1556 if mobj.group('key') == 'codecs':
1557 codecs = mobj.group('val')
1558 break
1559 if codecs:
1560 codecs = codecs.split(',')
1561 if len(codecs) == 2:
1562 acodec, vcodec = codecs[1], codecs[0]
1563 else:
1564 acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
1565 dct.update({
1566 'acodec': acodec,
1567 'vcodec': vcodec,
1568 })
1569 formats.append(dct)
1570 elif video_info.get('hlsvp'):
1571 manifest_url = video_info['hlsvp'][0]
1572 url_map = self._extract_from_m3u8(manifest_url, video_id)
1573 formats = _map_to_format_list(url_map)
1574 # Accept-Encoding header causes failures in live streams on Youtube and Youtube Gaming
1575 for a_format in formats:
1576 a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = 'True'
1577 else:
1578 unavailable_message = self._html_search_regex(
1579 r'(?s)<h1[^>]+id="unavailable-message"[^>]*>(.+?)</h1>',
1580 video_webpage, 'unavailable message', default=None)
1581 if unavailable_message:
1582 raise ExtractorError(unavailable_message, expected=True)
1583 raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
1584
1585 # Look for the DASH manifest
1586 if self._downloader.params.get('youtube_include_dash_manifest', True):
1587 dash_mpd_fatal = True
1588 for mpd_url in dash_mpds:
1589 dash_formats = {}
1590 try:
1591 def decrypt_sig(mobj):
1592 s = mobj.group(1)
1593 dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
1594 return '/signature/%s' % dec_s
1595
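# Signed DASH manifest URLs embed the scrambled signature as '/s/<sig>';
# replace it with '/signature/<descrambled sig>' so the manifest can be fetched.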
1596 mpd_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, mpd_url)
1597
1598 for df in self._extract_mpd_formats(
1599 mpd_url, video_id, fatal=dash_mpd_fatal,
1600 formats_dict=self._formats):
1601 # Do not overwrite a DASH format found in a previous DASH manifest
1602 if df['format_id'] not in dash_formats:
1603 dash_formats[df['format_id']] = df
1604 # Additional DASH manifests may result in HTTP Error 403, therefore
1605 # allow them to fail without a bug report message once at least one
1606 # DASH manifest has already succeeded. This is a temporary workaround
1607 # to reduce the burst of bug reports until we figure out the reason
1608 # and whether it can be fixed at all.
1609 dash_mpd_fatal = False
1610 except (ExtractorError, KeyError) as e:
1611 self.report_warning(
1612 'Skipping DASH manifest: %r' % e, video_id)
1613 if dash_formats:
1614 # Remove the formats found through non-DASH extraction; they
1615 # contain less info and it can be wrong because we use
1616 # fixed values (for example the resolution). See
1617 # https://github.com/rg3/youtube-dl/issues/5774 for an
1618 # example.
1619 formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
1620 formats.extend(dash_formats.values())
1621
1622 # Check for malformed aspect ratio
1623 stretched_m = re.search(
1624 r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
1625 video_webpage)
1626 if stretched_m:
1627 w = float(stretched_m.group('w'))
1628 h = float(stretched_m.group('h'))
1629 # yt:stretch may hold invalid ratio data (e.g. for Q39EVAstoRM ratio is 17:0).
1630 # We will only process correct ratios.
1631 if w > 0 and h > 0:
1632 ratio = w / h
1633 for f in formats:
1634 if f.get('vcodec') != 'none':
1635 f['stretched_ratio'] = ratio
1636
1637 self._sort_formats(formats)
1638
1639 self.mark_watched(video_id, video_info)
1640
1641 return {
1642 'id': video_id,
1643 'uploader': video_uploader,
1644 'uploader_id': video_uploader_id,
1645 'upload_date': upload_date,
1646 'license': video_license,
1647 'creator': video_creator,
1648 'title': video_title,
1649 'alt_title': video_alt_title,
1650 'thumbnail': video_thumbnail,
1651 'description': video_description,
1652 'categories': video_categories,
1653 'tags': video_tags,
1654 'subtitles': video_subtitles,
1655 'automatic_captions': automatic_captions,
1656 'duration': video_duration,
1657 'age_limit': 18 if age_gate else 0,
1658 'annotations': video_annotations,
1659 'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
1660 'view_count': view_count,
1661 'like_count': like_count,
1662 'dislike_count': dislike_count,
1663 'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
1664 'formats': formats,
1665 'is_live': is_live,
1666 'start_time': start_time,
1667 'end_time': end_time,
1668 }
1669
1670
1671 class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor):
1672 IE_DESC = 'YouTube.com playlists'
1673 _VALID_URL = r"""(?x)(?:
1674 (?:https?://)?
1675 (?:\w+\.)?
1676 youtube\.com/
1677 (?:
1678 (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
1679 \? (?:.*?[&;])*? (?:p|a|list)=
1680 | p/
1681 )
1682 (
1683 (?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
1684 # Top tracks; their ids can also include dots
1685 |(?:MC)[\w\.]*
1686 )
1687 .*
1688 |
1689 ((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
1690 )"""
1691 _TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
1692 _VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&amp;[^"]*?index=(?P<index>\d+)(?:[^>]+>(?P<title>[^<]+))?'
1693 IE_NAME = 'youtube:playlist'
1694 _TESTS = [{
1695 'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1696 'info_dict': {
1697 'title': 'ytdl test PL',
1698 'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
1699 },
1700 'playlist_count': 3,
1701 }, {
1702 'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1703 'info_dict': {
1704 'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
1705 'title': 'YDL_Empty_List',
1706 },
1707 'playlist_count': 0,
1708 }, {
1709 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
1710 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1711 'info_dict': {
1712 'title': '29C3: Not my department',
1713 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
1714 },
1715 'playlist_count': 95,
1716 }, {
1717 'note': 'issue #673',
1718 'url': 'PLBB231211A4F62143',
1719 'info_dict': {
1720 'title': '[OLD]Team Fortress 2 (Class-based LP)',
1721 'id': 'PLBB231211A4F62143',
1722 },
1723 'playlist_mincount': 26,
1724 }, {
1725 'note': 'Large playlist',
1726 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
1727 'info_dict': {
1728 'title': 'Uploads from Cauchemar',
1729 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
1730 },
1731 'playlist_mincount': 799,
1732 }, {
1733 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1734 'info_dict': {
1735 'title': 'YDL_safe_search',
1736 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
1737 },
1738 'playlist_count': 2,
1739 }, {
1740 'note': 'embedded',
1741 'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1742 'playlist_count': 4,
1743 'info_dict': {
1744 'title': 'JODA15',
1745 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
1746 }
1747 }, {
1748 'note': 'Embedded SWF player',
1749 'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
1750 'playlist_count': 4,
1751 'info_dict': {
1752 'title': 'JODA7',
1753 'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
1754 }
1755 }, {
1756 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
1757 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
1758 'info_dict': {
1759 'title': 'Uploads from Interstellar Movie',
1760 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
1761 },
1762 'playlist_mincount': 21,
1763 }]
1764
1765 def _real_initialize(self):
1766 self._login()
1767
1768 def _extract_mix(self, playlist_id):
1769 # Mixes are generated from a single video;
1770 # the id of the playlist is just 'RD' + video_id.
1771 url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
1772 webpage = self._download_webpage(
1773 url, playlist_id, 'Downloading Youtube mix')
1774 search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
1775 title_span = (
1776 search_title('playlist-title') or
1777 search_title('title long-title') or
1778 search_title('title'))
1779 title = clean_html(title_span)
1780 ids = orderedSet(re.findall(
1781 r'''(?xs)data-video-username=".*?".*?
1782 href="/watch\?v=([0-9A-Za-z_-]{11})&amp;[^"]*?list=%s''' % re.escape(playlist_id),
1783 webpage))
1784 url_results = self._ids_to_results(ids)
1785
1786 return self.playlist_result(url_results, playlist_id, title)
1787
1788 def _extract_playlist(self, playlist_id):
1789 url = self._TEMPLATE_URL % playlist_id
1790 page = self._download_webpage(url, playlist_id)
1791
1792 for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
1793 match = match.strip()
1794 # Check if the playlist exists or is private
1795 if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
1796 raise ExtractorError(
1797 'The playlist doesn\'t exist or is private, use --username or '
1798 '--netrc to access it.',
1799 expected=True)
1800 elif re.match(r'[^<]*Invalid parameters[^<]*', match):
1801 raise ExtractorError(
1802 'Invalid parameters. Maybe URL is incorrect.',
1803 expected=True)
1804 elif re.match(r'[^<]*Choose your language[^<]*', match):
1805 continue
1806 else:
1807 self.report_warning('Youtube gives an alert message: ' + match)
1808
1809 playlist_title = self._html_search_regex(
1810 r'(?s)<h1 class="pl-header-title[^"]*"[^>]*>\s*(.*?)\s*</h1>',
1811 page, 'title')
1812
1813 return self.playlist_result(self._entries(page, playlist_id), playlist_id, playlist_title)
1814
1815 def _check_download_just_video(self, url, playlist_id):
1816 # Check if it's a video-specific URL
1817 query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
1818 if 'v' in query_dict:
1819 video_id = query_dict['v'][0]
1820 if self._downloader.params.get('noplaylist'):
1821 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
1822 return self.url_result(video_id, 'Youtube', video_id=video_id)
1823 else:
1824 self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
1825
1826 def _real_extract(self, url):
1827 # Extract playlist id
1828 mobj = re.match(self._VALID_URL, url)
1829 if mobj is None:
1830 raise ExtractorError('Invalid URL: %s' % url)
1831 playlist_id = mobj.group(1) or mobj.group(2)
1832
1833 video = self._check_download_just_video(url, playlist_id)
1834 if video:
1835 return video
1836
1837 if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
1838 # Mixes require a custom extraction process
1839 return self._extract_mix(playlist_id)
1840
1841 return self._extract_playlist(playlist_id)
1842
1843
1844 class YoutubeChannelIE(YoutubePlaylistBaseInfoExtractor):
1845 IE_DESC = 'YouTube.com channels'
1846 _VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
1847 _TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
1848 _VIDEO_RE = r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?'
1849 IE_NAME = 'youtube:channel'
1850 _TESTS = [{
1851 'note': 'paginated channel',
1852 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
1853 'playlist_mincount': 91,
1854 'info_dict': {
1855 'id': 'UUKfVa3S1e4PHvxWcwyMMg8w',
1856 'title': 'Uploads from lex will',
1857 }
1858 }, {
1859 'note': 'Age restricted channel',
1860 # from https://www.youtube.com/user/DeusExOfficial
1861 'url': 'https://www.youtube.com/channel/UCs0ifCMCm1icqRbqhUINa0w',
1862 'playlist_mincount': 64,
1863 'info_dict': {
1864 'id': 'UUs0ifCMCm1icqRbqhUINa0w',
1865 'title': 'Uploads from Deus Ex',
1866 },
1867 }]
1868
1869 @classmethod
1870 def suitable(cls, url):
1871 return False if YoutubePlaylistsIE.suitable(url) else super(YoutubeChannelIE, cls).suitable(url)
1872
1873 def _real_extract(self, url):
1874 channel_id = self._match_id(url)
1875
1876 url = self._TEMPLATE_URL % channel_id
1877
1878 # Channel-by-page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778).
1879 # Work around this by extracting as a playlist if we manage to obtain the channel playlist URL;
1880 # otherwise fall back on channel-by-page extraction.
1881 channel_page = self._download_webpage(
1882 url + '?view=57', channel_id,
1883 'Downloading channel page', fatal=False)
1884 if channel_page is False:
1885 channel_playlist_id = False
1886 else:
1887 channel_playlist_id = self._html_search_meta(
1888 'channelId', channel_page, 'channel id', default=None)
1889 if not channel_playlist_id:
1890 channel_playlist_id = self._search_regex(
1891 r'data-(?:channel-external-|yt)id="([^"]+)"',
1892 channel_page, 'channel id', default=None)
1893 if channel_playlist_id and channel_playlist_id.startswith('UC'):
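# A channel id of the form 'UCxxxx' has a matching uploads playlist
# 'UUxxxx', so rewrite the prefix and delegate to the playlist extractor.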
1894 playlist_id = 'UU' + channel_playlist_id[2:]
1895 return self.url_result(
1896 compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
1897
1898 channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
1899 autogenerated = re.search(r'''(?x)
1900 class="[^"]*?(?:
1901 channel-header-autogenerated-label|
1902 yt-channel-title-autogenerated
1903 )[^"]*"''', channel_page) is not None
1904
1905 if autogenerated:
1906 # The videos are contained in a single page;
1907 # the ajax pages can't be used since they are empty.
1908 entries = [
1909 self.url_result(
1910 video_id, 'Youtube', video_id=video_id,
1911 video_title=video_title)
1912 for video_id, video_title in self.extract_videos_from_page(channel_page)]
1913 return self.playlist_result(entries, channel_id)
1914
1915 return self.playlist_result(self._entries(channel_page, channel_id), channel_id)
1916
1917
1918 class YoutubeUserIE(YoutubeChannelIE):
1919 IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
1920 _VALID_URL = r'(?:(?:https?://(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
1921 _TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
1922 IE_NAME = 'youtube:user'
1923
1924 _TESTS = [{
1925 'url': 'https://www.youtube.com/user/TheLinuxFoundation',
1926 'playlist_mincount': 320,
1927 'info_dict': {
1928 'title': 'TheLinuxFoundation',
1929 }
1930 }, {
1931 'url': 'ytuser:phihag',
1932 'only_matching': True,
1933 }]
1934
1935 @classmethod
1936 def suitable(cls, url):
1937 # Don't return True if the url can be extracted with another youtube
1938 # extractor; the regex is too permissive and it would match otherwise.
1939 other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
1940 if any(ie.suitable(url) for ie in other_ies):
1941 return False
1942 else:
1943 return super(YoutubeUserIE, cls).suitable(url)
1944
1945
1946 class YoutubePlaylistsIE(YoutubePlaylistsBaseInfoExtractor):
1947 IE_DESC = 'YouTube.com user/channel playlists'
1948 _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/(?:user|channel)/(?P<id>[^/]+)/playlists'
1949 IE_NAME = 'youtube:playlists'
1950
1951 _TESTS = [{
1952 'url': 'http://www.youtube.com/user/ThirstForScience/playlists',
1953 'playlist_mincount': 4,
1954 'info_dict': {
1955 'id': 'ThirstForScience',
1956 'title': 'Thirst for Science',
1957 },
1958 }, {
1959 # with "Load more" button
1960 'url': 'http://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
1961 'playlist_mincount': 70,
1962 'info_dict': {
1963 'id': 'igorkle1',
1964 'title': 'Игорь Клейнер',
1965 },
1966 }, {
1967 'url': 'https://www.youtube.com/channel/UCiU1dHvZObB2iP6xkJ__Icw/playlists',
1968 'playlist_mincount': 17,
1969 'info_dict': {
1970 'id': 'UCiU1dHvZObB2iP6xkJ__Icw',
1971 'title': 'Chem Player',
1972 },
1973 }]
1974
1975
1976 class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
1977 IE_DESC = 'YouTube.com searches'
1978 # There doesn't appear to be a real limit; for example, if you search for
1979 # 'python' you get more than 8,000,000 results.
1980 _MAX_RESULTS = float('inf')
1981 IE_NAME = 'youtube:search'
1982 _SEARCH_KEY = 'ytsearch'
1983 _EXTRA_QUERY_ARGS = {}
1984 _TESTS = []
1985
1986 def _get_n_results(self, query, n):
1987 """Get a specified number of results for a query"""
1988
1989 videos = []
1990 limit = n
1991
1992 for pagenum in itertools.count(1):
1993 url_query = {
1994 'search_query': query.encode('utf-8'),
1995 'page': pagenum,
1996 'spf': 'navigate',
1997 }
1998 url_query.update(self._EXTRA_QUERY_ARGS)
1999 result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
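# With spf=navigate the results page comes back as a JSON array; the rendered
# result HTML lives under data[1]['body']['content'].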
2000 data = self._download_json(
2001 result_url, video_id='query "%s"' % query,
2002 note='Downloading page %s' % pagenum,
2003 errnote='Unable to download API page')
2004 html_content = data[1]['body']['content']
2005
2006 if 'class="search-message' in html_content:
2007 raise ExtractorError(
2008 '[youtube] No video results', expected=True)
2009
2010 new_videos = self._ids_to_results(orderedSet(re.findall(
2011 r'href="/watch\?v=(.{11})', html_content)))
2012 videos += new_videos
2013 if not new_videos or len(videos) > limit:
2014 break
2015
2016 if len(videos) > n:
2017 videos = videos[:n]
2018 return self.playlist_result(videos, query)
2019
2020
2021 class YoutubeSearchDateIE(YoutubeSearchIE):
2022 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
2023 _SEARCH_KEY = 'ytsearchdate'
2024 IE_DESC = 'YouTube.com searches, newest videos first'
2025 _EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
2026
2027
2028 class YoutubeSearchURLIE(InfoExtractor):
2029 IE_DESC = 'YouTube.com search URLs'
2030 IE_NAME = 'youtube:search_url'
2031 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?P<query>[^&]+)(?:[&]|$)'
2032 _TESTS = [{
2033 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
2034 'playlist_mincount': 5,
2035 'info_dict': {
2036 'title': 'youtube-dl test video',
2037 }
2038 }, {
2039 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
2040 'only_matching': True,
2041 }]
2042
2043 def _real_extract(self, url):
2044 mobj = re.match(self._VALID_URL, url)
2045 query = compat_urllib_parse_unquote_plus(mobj.group('query'))
2046
2047 webpage = self._download_webpage(url, query)
2048 result_code = self._search_regex(
2049 r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
2050
2051 part_codes = re.findall(
2052 r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
2053 entries = []
2054 for part_code in part_codes:
2055 part_title = self._html_search_regex(
2056 [r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
2057 part_url_snippet = self._html_search_regex(
2058 r'(?s)href="([^"]+)"', part_code, 'item URL')
2059 part_url = compat_urlparse.urljoin(
2060 'https://www.youtube.com/', part_url_snippet)
2061 entries.append({
2062 '_type': 'url',
2063 'url': part_url,
2064 'title': part_title,
2065 })
2066
2067 return {
2068 '_type': 'playlist',
2069 'entries': entries,
2070 'title': query,
2071 }
2072
2073
2074 class YoutubeShowIE(YoutubePlaylistsBaseInfoExtractor):
2075 IE_DESC = 'YouTube.com (multi-season) shows'
2076 _VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
2077 IE_NAME = 'youtube:show'
2078 _TESTS = [{
2079 'url': 'https://www.youtube.com/show/airdisasters',
2080 'playlist_mincount': 5,
2081 'info_dict': {
2082 'id': 'airdisasters',
2083 'title': 'Air Disasters',
2084 }
2085 }]
2086
2087 def _real_extract(self, url):
2088 playlist_id = self._match_id(url)
2089 return super(YoutubeShowIE, self)._real_extract(
2090 'https://www.youtube.com/show/%s/playlists' % playlist_id)
2091
2092
2093 class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
2094 """
2095 Base class for feed extractors
2096 Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
2097 """
2098 _LOGIN_REQUIRED = True
2099
2100 @property
2101 def IE_NAME(self):
2102 return 'youtube:%s' % self._FEED_NAME
2103
2104 def _real_initialize(self):
2105 self._login()
2106
2107 def _real_extract(self, url):
2108 page = self._download_webpage(
2109 'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
2110
2111 # The extraction process is the same as for playlists, but the regex
2112 # for the video ids doesn't contain an index
2113 ids = []
2114 more_widget_html = content_html = page
2115 for page_num in itertools.count(1):
2116 matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
2117
2118 # The 'recommended' feed has an infinite 'load more' and each new portion serves
2119 # the same videos in a (sometimes) slightly different order, so we check
2120 # for uniqueness and break when a portion has no new videos.
2121 new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
2122 if not new_ids:
2123 break
2124
2125 ids.extend(new_ids)
2126
2127 mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
2128 if not mobj:
2129 break
2130
2131 more = self._download_json(
2132 'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
2133 'Downloading page #%s' % page_num,
2134 transform_source=uppercase_escape)
2135 content_html = more['content_html']
2136 more_widget_html = more['load_more_widget_html']
2137
2138 return self.playlist_result(
2139 self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
2140
2141
2142 class YoutubeWatchLaterIE(YoutubePlaylistIE):
2143 IE_NAME = 'youtube:watchlater'
2144 IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
2145 _VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
2146
2147 _TESTS = [{
2148 'url': 'https://www.youtube.com/playlist?list=WL',
2149 'only_matching': True,
2150 }, {
2151 'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
2152 'only_matching': True,
2153 }]
2154
2155 def _real_extract(self, url):
2156 video = self._check_download_just_video(url, 'WL')
2157 if video:
2158 return video
2159 return self._extract_playlist('WL')
2160
2161
2162 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
2163 IE_NAME = 'youtube:favorites'
2164 IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
2165 _VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
2166 _LOGIN_REQUIRED = True
2167
2168 def _real_extract(self, url):
2169 webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
2170 playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
2171 return self.url_result(playlist_id, 'YoutubePlaylist')
2172
2173
2174 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
2175 IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
2176 _VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
2177 _FEED_NAME = 'recommended'
2178 _PLAYLIST_TITLE = 'Youtube Recommended videos'
2179
2180
2181 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
2182 IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
2183 _VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
2184 _FEED_NAME = 'subscriptions'
2185 _PLAYLIST_TITLE = 'Youtube Subscriptions'
2186
2187
2188 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
2189 IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
2190 _VALID_URL = r'https?://www\.youtube\.com/feed/history|:ythistory'
2191 _FEED_NAME = 'history'
2192 _PLAYLIST_TITLE = 'Youtube History'
2193
2194
2195 class YoutubeTruncatedURLIE(InfoExtractor):
2196 IE_NAME = 'youtube:truncated_url'
2197 IE_DESC = False # Do not list
2198 _VALID_URL = r'''(?x)
2199 (?:https?://)?
2200 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
2201 (?:watch\?(?:
2202 feature=[a-z_]+|
2203 annotation_id=annotation_[^&]+|
2204 x-yt-cl=[0-9]+|
2205 hl=[^&]*|
2206 t=[0-9]+
2207 )?
2208 |
2209 attribution_link\?a=[^&]+
2210 )
2211 $
2212 '''
2213
2214 _TESTS = [{
2215 'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
2216 'only_matching': True,
2217 }, {
2218 'url': 'http://www.youtube.com/watch?',
2219 'only_matching': True,
2220 }, {
2221 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
2222 'only_matching': True,
2223 }, {
2224 'url': 'https://www.youtube.com/watch?feature=foo',
2225 'only_matching': True,
2226 }, {
2227 'url': 'https://www.youtube.com/watch?hl=en-GB',
2228 'only_matching': True,
2229 }, {
2230 'url': 'https://www.youtube.com/watch?t=2372',
2231 'only_matching': True,
2232 }]
2233
2234 def _real_extract(self, url):
2235 raise ExtractorError(
2236 'Did you forget to quote the URL? Remember that & is a meta '
2237 'character in most shells, so you want to put the URL in quotes, '
2238 'like youtube-dl '
2239 '"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
2240 ' or simply youtube-dl BaW_jenozKc .',
2241 expected=True)
2242
2243
2244 class YoutubeTruncatedIDIE(InfoExtractor):
2245 IE_NAME = 'youtube:truncated_id'
2246 IE_DESC = False # Do not list
2247 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
2248
2249 _TESTS = [{
2250 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
2251 'only_matching': True,
2252 }]
2253
2254 def _real_extract(self, url):
2255 video_id = self._match_id(url)
2256 raise ExtractorError(
2257 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
2258 expected=True)