1# coding: utf-8
2
3from __future__ import unicode_literals
4
5import base64
6import calendar
7import copy
8import datetime
9import hashlib
10import itertools
11import json
12import math
13import os.path
14import random
15import re
16import time
17import traceback
18
19from .common import InfoExtractor, SearchInfoExtractor
20from ..compat import (
21 compat_chr,
22 compat_HTTPError,
23 compat_parse_qs,
24 compat_str,
25 compat_urllib_parse_unquote_plus,
26 compat_urllib_parse_urlencode,
27 compat_urllib_parse_urlparse,
28 compat_urlparse,
29)
30from ..jsinterp import JSInterpreter
31from ..utils import (
32 bug_reports_message,
33 bytes_to_intlist,
34 clean_html,
35 datetime_from_str,
36 dict_get,
37 error_to_compat_str,
38 ExtractorError,
39 float_or_none,
40 format_field,
41 int_or_none,
42 intlist_to_bytes,
43 is_html,
44 join_nonempty,
45 mimetype2ext,
46 network_exceptions,
47 NO_DEFAULT,
48 orderedSet,
49 parse_codecs,
50 parse_count,
51 parse_duration,
52 parse_iso8601,
53 parse_qs,
54 qualities,
55 remove_end,
56 remove_start,
57 smuggle_url,
58 str_or_none,
59 str_to_int,
60 traverse_obj,
61 try_get,
62 unescapeHTML,
63 unified_strdate,
64 unsmuggle_url,
65 update_url_query,
66 url_or_none,
67 urljoin,
68 variadic,
69)
70
71
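# get_first: return the first value found at the given key path while scanning a
# sequence of objects (a thin wrapper around traverse_obj with get_all=False).
# Rough illustration with made-up data, not data used by this module:
#   get_first([{'a': None}, {'a': {'b': 1}}], ('a', 'b'))  ->  1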
72def get_first(obj, keys, **kwargs):
73 return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
74
75
76# any clients starting with _ cannot be explicitly requested by the user
77INNERTUBE_CLIENTS = {
78 'web': {
79 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
80 'INNERTUBE_CONTEXT': {
81 'client': {
82 'clientName': 'WEB',
83 'clientVersion': '2.20210622.10.00',
84 }
85 },
86 'INNERTUBE_CONTEXT_CLIENT_NAME': 1
87 },
88 'web_embedded': {
89 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
90 'INNERTUBE_CONTEXT': {
91 'client': {
92 'clientName': 'WEB_EMBEDDED_PLAYER',
93 'clientVersion': '1.20210620.0.1',
94 },
95 },
96 'INNERTUBE_CONTEXT_CLIENT_NAME': 56
97 },
98 'web_music': {
99 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
100 'INNERTUBE_HOST': 'music.youtube.com',
101 'INNERTUBE_CONTEXT': {
102 'client': {
103 'clientName': 'WEB_REMIX',
104 'clientVersion': '1.20210621.00.00',
105 }
106 },
107 'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
108 },
109 'web_creator': {
110 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
111 'INNERTUBE_CONTEXT': {
112 'client': {
113 'clientName': 'WEB_CREATOR',
114 'clientVersion': '1.20210621.00.00',
115 }
116 },
117 'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
118 },
119 'android': {
120 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
121 'INNERTUBE_CONTEXT': {
122 'client': {
123 'clientName': 'ANDROID',
124 'clientVersion': '16.20',
125 }
126 },
127 'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
128 'REQUIRE_JS_PLAYER': False
129 },
130 'android_embedded': {
131 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
132 'INNERTUBE_CONTEXT': {
133 'client': {
134 'clientName': 'ANDROID_EMBEDDED_PLAYER',
135 'clientVersion': '16.20',
136 },
137 },
138 'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
139 'REQUIRE_JS_PLAYER': False
140 },
141 'android_music': {
142 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
143 'INNERTUBE_HOST': 'music.youtube.com',
144 'INNERTUBE_CONTEXT': {
145 'client': {
146 'clientName': 'ANDROID_MUSIC',
147 'clientVersion': '4.32',
148 }
149 },
150 'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
151 'REQUIRE_JS_PLAYER': False
152 },
153 'android_creator': {
154 'INNERTUBE_CONTEXT': {
155 'client': {
156 'clientName': 'ANDROID_CREATOR',
157 'clientVersion': '21.24.100',
158 },
159 },
160 'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
161 'REQUIRE_JS_PLAYER': False
162 },
163 # ios has HLS live streams
164 # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680
165 'ios': {
166 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
167 'INNERTUBE_CONTEXT': {
168 'client': {
169 'clientName': 'IOS',
170 'clientVersion': '16.20',
171 }
172 },
173 'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
174 'REQUIRE_JS_PLAYER': False
175 },
176 'ios_embedded': {
177 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
178 'INNERTUBE_CONTEXT': {
179 'client': {
180 'clientName': 'IOS_MESSAGES_EXTENSION',
181 'clientVersion': '16.20',
182 },
183 },
184 'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
185 'REQUIRE_JS_PLAYER': False
186 },
187 'ios_music': {
188 'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og',
189 'INNERTUBE_HOST': 'music.youtube.com',
190 'INNERTUBE_CONTEXT': {
191 'client': {
192 'clientName': 'IOS_MUSIC',
193 'clientVersion': '4.32',
194 },
195 },
196 'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
197 'REQUIRE_JS_PLAYER': False
198 },
199 'ios_creator': {
200 'INNERTUBE_CONTEXT': {
201 'client': {
202 'clientName': 'IOS_CREATOR',
203 'clientVersion': '21.24.100',
204 },
205 },
206 'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
207 'REQUIRE_JS_PLAYER': False
208 },
209 # mweb has 'ultralow' formats
210 # See: https://github.com/yt-dlp/yt-dlp/pull/557
211 'mweb': {
212 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
213 'INNERTUBE_CONTEXT': {
214 'client': {
215 'clientName': 'MWEB',
216 'clientVersion': '2.20210721.07.00',
217 }
218 },
219 'INNERTUBE_CONTEXT_CLIENT_NAME': 2
220 },
221}
222
223
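# build_innertube_clients() fills in per-client defaults (API key, host, REQUIRE_JS_PLAYER,
# 'hl') and assigns each client a priority derived from the base client order. For every
# base client it also derives a '<client>_agegate' variant (clientScreen=EMBED plus a
# thirdParty embedUrl) at slightly lower priority, which appears to be the fallback used
# for age-restricted videos. Illustrative check (a sketch, not asserted anywhere):
#   INNERTUBE_CLIENTS['web_agegate']['INNERTUBE_CONTEXT']['client']['clientScreen'] == 'EMBED'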
224def build_innertube_clients():
225 third_party = {
226 'embedUrl': 'https://google.com', # Can be any valid URL
227 }
228 base_clients = ('android', 'web', 'ios', 'mweb')
229 priority = qualities(base_clients[::-1])
230
231 for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
232 ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
233 ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
234 ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
235 ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
236 ytcfg['priority'] = 10 * priority(client.split('_', 1)[0])
237
238 if client in base_clients:
239 INNERTUBE_CLIENTS[f'{client}_agegate'] = agegate_ytcfg = copy.deepcopy(ytcfg)
240 agegate_ytcfg['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
241 agegate_ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
242 agegate_ytcfg['priority'] -= 1
243 elif client.endswith('_embedded'):
244 ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
245 ytcfg['priority'] -= 2
246 else:
247 ytcfg['priority'] -= 3
248
249
250build_innertube_clients()
251
252
253class YoutubeBaseInfoExtractor(InfoExtractor):
254 """Provide base functions for Youtube extractors"""
255
256 _RESERVED_NAMES = (
257 r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
258 r'shorts|movies|results|shared|hashtag|trending|feed|feeds|'
259 r'browse|oembed|get_video_info|iframe_api|s/player|'
260 r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
261
262 _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
263
264 _NETRC_MACHINE = 'youtube'
265
266 # If True, an error is raised when no login info is provided
267 _LOGIN_REQUIRED = False
268
269 _INVIDIOUS_SITES = (
270 # invidious-redirect websites
271 r'(?:www\.)?redirect\.invidious\.io',
272 r'(?:(?:www|dev)\.)?invidio\.us',
273 # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
274 r'(?:www\.)?invidious\.pussthecat\.org',
275 r'(?:www\.)?invidious\.zee\.li',
276 r'(?:www\.)?invidious\.ethibox\.fr',
277 r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
278 # youtube-dl invidious instances list
279 r'(?:(?:www|no)\.)?invidiou\.sh',
280 r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
281 r'(?:www\.)?invidious\.kabi\.tk',
282 r'(?:www\.)?invidious\.mastodon\.host',
283 r'(?:www\.)?invidious\.zapashcanon\.fr',
284 r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
285 r'(?:www\.)?invidious\.tinfoil-hat\.net',
286 r'(?:www\.)?invidious\.himiko\.cloud',
287 r'(?:www\.)?invidious\.reallyancient\.tech',
288 r'(?:www\.)?invidious\.tube',
289 r'(?:www\.)?invidiou\.site',
290 r'(?:www\.)?invidious\.site',
291 r'(?:www\.)?invidious\.xyz',
292 r'(?:www\.)?invidious\.nixnet\.xyz',
293 r'(?:www\.)?invidious\.048596\.xyz',
294 r'(?:www\.)?invidious\.drycat\.fr',
295 r'(?:www\.)?inv\.skyn3t\.in',
296 r'(?:www\.)?tube\.poal\.co',
297 r'(?:www\.)?tube\.connect\.cafe',
298 r'(?:www\.)?vid\.wxzm\.sx',
299 r'(?:www\.)?vid\.mint\.lgbt',
300 r'(?:www\.)?vid\.puffyan\.us',
301 r'(?:www\.)?yewtu\.be',
302 r'(?:www\.)?yt\.elukerio\.org',
303 r'(?:www\.)?yt\.lelux\.fi',
304 r'(?:www\.)?invidious\.ggc-project\.de',
305 r'(?:www\.)?yt\.maisputain\.ovh',
306 r'(?:www\.)?ytprivate\.com',
307 r'(?:www\.)?invidious\.13ad\.de',
308 r'(?:www\.)?invidious\.toot\.koeln',
309 r'(?:www\.)?invidious\.fdn\.fr',
310 r'(?:www\.)?watch\.nettohikari\.com',
311 r'(?:www\.)?invidious\.namazso\.eu',
312 r'(?:www\.)?invidious\.silkky\.cloud',
313 r'(?:www\.)?invidious\.exonip\.de',
314 r'(?:www\.)?invidious\.riverside\.rocks',
315 r'(?:www\.)?invidious\.blamefran\.net',
316 r'(?:www\.)?invidious\.moomoo\.de',
317 r'(?:www\.)?ytb\.trom\.tf',
318 r'(?:www\.)?yt\.cyberhost\.uk',
319 r'(?:www\.)?kgg2m7yk5aybusll\.onion',
320 r'(?:www\.)?qklhadlycap4cnod\.onion',
321 r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
322 r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
323 r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
324 r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
325 r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
326 r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
327 r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
328 r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
329 r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
330 r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
331 )
332
333 def _login(self):
334 """
335 Attempt to log in to YouTube.
336 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
337 """
338
339 if (self._LOGIN_REQUIRED
340 and self.get_param('cookiefile') is None
341 and self.get_param('cookiesfrombrowser') is None):
342 self.raise_login_required(
343 'Login details are needed to download this content', method='cookies')
344 username, password = self._get_login_info()
345 if username:
346 self.report_warning(f'Cannot login to YouTube using username and password. {self._LOGIN_HINTS["cookies"]}')
347
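    # Consent-page bypass: if there is no logged-in session cookie and the EU CONSENT
    # cookie is still PENDING, a 'CONSENT=YES+...' cookie is set so that youtube.com serves
    # the actual page instead of the consent interstitial (behaviour inferred from the code
    # below; not documented by YouTube).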
348 def _initialize_consent(self):
349 cookies = self._get_cookies('https://www.youtube.com/')
350 if cookies.get('__Secure-3PSID'):
351 return
352 consent_id = None
353 consent = cookies.get('CONSENT')
354 if consent:
355 if 'YES' in consent.value:
356 return
357 consent_id = self._search_regex(
358 r'PENDING\+(\d+)', consent.value, 'consent', default=None)
359 if not consent_id:
360 consent_id = random.randint(100, 999)
361 self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
362
363 def _real_initialize(self):
364 self._initialize_consent()
365 self._login()
366
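    # Regexes locating the ytInitialData / ytInitialPlayerResponse JSON objects embedded in
    # watch-page HTML; the boundary regex marks where such an object ends.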
367 _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
368 _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
369 _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
370
371 def _get_default_ytcfg(self, client='web'):
372 return copy.deepcopy(INNERTUBE_CLIENTS[client])
373
374 def _get_innertube_host(self, client='web'):
375 return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
376
377 def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
378 # try_get but with fallback to default ytcfg client values when present
379 _func = lambda y: try_get(y, getter, expected_type)
380 return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
381
382 def _extract_client_name(self, ytcfg, default_client='web'):
383 return self._ytcfg_get_safe(
384 ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
385 lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
386
387 def _extract_client_version(self, ytcfg, default_client='web'):
388 return self._ytcfg_get_safe(
389 ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
390 lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
391
392 def _extract_api_key(self, ytcfg=None, default_client='web'):
393 return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
394
395 def _extract_context(self, ytcfg=None, default_client='web'):
396 _get_context = lambda y: try_get(y, lambda x: x['INNERTUBE_CONTEXT'], dict)
397 context = _get_context(ytcfg)
398 if context:
399 return context
400
401 context = _get_context(self._get_default_ytcfg(default_client))
402 if not ytcfg:
403 return context
404
405 # Recreate the client context (required)
406 context['client'].update({
407 'clientVersion': self._extract_client_version(ytcfg, default_client),
408 'clientName': self._extract_client_name(ytcfg, default_client),
409 })
410 visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
411 if visitor_data:
412 context['client']['visitorData'] = visitor_data
413 return context
414
415 _SAPISID = None
416
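    # Cookie-based authorization: the SAPISIDHASH header value is
    # sha1('<unix time> <SAPISID> <origin>') sent as 'SAPISIDHASH <unix time>_<hexdigest>'.
    # Standalone sketch of the same computation (illustrative only; mirrors the method below):
    #   hashlib.sha1(f'{round(time.time())} {sapisid} https://www.youtube.com'.encode()).hexdigest()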
417 def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
418 time_now = round(time.time())
419 if self._SAPISID is None:
420 yt_cookies = self._get_cookies('https://www.youtube.com')
421 # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
422 # See: https://github.com/yt-dlp/yt-dlp/issues/393
423 sapisid_cookie = dict_get(
424 yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
425 if sapisid_cookie and sapisid_cookie.value:
426 self._SAPISID = sapisid_cookie.value
427 self.write_debug('Extracted SAPISID cookie')
428 # The SAPISID cookie is also required; copy __Secure-3PAPISID to it if missing
429 if not yt_cookies.get('SAPISID'):
430 self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
431 self._set_cookie(
432 '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
433 else:
434 self._SAPISID = False
435 if not self._SAPISID:
436 return None
437 # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
438 sapisidhash = hashlib.sha1(
439 f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
440 return f'SAPISIDHASH {time_now}_{sapisidhash}'
441
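    # Sends an InnerTube request: POSTs {'context': ..., **query} as JSON to
    # https://<host>/youtubei/v1/<ep>?key=<api key> using the client-specific headers.
    # Illustrative call (hypothetical parameters, shown only as a sketch):
    #   self._call_api('player', {'videoId': video_id}, video_id, default_client='android')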
442 def _call_api(self, ep, query, video_id, fatal=True, headers=None,
443 note='Downloading API JSON', errnote='Unable to download API page',
444 context=None, api_key=None, api_hostname=None, default_client='web'):
445
446 data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
447 data.update(query)
448 real_headers = self.generate_api_headers(default_client=default_client)
449 real_headers.update({'content-type': 'application/json'})
450 if headers:
451 real_headers.update(headers)
452 return self._download_json(
453 'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
454 video_id=video_id, fatal=fatal, note=note, errnote=errnote,
455 data=json.dumps(data).encode('utf8'), headers=real_headers,
456 query={'key': api_key or self._extract_api_key()})
457
458 def extract_yt_initial_data(self, item_id, webpage, fatal=True):
459 data = self._search_regex(
460 (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
461 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
462 if data:
463 return self._parse_json(data, item_id, fatal=fatal)
464
465 @staticmethod
466 def _extract_session_index(*data):
467 """
468 Index of current account in account list.
469 See: https://github.com/yt-dlp/yt-dlp/pull/519
470 """
471 for ytcfg in data:
472 session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
473 if session_index is not None:
474 return session_index
475
476 # Deprecated?
477 def _extract_identity_token(self, ytcfg=None, webpage=None):
478 if ytcfg:
479 token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
480 if token:
481 return token
482 if webpage:
483 return self._search_regex(
484 r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
485 'identity token', default=None, fatal=False)
486
487 @staticmethod
488 def _extract_account_syncid(*args):
489 """
490 Extract syncId required to download private playlists of secondary channels
491 @params response and/or ytcfg
492 """
493 for data in args:
494 # ytcfg includes channel_syncid if on secondary channel
495 delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
496 if delegated_sid:
497 return delegated_sid
498 sync_ids = (try_get(
499 data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
500 lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
501 if len(sync_ids) >= 2 and sync_ids[1]:
502 # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
503 # and just "user_syncid||" for primary channel. We only want the channel_syncid
504 return sync_ids[0]
505
506 @staticmethod
507 def _extract_visitor_data(*args):
508 """
509 Extracts visitorData from an API response or ytcfg
510 Appears to be used to track session state
511 """
512 return get_first(
513 args, (('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))),
514 expected_type=str)
515
516 @property
517 def is_authenticated(self):
518 return bool(self._generate_sapisidhash_header())
519
520 def extract_ytcfg(self, video_id, webpage):
521 if not webpage:
522 return {}
523 return self._parse_json(
524 self._search_regex(
525 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
526 default='{}'), video_id, fatal=False) or {}
527
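    # Builds the InnerTube request headers (X-YouTube-Client-Name/-Version, Origin,
    # X-Youtube-Identity-Token, X-Goog-PageId, X-Goog-Visitor-Id, X-Goog-AuthUser and, when
    # the cookies allow it, the SAPISIDHASH Authorization header); None values are dropped.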
528 def generate_api_headers(
529 self, *, ytcfg=None, account_syncid=None, session_index=None,
530 visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
531
532 origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
533 headers = {
534 'X-YouTube-Client-Name': compat_str(
535 self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
536 'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
537 'Origin': origin,
538 'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
539 'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
540 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
541 }
542 if session_index is None:
543 session_index = self._extract_session_index(ytcfg)
544 if account_syncid or session_index is not None:
545 headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
546
547 auth = self._generate_sapisidhash_header(origin)
548 if auth is not None:
549 headers['Authorization'] = auth
550 headers['X-Origin'] = origin
551 return {h: v for h, v in headers.items() if v is not None}
552
553 @staticmethod
554 def _build_api_continuation_query(continuation, ctp=None):
555 query = {
556 'continuation': continuation
557 }
558 # TODO: Inconsistency with clickTrackingParams.
559 # Currently we have a fixed ctp contained within context (from ytcfg)
560 # and a ctp in root query for continuation.
561 if ctp:
562 query['clickTracking'] = {'clickTrackingParams': ctp}
563 return query
564
565 @classmethod
566 def _extract_next_continuation_data(cls, renderer):
567 next_continuation = try_get(
568 renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
569 lambda x: x['continuation']['reloadContinuationData']), dict)
570 if not next_continuation:
571 return
572 continuation = next_continuation.get('continuation')
573 if not continuation:
574 return
575 ctp = next_continuation.get('clickTrackingParams')
576 return cls._build_api_continuation_query(continuation, ctp)
577
578 @classmethod
579 def _extract_continuation_ep_data(cls, continuation_ep: dict):
580 if isinstance(continuation_ep, dict):
581 continuation = try_get(
582 continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
583 if not continuation:
584 return
585 ctp = continuation_ep.get('clickTrackingParams')
586 return cls._build_api_continuation_query(continuation, ctp)
587
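    # Continuation extraction: first try the legacy nextContinuationData /
    # reloadContinuationData shape, then fall back to scanning 'contents'/'items' for a
    # continuationItemRenderer endpoint.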
588 @classmethod
589 def _extract_continuation(cls, renderer):
590 next_continuation = cls._extract_next_continuation_data(renderer)
591 if next_continuation:
592 return next_continuation
593
594 contents = []
595 for key in ('contents', 'items'):
596 contents.extend(try_get(renderer, lambda x: x[key], list) or [])
597
598 for content in contents:
599 if not isinstance(content, dict):
600 continue
601 continuation_ep = try_get(
602 content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
603 lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
604 dict)
605 continuation = cls._extract_continuation_ep_data(continuation_ep)
606 if continuation:
607 return continuation
608
609 @classmethod
610 def _extract_alerts(cls, data):
611 for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
612 if not isinstance(alert_dict, dict):
613 continue
614 for alert in alert_dict.values():
615 alert_type = alert.get('type')
616 if not alert_type:
617 continue
618 message = cls._get_text(alert, 'text')
619 if message:
620 yield alert_type, message
621
622 def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
623 errors = []
624 warnings = []
625 for alert_type, alert_message in alerts:
626 if alert_type.lower() == 'error' and fatal:
627 errors.append([alert_type, alert_message])
628 else:
629 warnings.append([alert_type, alert_message])
630
631 for alert_type, alert_message in (warnings + errors[:-1]):
632 self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
633 if errors:
634 raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
635
636 def _extract_and_report_alerts(self, data, *args, **kwargs):
637 return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
638
639 def _extract_badges(self, renderer: dict):
640 badges = set()
641 for badge in try_get(renderer, lambda x: x['badges'], list) or []:
642 label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
643 if label:
644 badges.add(label.lower())
645 return badges
646
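    # Text extraction from InnerTube renderers: for each path, prefer 'simpleText', otherwise
    # join the 'text' of the 'runs' (optionally capped at max_runs).
    # Rough example with a made-up renderer:
    #   _get_text({'title': {'runs': [{'text': 'Foo'}, {'text': 'Bar'}]}}, 'title')  ->  'FooBar'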
647 @staticmethod
648 def _get_text(data, *path_list, max_runs=None):
649 for path in path_list or [None]:
650 if path is None:
651 obj = [data]
652 else:
653 obj = traverse_obj(data, path, default=[])
654 if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
655 obj = [obj]
656 for item in obj:
657 text = try_get(item, lambda x: x['simpleText'], compat_str)
658 if text:
659 return text
660 runs = try_get(item, lambda x: x['runs'], list) or []
661 if not runs and isinstance(item, list):
662 runs = item
663
664 runs = runs[:min(len(runs), max_runs or len(runs))]
665 text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
666 if text:
667 return text
668
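    # Wrapper around _call_api that retries up to --extractor-retries times: on network
    # errors and HTTP errors other than 403/429, on 'unknown error' alerts returned with
    # HTTP 200, and when none of check_get_keys is present in the response
    # ('Incomplete data received').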
669 def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
670 ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
671 default_client='web'):
672 response = None
673 last_error = None
674 count = -1
675 retries = self.get_param('extractor_retries', 3)
676 if check_get_keys is None:
677 check_get_keys = []
678 while count < retries:
679 count += 1
680 if last_error:
681 self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
682 try:
683 response = self._call_api(
684 ep=ep, fatal=True, headers=headers,
685 video_id=item_id, query=query,
686 context=self._extract_context(ytcfg, default_client),
687 api_key=self._extract_api_key(ytcfg, default_client),
688 api_hostname=api_hostname, default_client=default_client,
689 note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
690 except ExtractorError as e:
691 if isinstance(e.cause, network_exceptions):
692 if isinstance(e.cause, compat_HTTPError) and not is_html(e.cause.read(512)):
693 e.cause.seek(0)
694 yt_error = try_get(
695 self._parse_json(e.cause.read().decode(), item_id, fatal=False),
696 lambda x: x['error']['message'], compat_str)
697 if yt_error:
698 self._report_alerts([('ERROR', yt_error)], fatal=False)
699 # Downloading page may result in an intermittent 5xx HTTP error
700 # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
701 # We also want to catch all other network exceptions since errors in later pages can be troublesome
702 # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
703 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
704 last_error = error_to_compat_str(e.cause or e.msg)
705 if count < retries:
706 continue
707 if fatal:
708 raise
709 else:
710 self.report_warning(error_to_compat_str(e))
711 return
712
713 else:
714 try:
715 self._extract_and_report_alerts(response, only_once=True)
716 except ExtractorError as e:
717 # YouTube servers may return errors we want to retry on in a 200 OK response
718 # See: https://github.com/yt-dlp/yt-dlp/issues/839
719 if 'unknown error' in e.msg.lower():
720 last_error = e.msg
721 continue
722 if fatal:
723 raise
724 self.report_warning(error_to_compat_str(e))
725 return
726 if not check_get_keys or dict_get(response, check_get_keys):
727 break
728 # YouTube sometimes sends incomplete data
729 # See: https://github.com/ytdl-org/youtube-dl/issues/28194
730 last_error = 'Incomplete data received'
731 if count >= retries:
732 if fatal:
733 raise ExtractorError(last_error)
734 else:
735 self.report_warning(last_error)
736 return
737 return response
738
739 @staticmethod
740 def is_music_url(url):
741 return re.match(r'https?://music\.youtube\.com/', url) is not None
742
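    # Converts a compact video renderer into a flat 'url' result pointing at the watch page,
    # pulling title, description snippet, duration, view count and uploader from its text fields.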
743 def _extract_video(self, renderer):
744 video_id = renderer.get('videoId')
745 title = self._get_text(renderer, 'title')
746 description = self._get_text(renderer, 'descriptionSnippet')
747 duration = parse_duration(self._get_text(
748 renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
749 view_count_text = self._get_text(renderer, 'viewCountText') or ''
750 view_count = str_to_int(self._search_regex(
751 r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
752 'view count', default=None))
753
754 uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
755
756 return {
757 '_type': 'url',
758 'ie_key': YoutubeIE.ie_key(),
759 'id': video_id,
760 'url': f'https://www.youtube.com/watch?v={video_id}',
761 'title': title,
762 'description': description,
763 'duration': duration,
764 'view_count': view_count,
765 'uploader': uploader,
766 }
767
768
769class YoutubeIE(YoutubeBaseInfoExtractor):
770 IE_DESC = 'YouTube'
771 _VALID_URL = r"""(?x)^
772 (
773 (?:https?://|//) # http(s):// or protocol-independent URL
774 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
775 (?:www\.)?deturl\.com/www\.youtube\.com|
776 (?:www\.)?pwnyoutube\.com|
777 (?:www\.)?hooktube\.com|
778 (?:www\.)?yourepeat\.com|
779 tube\.majestyc\.net|
780 %(invidious)s|
781 youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
782 (?:.*?\#/)? # handle anchor (#/) redirect urls
783 (?: # the various things that can precede the ID:
784 (?:(?:v|embed|e|shorts)/(?!videoseries)) # v/ or embed/ or e/ or shorts/
785 |(?: # or the v= param in all its forms
786 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
787 (?:\?|\#!?) # the params delimiter ? or # or #!
788 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
789 v=
790 )
791 ))
792 |(?:
793 youtu\.be| # just youtu.be/xxxx
794 vid\.plus| # or vid.plus/xxxx
795 zwearz\.com/watch| # or zwearz.com/watch/xxxx
796 %(invidious)s
797 )/
798 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
799 )
800 )? # all until now is optional -> you can pass the naked ID
801 (?P<id>[0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
802 (?(1).+)? # if we found the ID, everything can follow
803 (?:\#|$)""" % {
804 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
805 }
806 _PLAYER_INFO_RE = (
807 r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
808 r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
809 r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
810 )
811 _formats = {
812 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
813 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
814 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
815 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
816 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
817 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
818 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
819 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
820 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
821 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
822 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
823 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
824 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
825 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
826 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
827 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
828 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
829 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
830
831
832 # 3D videos
833 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
834 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
835 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
836 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
837 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
838 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
839 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
840
841 # Apple HTTP Live Streaming
842 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
843 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
844 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
845 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
846 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
847 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
848 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
849 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
850
851 # DASH mp4 video
852 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
853 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
854 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
855 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
856 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
857 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
858 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
859 '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
860 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
861 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
862 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
863 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
864
865 # Dash mp4 audio
866 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
867 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
868 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
869 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
870 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
871 '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
872 '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
873
874 # Dash webm
875 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
876 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
877 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
878 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
879 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
880 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
881 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
882 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
883 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
884 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
885 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
886 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
887 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
888 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
889 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
890 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
891 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
892 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
893 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
894 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
895 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
896 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
897
898 # Dash webm audio
899 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
900 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
901
902 # Dash webm audio with opus inside
903 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
904 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
905 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
906
907 # RTMP (unnamed)
908 '_rtmp': {'protocol': 'rtmp'},
909
910 # av01 video-only formats sometimes served with "unknown" codecs
911 '394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
912 '395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
913 '396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
914 '397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
915 '398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
916 '399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
917 '400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
918 '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
919 }
920 _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
921
922 _GEO_BYPASS = False
923
924 IE_NAME = 'youtube'
925 _TESTS = [
926 {
927 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
928 'info_dict': {
929 'id': 'BaW_jenozKc',
930 'ext': 'mp4',
931 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
932 'uploader': 'Philipp Hagemeister',
933 'uploader_id': 'phihag',
934 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
935 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
936 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
937 'upload_date': '20121002',
938 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
939 'categories': ['Science & Technology'],
940 'tags': ['youtube-dl'],
941 'duration': 10,
942 'view_count': int,
943 'like_count': int,
944 'dislike_count': int,
945 'start_time': 1,
946 'end_time': 9,
947 }
948 },
949 {
950 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
951 'note': 'Embed-only video (#1746)',
952 'info_dict': {
953 'id': 'yZIXLfi8CZQ',
954 'ext': 'mp4',
955 'upload_date': '20120608',
956 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
957 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
958 'uploader': 'SET India',
959 'uploader_id': 'setindia',
960 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
961 'age_limit': 18,
962 },
963 'skip': 'Private video',
964 },
965 {
966 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
967 'note': 'Use the first video ID in the URL',
968 'info_dict': {
969 'id': 'BaW_jenozKc',
970 'ext': 'mp4',
971 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
972 'uploader': 'Philipp Hagemeister',
973 'uploader_id': 'phihag',
974 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
975 'upload_date': '20121002',
976 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
977 'categories': ['Science & Technology'],
978 'tags': ['youtube-dl'],
979 'duration': 10,
980 'view_count': int,
981 'like_count': int,
982 'dislike_count': int,
983 },
984 'params': {
985 'skip_download': True,
986 },
987 },
988 {
989 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
990 'note': '256k DASH audio (format 141) via DASH manifest',
991 'info_dict': {
992 'id': 'a9LDPn-MO4I',
993 'ext': 'm4a',
994 'upload_date': '20121002',
995 'uploader_id': '8KVIDEO',
996 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
997 'description': '',
998 'uploader': '8KVIDEO',
999 'title': 'UHDTV TEST 8K VIDEO.mp4'
1000 },
1001 'params': {
1002 'youtube_include_dash_manifest': True,
1003 'format': '141',
1004 },
1005 'skip': 'format 141 not served anymore',
1006 },
1007 # DASH manifest with encrypted signature
1008 {
1009 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
1010 'info_dict': {
1011 'id': 'IB3lcPjvWLA',
1012 'ext': 'm4a',
1013 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
1014 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
1015 'duration': 244,
1016 'uploader': 'AfrojackVEVO',
1017 'uploader_id': 'AfrojackVEVO',
1018 'upload_date': '20131011',
1019 'abr': 129.495,
1020 },
1021 'params': {
1022 'youtube_include_dash_manifest': True,
1023 'format': '141/bestaudio[ext=m4a]',
1024 },
1025 },
1026 # Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
1027 {
1028 'note': 'Embed allowed age-gate video',
1029 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
1030 'info_dict': {
1031 'id': 'HtVdAasjOgU',
1032 'ext': 'mp4',
1033 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
1034 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
1035 'duration': 142,
1036 'uploader': 'The Witcher',
1037 'uploader_id': 'WitcherGame',
1038 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
1039 'upload_date': '20140605',
1040 'age_limit': 18,
1041 },
1042 },
1043 {
1044 'note': 'Age-gate video with embed allowed in public site',
1045 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
1046 'info_dict': {
1047 'id': 'HsUATh_Nc2U',
1048 'ext': 'mp4',
1049 'title': 'Godzilla 2 (Official Video)',
1050 'description': 'md5:bf77e03fcae5529475e500129b05668a',
1051 'upload_date': '20200408',
1052 'uploader_id': 'FlyingKitty900',
1053 'uploader': 'FlyingKitty',
1054 'age_limit': 18,
1055 },
1056 },
1057 {
1058 'note': 'Age-gate video embeddable only with clientScreen=EMBED',
1059 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
1060 'info_dict': {
1061 'id': 'Tq92D6wQ1mg',
1062 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
1063 'ext': 'mp4',
1064 'upload_date': '20191227',
1065 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
1066 'uploader': 'Projekt Melody',
1067 'description': 'md5:17eccca93a786d51bc67646756894066',
1068 'age_limit': 18,
1069 },
1070 },
1071 {
1072 'note': 'Non-age-gated non-embeddable video',
1073 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
1074 'info_dict': {
1075 'id': 'MeJVWBSsPAY',
1076 'ext': 'mp4',
1077 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
1078 'uploader': 'Herr Lurik',
1079 'uploader_id': 'st3in234',
1080 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
1081 'upload_date': '20130730',
1082 },
1083 },
1084 {
1085 'note': 'Non-bypassable age-gated video',
1086 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
1087 'only_matching': True,
1088 },
1089 # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
1090 # YouTube Red ad is not captured for creator
1091 {
1092 'url': '__2ABJjxzNo',
1093 'info_dict': {
1094 'id': '__2ABJjxzNo',
1095 'ext': 'mp4',
1096 'duration': 266,
1097 'upload_date': '20100430',
1098 'uploader_id': 'deadmau5',
1099 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
1100 'creator': 'deadmau5',
1101 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
1102 'uploader': 'deadmau5',
1103 'title': 'Deadmau5 - Some Chords (HD)',
1104 'alt_title': 'Some Chords',
1105 },
1106 'expected_warnings': [
1107 'DASH manifest missing',
1108 ]
1109 },
1110 # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
1111 {
1112 'url': 'lqQg6PlCWgI',
1113 'info_dict': {
1114 'id': 'lqQg6PlCWgI',
1115 'ext': 'mp4',
1116 'duration': 6085,
1117 'upload_date': '20150827',
1118 'uploader_id': 'olympic',
1119 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
1120 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
1121 'uploader': 'Olympics',
1122 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
1123 },
1124 'params': {
1125 'skip_download': 'requires avconv',
1126 }
1127 },
1128 # Non-square pixels
1129 {
1130 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
1131 'info_dict': {
1132 'id': '_b-2C3KPAM0',
1133 'ext': 'mp4',
1134 'stretched_ratio': 16 / 9.,
1135 'duration': 85,
1136 'upload_date': '20110310',
1137 'uploader_id': 'AllenMeow',
1138 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
1139 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
1140 'uploader': '孫ᄋᄅ',
1141 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
1142 },
1143 },
1144 # url_encoded_fmt_stream_map is empty string
1145 {
1146 'url': 'qEJwOuvDf7I',
1147 'info_dict': {
1148 'id': 'qEJwOuvDf7I',
1149 'ext': 'webm',
1150 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
1151 'description': '',
1152 'upload_date': '20150404',
1153 'uploader_id': 'spbelect',
1154 'uploader': 'Наблюдатели Петербурга',
1155 },
1156 'params': {
1157 'skip_download': 'requires avconv',
1158 },
1159 'skip': 'This live event has ended.',
1160 },
1161 # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
1162 {
1163 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
1164 'info_dict': {
1165 'id': 'FIl7x6_3R5Y',
1166 'ext': 'webm',
1167 'title': 'md5:7b81415841e02ecd4313668cde88737a',
1168 'description': 'md5:116377fd2963b81ec4ce64b542173306',
1169 'duration': 220,
1170 'upload_date': '20150625',
1171 'uploader_id': 'dorappi2000',
1172 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
1173 'uploader': 'dorappi2000',
1174 'formats': 'mincount:31',
1175 },
1176 'skip': 'not actual anymore',
1177 },
1178 # DASH manifest with segment_list
1179 {
1180 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
1181 'md5': '8ce563a1d667b599d21064e982ab9e31',
1182 'info_dict': {
1183 'id': 'CsmdDsKjzN8',
1184 'ext': 'mp4',
1185 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
1186 'uploader': 'Airtek',
1187 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
1188 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
1189 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
1190 },
1191 'params': {
1192 'youtube_include_dash_manifest': True,
1193 'format': '135', # bestvideo
1194 },
1195 'skip': 'This live event has ended.',
1196 },
1197 {
1198 # Multifeed videos (multiple cameras), URL is for Main Camera
1199 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
1200 'info_dict': {
1201 'id': 'jvGDaLqkpTg',
1202 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
1203 'description': 'md5:e03b909557865076822aa169218d6a5d',
1204 },
1205 'playlist': [{
1206 'info_dict': {
1207 'id': 'jvGDaLqkpTg',
1208 'ext': 'mp4',
1209 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
1210 'description': 'md5:e03b909557865076822aa169218d6a5d',
1211 'duration': 10643,
1212 'upload_date': '20161111',
1213 'uploader': 'Team PGP',
1214 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1215 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1216 },
1217 }, {
1218 'info_dict': {
1219 'id': '3AKt1R1aDnw',
1220 'ext': 'mp4',
1221 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
1222 'description': 'md5:e03b909557865076822aa169218d6a5d',
1223 'duration': 10991,
1224 'upload_date': '20161111',
1225 'uploader': 'Team PGP',
1226 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1227 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1228 },
1229 }, {
1230 'info_dict': {
1231 'id': 'RtAMM00gpVc',
1232 'ext': 'mp4',
1233 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
1234 'description': 'md5:e03b909557865076822aa169218d6a5d',
1235 'duration': 10995,
1236 'upload_date': '20161111',
1237 'uploader': 'Team PGP',
1238 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1239 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1240 },
1241 }, {
1242 'info_dict': {
1243 'id': '6N2fdlP3C5U',
1244 'ext': 'mp4',
1245 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
1246 'description': 'md5:e03b909557865076822aa169218d6a5d',
1247 'duration': 10990,
1248 'upload_date': '20161111',
1249 'uploader': 'Team PGP',
1250 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1251 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1252 },
1253 }],
1254 'params': {
1255 'skip_download': True,
1256 },
1257 'skip': 'Not multifeed anymore',
1258 },
1259 {
1260 # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
1261 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
1262 'info_dict': {
1263 'id': 'gVfLd0zydlo',
1264 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
1265 },
1266 'playlist_count': 2,
1267 'skip': 'Not multifeed anymore',
1268 },
1269 {
1270 'url': 'https://vid.plus/FlRa-iH7PGw',
1271 'only_matching': True,
1272 },
1273 {
1274 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
1275 'only_matching': True,
1276 },
1277 {
1278 # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1279 # Also tests cut-off URL expansion in video description (see
1280 # https://github.com/ytdl-org/youtube-dl/issues/1892,
1281 # https://github.com/ytdl-org/youtube-dl/issues/8164)
1282 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
1283 'info_dict': {
1284 'id': 'lsguqyKfVQg',
1285 'ext': 'mp4',
1286 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
1287 'alt_title': 'Dark Walk',
1288 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
1289 'duration': 133,
1290 'upload_date': '20151119',
1291 'uploader_id': 'IronSoulElf',
1292 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
1293 'uploader': 'IronSoulElf',
1294 'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1295 'track': 'Dark Walk',
1296 'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1297 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
1298 },
1299 'params': {
1300 'skip_download': True,
1301 },
1302 },
1303 {
1304 # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1305 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
1306 'only_matching': True,
1307 },
1308 {
1309 # Video with yt:stretch=17:0
1310 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
1311 'info_dict': {
1312 'id': 'Q39EVAstoRM',
1313 'ext': 'mp4',
1314 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
1315 'description': 'md5:ee18a25c350637c8faff806845bddee9',
1316 'upload_date': '20151107',
1317 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
1318 'uploader': 'CH GAMER DROID',
1319 },
1320 'params': {
1321 'skip_download': True,
1322 },
1323 'skip': 'This video does not exist.',
1324 },
1325 {
1326 # Video with incomplete 'yt:stretch=16:'
1327 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
1328 'only_matching': True,
1329 },
1330 {
1331 # Video licensed under Creative Commons
1332 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
1333 'info_dict': {
1334 'id': 'M4gD1WSo5mA',
1335 'ext': 'mp4',
1336 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
1337 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
1338 'duration': 721,
1339 'upload_date': '20150127',
1340 'uploader_id': 'BerkmanCenter',
1341 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
1342 'uploader': 'The Berkman Klein Center for Internet & Society',
1343 'license': 'Creative Commons Attribution license (reuse allowed)',
1344 },
1345 'params': {
1346 'skip_download': True,
1347 },
1348 },
1349 {
1350 # Channel-like uploader_url
1351 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
1352 'info_dict': {
1353 'id': 'eQcmzGIKrzg',
1354 'ext': 'mp4',
1355 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
1356 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
1357 'duration': 4060,
1358 'upload_date': '20151119',
1359 'uploader': 'Bernie Sanders',
1360 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
1361 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
1362 'license': 'Creative Commons Attribution license (reuse allowed)',
1363 },
1364 'params': {
1365 'skip_download': True,
1366 },
1367 },
1368 {
1369 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
1370 'only_matching': True,
1371 },
1372 {
1373 # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
1374 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
1375 'only_matching': True,
1376 },
1377 {
1378 # Rental video preview
1379 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
1380 'info_dict': {
1381 'id': 'uGpuVWrhIzE',
1382 'ext': 'mp4',
1383 'title': 'Piku - Trailer',
1384 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
1385 'upload_date': '20150811',
1386 'uploader': 'FlixMatrix',
1387 'uploader_id': 'FlixMatrixKaravan',
1388 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
1389 'license': 'Standard YouTube License',
1390 },
1391 'params': {
1392 'skip_download': True,
1393 },
1394 'skip': 'This video is not available.',
1395 },
1396 {
1397 # YouTube Red video with episode data
1398 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
1399 'info_dict': {
1400 'id': 'iqKdEhx-dD4',
1401 'ext': 'mp4',
1402 'title': 'Isolation - Mind Field (Ep 1)',
1403 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
1404 'duration': 2085,
1405 'upload_date': '20170118',
1406 'uploader': 'Vsauce',
1407 'uploader_id': 'Vsauce',
1408 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
1409 'series': 'Mind Field',
1410 'season_number': 1,
1411 'episode_number': 1,
1412 },
1413 'params': {
1414 'skip_download': True,
1415 },
1416 'expected_warnings': [
1417 'Skipping DASH manifest',
1418 ],
1419 },
1420 {
1421 # The following content has been identified by the YouTube community
1422 # as inappropriate or offensive to some audiences.
1423 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
1424 'info_dict': {
1425 'id': '6SJNVb0GnPI',
1426 'ext': 'mp4',
1427 'title': 'Race Differences in Intelligence',
1428 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
1429 'duration': 965,
1430 'upload_date': '20140124',
1431 'uploader': 'New Century Foundation',
1432 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
1433 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
1434 },
1435 'params': {
1436 'skip_download': True,
1437 },
1438 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
1439 },
1440 {
1441 # itag 212
1442 'url': '1t24XAntNCY',
1443 'only_matching': True,
1444 },
1445 {
1446 # geo restricted to JP
1447 'url': 'sJL6WA-aGkQ',
1448 'only_matching': True,
1449 },
1450 {
1451 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
1452 'only_matching': True,
1453 },
1454 {
1455 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
1456 'only_matching': True,
1457 },
1458 {
1459 # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
1460 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
1461 'only_matching': True,
1462 },
1463 {
1464 # DRM protected
1465 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
1466 'only_matching': True,
1467 },
1468 {
1469 # Video with unsupported adaptive stream type formats
1470 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
1471 'info_dict': {
1472 'id': 'Z4Vy8R84T1U',
1473 'ext': 'mp4',
1474 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
1475 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
1476 'duration': 433,
1477 'upload_date': '20130923',
1478 'uploader': 'Amelia Putri Harwita',
1479 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
1480 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
1481 'formats': 'maxcount:10',
1482 },
1483 'params': {
1484 'skip_download': True,
1485 'youtube_include_dash_manifest': False,
1486 },
1487 'skip': 'not actual anymore',
1488 },
1489 {
1490 # Youtube Music Auto-generated description
1491 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1492 'info_dict': {
1493 'id': 'MgNrAu2pzNs',
1494 'ext': 'mp4',
1495 'title': 'Voyeur Girl',
1496 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
1497 'upload_date': '20190312',
1498 'uploader': 'Stephen - Topic',
1499 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
1500 'artist': 'Stephen',
1501 'track': 'Voyeur Girl',
1502 'album': 'it\'s too much love to know my dear',
1503 'release_date': '20190313',
1504 'release_year': 2019,
1505 },
1506 'params': {
1507 'skip_download': True,
1508 },
1509 },
1510 {
1511 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
1512 'only_matching': True,
1513 },
1514 {
1515 # invalid -> valid video id redirection
1516 'url': 'DJztXj2GPfl',
1517 'info_dict': {
1518 'id': 'DJztXj2GPfk',
1519 'ext': 'mp4',
1520 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
1521 'description': 'md5:bf577a41da97918e94fa9798d9228825',
1522 'upload_date': '20090125',
1523 'uploader': 'Prochorowka',
1524 'uploader_id': 'Prochorowka',
1525 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
1526 'artist': 'Panjabi MC',
1527 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
1528 'album': 'Beware of the Boys (Mundian To Bach Ke)',
1529 },
1530 'params': {
1531 'skip_download': True,
1532 },
1533 'skip': 'Video unavailable',
1534 },
1535 {
1536 # empty description results in an empty string
1537 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
1538 'info_dict': {
1539 'id': 'x41yOUIvK2k',
1540 'ext': 'mp4',
1541 'title': 'IMG 3456',
1542 'description': '',
1543 'upload_date': '20170613',
1544 'uploader_id': 'ElevageOrVert',
1545 'uploader': 'ElevageOrVert',
1546 },
1547 'params': {
1548 'skip_download': True,
1549 },
1550 },
1551 {
1552 # with '};' inside yt initial data (see [1])
1553 # see [2] for an example with '};' inside ytInitialPlayerResponse
1554 # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
1555 # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
1556 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
1557 'info_dict': {
1558 'id': 'CHqg6qOn4no',
1559 'ext': 'mp4',
1560 'title': 'Part 77 Sort a list of simple types in c#',
1561 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
1562 'upload_date': '20130831',
1563 'uploader_id': 'kudvenkat',
1564 'uploader': 'kudvenkat',
1565 },
1566 'params': {
1567 'skip_download': True,
1568 },
1569 },
1570 {
1571 # another example of '};' in ytInitialData
1572 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
1573 'only_matching': True,
1574 },
1575 {
1576 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
1577 'only_matching': True,
1578 },
1579 {
1580 # https://github.com/ytdl-org/youtube-dl/pull/28094
1581 'url': 'OtqTfy26tG0',
1582 'info_dict': {
1583 'id': 'OtqTfy26tG0',
1584 'ext': 'mp4',
1585 'title': 'Burn Out',
1586 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
1587 'upload_date': '20141120',
1588 'uploader': 'The Cinematic Orchestra - Topic',
1589 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
1590 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
1591 'artist': 'The Cinematic Orchestra',
1592 'track': 'Burn Out',
1593 'album': 'Every Day',
1594 'release_date': None,
1595 'release_year': None,
1596 },
1597 'params': {
1598 'skip_download': True,
1599 },
1600 },
1601 {
1602 # controversial video, only works with bpctr when authenticated with cookies
1603 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
1604 'only_matching': True,
1605 },
1606 {
1607 # controversial video, requires bpctr/contentCheckOk
1608 'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
1609 'info_dict': {
1610 'id': 'SZJvDhaSDnc',
1611 'ext': 'mp4',
1612 'title': 'San Diego teen commits suicide after bullying over embarrassing video',
1613 'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
1614 'uploader': 'CBS This Morning',
1615 'uploader_id': 'CBSThisMorning',
1616 'upload_date': '20140716',
1617 'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7'
1618 }
1619 },
1620 {
1621 # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
1622 'url': 'cBvYw8_A0vQ',
1623 'info_dict': {
1624 'id': 'cBvYw8_A0vQ',
1625 'ext': 'mp4',
1626 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
1627 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
1628 'upload_date': '20201120',
1629 'uploader': 'Walk around Japan',
1630 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
1631 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
1632 },
1633 'params': {
1634 'skip_download': True,
1635 },
1636 }, {
1637 # Has multiple audio streams
1638 'url': 'WaOKSUlf4TM',
1639 'only_matching': True
1640 }, {
1641 # Requires Premium: has format 141 when requested using YTM url
1642 'url': 'https://music.youtube.com/watch?v=XclachpHxis',
1643 'only_matching': True
1644 }, {
1645 # multiple subtitles with same lang_code
1646 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
1647 'only_matching': True,
1648 }, {
1649 # Force use android client fallback
1650 'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
1651 'info_dict': {
1652 'id': 'YOelRv7fMxY',
1653 'title': 'DIGGING A SECRET TUNNEL Part 1',
1654 'ext': '3gp',
1655 'upload_date': '20210624',
1656 'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
1657 'uploader': 'colinfurze',
1658 'uploader_id': 'colinfurze',
1659 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
1660 'description': 'md5:b5096f56af7ccd7a555c84db81738b22'
1661 },
1662 'params': {
1663 'format': '17', # 3gp format available on android
1664 'extractor_args': {'youtube': {'player_client': ['android']}},
1665 },
1666 },
1667 {
1668 # Skip download of additional client configs (remix client config in this case)
1669 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1670 'only_matching': True,
1671 'params': {
1672 'extractor_args': {'youtube': {'player_skip': ['configs']}},
1673 },
1674 }, {
1675 # shorts
1676 'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
1677 'only_matching': True,
1678 }, {
1679 'note': 'Storyboards',
1680 'url': 'https://www.youtube.com/watch?v=5KLPxDtMqe8',
1681 'info_dict': {
1682 'id': '5KLPxDtMqe8',
1683 'ext': 'mhtml',
1684 'format_id': 'sb0',
1685 'title': 'Your Brain is Plastic',
1686 'uploader_id': 'scishow',
1687 'description': 'md5:89cd86034bdb5466cd87c6ba206cd2bc',
1688 'upload_date': '20140324',
1689 'uploader': 'SciShow',
1690 }, 'params': {'format': 'mhtml', 'skip_download': True}
1691 }
1692 ]
1693
1694 @classmethod
1695 def suitable(cls, url):
1696 from ..utils import parse_qs
1697
1698 qs = parse_qs(url)
1699 if qs.get('list', [None])[0]:
1700 return False
1701 return super(YoutubeIE, cls).suitable(url)
1702
1703 def __init__(self, *args, **kwargs):
1704 super(YoutubeIE, self).__init__(*args, **kwargs)
1705 self._code_cache = {}
1706 self._player_cache = {}
1707
1708 def _extract_player_url(self, *ytcfgs, webpage=None):
1709 player_url = traverse_obj(
1710 ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
1711 get_all=False, expected_type=compat_str)
1712 if not player_url:
1713 return
1714 if player_url.startswith('//'):
1715 player_url = 'https:' + player_url
1716 elif not re.match(r'https?://', player_url):
1717 player_url = compat_urlparse.urljoin(
1718 'https://www.youtube.com', player_url)
1719 return player_url
1720
1721 def _download_player_url(self, video_id, fatal=False):
1722 res = self._download_webpage(
1723 'https://www.youtube.com/iframe_api',
1724 note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
1725 if res:
1726 player_version = self._search_regex(
1727 r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
1728 if player_version:
1729 return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
1730
1731 def _signature_cache_id(self, example_sig):
1732 """ Return a string representation of a signature """
1733 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
1734
1735 @classmethod
1736 def _extract_player_info(cls, player_url):
1737 for player_re in cls._PLAYER_INFO_RE:
1738 id_m = re.search(player_re, player_url)
1739 if id_m:
1740 break
1741 else:
1742 raise ExtractorError('Cannot identify player %r' % player_url)
1743 return id_m.group('id')
1744
1745 def _load_player(self, video_id, player_url, fatal=True):
1746 player_id = self._extract_player_info(player_url)
1747 if player_id not in self._code_cache:
1748 code = self._download_webpage(
1749 player_url, video_id, fatal=fatal,
1750 note='Downloading player ' + player_id,
1751 errnote='Download of %s failed' % player_url)
1752 if code:
1753 self._code_cache[player_id] = code
1754 return self._code_cache.get(player_id)
1755
1756 def _extract_signature_function(self, video_id, player_url, example_sig):
1757 player_id = self._extract_player_info(player_url)
1758
1759 # Read from filesystem cache
1760 func_id = 'js_%s_%s' % (
1761 player_id, self._signature_cache_id(example_sig))
1762 assert os.path.basename(func_id) == func_id
1763
1764 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
1765 if cache_spec is not None:
1766 return lambda s: ''.join(s[i] for i in cache_spec)
1767
1768 code = self._load_player(video_id, player_url)
1769 if code:
1770 res = self._parse_sig_js(code)
1771
1772 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1773 cache_res = res(test_string)
1774 cache_spec = [ord(c) for c in cache_res]
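# cache_spec maps each output position to the index of the input character, so the JS
# transform can later be replayed as a simple reordering without re-running the interpreter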
1775
1776 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1777 return res
1778
1779 def _print_sig_code(self, func, example_sig):
1780 if not self.get_param('youtube_print_sig_code'):
1781 return
1782
1783 def gen_sig_code(idxs):
1784 def _genslice(start, end, step):
1785 starts = '' if start == 0 else str(start)
1786 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1787 steps = '' if step == 1 else (':%d' % step)
1788 return 's[%s%s%s]' % (starts, ends, steps)
1789
1790 step = None
1791 # Quell pyflakes warnings - start will be set when step is set
1792 start = '(Never used)'
1793 for i, prev in zip(idxs[1:], idxs[:-1]):
1794 if step is not None:
1795 if i - prev == step:
1796 continue
1797 yield _genslice(start, prev, step)
1798 step = None
1799 continue
1800 if i - prev in [-1, 1]:
1801 step = i - prev
1802 start = prev
1803 continue
1804 else:
1805 yield 's[%d]' % prev
1806 if step is None:
1807 yield 's[%d]' % i
1808 else:
1809 yield _genslice(start, i, step)
1810
1811 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1812 cache_res = func(test_string)
1813 cache_spec = [ord(c) for c in cache_res]
1814 expr_code = ' + '.join(gen_sig_code(cache_spec))
1815 signature_id_tuple = '(%s)' % (
1816 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1817 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1818 ' return %s\n') % (signature_id_tuple, expr_code)
1819 self.to_screen('Extracted signature function:\n' + code)
1820
1821 def _parse_sig_js(self, jscode):
1822 funcname = self._search_regex(
1823 (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1824 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1825 r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
1826 r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
1827 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
1828 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1829 r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1830 # Obsolete patterns
1831 r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1832 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
1833 r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1834 r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1835 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1836 r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1837 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1838 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
1839 jscode, 'Initial JS player signature function name', group='sig')
1840
1841 jsi = JSInterpreter(jscode)
1842 initial_function = jsi.extract_function(funcname)
1843 return lambda s: initial_function([s])
1844
1845 def _decrypt_signature(self, s, video_id, player_url):
1846 """Turn the encrypted s field into a working signature"""
1847
1848 if player_url is None:
1849 raise ExtractorError('Cannot decrypt signature without player_url')
1850
1851 try:
1852 player_id = (player_url, self._signature_cache_id(s))
1853 if player_id not in self._player_cache:
1854 func = self._extract_signature_function(
1855 video_id, player_url, s
1856 )
1857 self._player_cache[player_id] = func
1858 func = self._player_cache[player_id]
1859 self._print_sig_code(func, s)
1860 return func(s)
1861 except Exception as e:
1862 raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
1863
1864 def _decrypt_nsig(self, s, video_id, player_url):
1865 """Turn the encrypted n field into a working signature"""
1866 if player_url is None:
1867 raise ExtractorError('Cannot decrypt nsig without player_url')
1868 if player_url.startswith('//'):
1869 player_url = 'https:' + player_url
1870 elif not re.match(r'https?://', player_url):
1871 player_url = compat_urlparse.urljoin(
1872 'https://www.youtube.com', player_url)
1873
1874 sig_id = ('nsig_value', s)
1875 if sig_id in self._player_cache:
1876 return self._player_cache[sig_id]
1877
1878 try:
1879 player_id = ('nsig', player_url)
1880 if player_id not in self._player_cache:
1881 self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
1882 func = self._player_cache[player_id]
1883 self._player_cache[sig_id] = func(s)
1884 self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
1885 return self._player_cache[sig_id]
1886 except Exception as e:
1887 raise ExtractorError(traceback.format_exc(), cause=e, video_id=video_id)
1888
1889 def _extract_n_function_name(self, jscode):
1890 return self._search_regex(
1891 (r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]{3})\([a-zA-Z0-9]\)',),
1892 jscode, 'Initial JS player n function name', group='nfunc')
1893
1894 def _extract_n_function(self, video_id, player_url):
1895 player_id = self._extract_player_info(player_url)
1896 func_code = self._downloader.cache.load('youtube-nsig', player_id)
1897
1898 if func_code:
1899 jsi = JSInterpreter(func_code)
1900 else:
1901 jscode = self._load_player(video_id, player_url)
1902 funcname = self._extract_n_function_name(jscode)
1903 jsi = JSInterpreter(jscode)
1904 func_code = jsi.extract_function_code(funcname)
1905 self._downloader.cache.store('youtube-nsig', player_id, func_code)
1906
1907 if self.get_param('youtube_print_sig_code'):
1908 self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
1909
1910 return lambda s: jsi.extract_function_from_code(*func_code)([s])
1911
1912 def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
1913 """
1914 Extract signatureTimestamp (sts)
1915 Required to tell API what sig/player version is in use.
1916 """
1917 sts = None
1918 if isinstance(ytcfg, dict):
1919 sts = int_or_none(ytcfg.get('STS'))
1920
1921 if not sts:
1922 # Attempt to extract from player
1923 if player_url is None:
1924 error_msg = 'Cannot extract signature timestamp without player_url.'
1925 if fatal:
1926 raise ExtractorError(error_msg)
1927 self.report_warning(error_msg)
1928 return
1929 code = self._load_player(video_id, player_url, fatal=fatal)
1930 if code:
1931 sts = int_or_none(self._search_regex(
1932 r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
1933 'JS player signature timestamp', group='sts', fatal=fatal))
1934 return sts
1935
1936 def _mark_watched(self, video_id, player_responses):
1937 playback_url = get_first(
1938 player_responses, ('playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
1939 expected_type=url_or_none)
1940 if not playback_url:
1941 self.report_warning('Unable to mark watched')
1942 return
1943 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1944 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1945
1946 # cpn generation algorithm is reverse engineered from base.js.
1947 # In fact it works even with dummy cpn.
1948 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1949 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
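# cpn is 16 characters drawn from the 64-symbol URL-safe alphabet above;
# '& 63' folds the random value onto the alphabet indices 0-63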
1950
1951 qs.update({
1952 'ver': ['2'],
1953 'cpn': [cpn],
1954 })
1955 playback_url = compat_urlparse.urlunparse(
1956 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1957
1958 self._download_webpage(
1959 playback_url, video_id, 'Marking watched',
1960 'Unable to mark watched', fatal=False)
1961
1962 @staticmethod
1963 def _extract_urls(webpage):
1964 # Embedded YouTube player
1965 entries = [
1966 unescapeHTML(mobj.group('url'))
1967 for mobj in re.finditer(r'''(?x)
1968 (?:
1969 <iframe[^>]+?src=|
1970 data-video-url=|
1971 <embed[^>]+?src=|
1972 embedSWF\(?:\s*|
1973 <object[^>]+data=|
1974 new\s+SWFObject\(
1975 )
1976 (["\'])
1977 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
1978 (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
1979 \1''', webpage)]
1980
1981 # lazyYT YouTube embed
1982 entries.extend(list(map(
1983 unescapeHTML,
1984 re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
1985
1986 # Wordpress "YouTube Video Importer" plugin
1987 matches = re.findall(r'''(?x)<div[^>]+
1988 class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
1989 data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
1990 entries.extend(m[-1] for m in matches)
1991
1992 return entries
1993
1994 @staticmethod
1995 def _extract_url(webpage):
1996 urls = YoutubeIE._extract_urls(webpage)
1997 return urls[0] if urls else None
1998
1999 @classmethod
2000 def extract_id(cls, url):
2001 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
2002 if mobj is None:
2003 raise ExtractorError('Invalid URL: %s' % url)
2004 return mobj.group('id')
2005
2006 def _extract_chapters_from_json(self, data, duration):
2007 chapter_list = traverse_obj(
2008 data, (
2009 'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
2010 'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
2011 ), expected_type=list)
2012
2013 return self._extract_chapters(
2014 chapter_list,
2015 chapter_time=lambda chapter: float_or_none(
2016 traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
2017 chapter_title=lambda chapter: traverse_obj(
2018 chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
2019 duration=duration)
2020
2021 def _extract_chapters_from_engagement_panel(self, data, duration):
2022 content_list = traverse_obj(
2023 data,
2024 ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
2025 expected_type=list, default=[])
2026 chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
2027 chapter_title = lambda chapter: self._get_text(chapter, 'title')
2028
2029 return next((
2030 filter(None, (
2031 self._extract_chapters(
2032 traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
2033 chapter_time, chapter_title, duration)
2034 for contents in content_list
2035 ))), [])
2036
2037 def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
2038 chapters = []
2039 last_chapter = {'start_time': 0}
2040 for idx, chapter in enumerate(chapter_list or []):
2041 title = chapter_title(chapter)
2042 start_time = chapter_time(chapter)
2043 if start_time is None:
2044 continue
2045 last_chapter['end_time'] = start_time
2046 if start_time < last_chapter['start_time']:
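# a start time earlier than the previous chapter means the chapter data is unreliable;
# if this happens on the second entry, the first chapter is discarded as well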
2047 if idx == 1:
2048 chapters.pop()
2049 self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
2050 else:
2051 self.report_warning(f'Invalid start time for chapter "{title}"')
2052 continue
2053 last_chapter = {'start_time': start_time, 'title': title}
2054 chapters.append(last_chapter)
2055 last_chapter['end_time'] = duration
2056 return chapters
2057
2058 def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
2059 return self._parse_json(self._search_regex(
2060 (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
2061 regex), webpage, name, default='{}'), video_id, fatal=False)
2062
2063 @staticmethod
2064 def parse_time_text(time_text):
2065 """
2066 Parse the comment time text
2067 time_text is in the format 'X units ago (edited)'
2068 """
2069 time_text_split = time_text.split(' ')
2070 if len(time_text_split) >= 3:
2071 try:
2072 return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
2073 except ValueError:
2074 return None
2075
2076 def _extract_comment(self, comment_renderer, parent=None):
2077 comment_id = comment_renderer.get('commentId')
2078 if not comment_id:
2079 return
2080
2081 text = self._get_text(comment_renderer, 'contentText')
2082
2083 # note: timestamp is an estimate calculated from the current time and time_text
2084 time_text = self._get_text(comment_renderer, 'publishedTimeText') or ''
2085 time_text_dt = self.parse_time_text(time_text)
2086 if isinstance(time_text_dt, datetime.datetime):
2087 timestamp = calendar.timegm(time_text_dt.timetuple())
2088 author = self._get_text(comment_renderer, 'authorText')
2089 author_id = try_get(comment_renderer,
2090 lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
2091
2092 votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
2093 lambda x: x['likeCount']), compat_str)) or 0
2094 author_thumbnail = try_get(comment_renderer,
2095 lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
2096
2097 author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
2098 is_favorited = 'creatorHeart' in (try_get(
2099 comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
2100 return {
2101 'id': comment_id,
2102 'text': text,
2103 'timestamp': timestamp,
2104 'time_text': time_text,
2105 'like_count': votes,
2106 'is_favorited': is_favorited,
2107 'author': author,
2108 'author_id': author_id,
2109 'author_thumbnail': author_thumbnail,
2110 'author_is_uploader': author_is_uploader,
2111 'parent': parent or 'root'
2112 }
2113
2114 def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, comment_counts=None):
2115
2116 def extract_header(contents):
2117 _continuation = None
2118 for content in contents:
2119 comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
2120 expected_comment_count = parse_count(self._get_text(
2121 comments_header_renderer, 'countText', 'commentsCount', max_runs=1))
2122
2123 if expected_comment_count:
2124 comment_counts[1] = expected_comment_count
2125 self.to_screen('Downloading ~%d comments' % expected_comment_count)
2126 sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
2127 comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top
2128
2129 sort_menu_item = try_get(
2130 comments_header_renderer,
2131 lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
2132 sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
2133
2134 _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
2135 if not _continuation:
2136 continue
2137
2138 sort_text = sort_menu_item.get('title')
2139 if isinstance(sort_text, compat_str):
2140 sort_text = sort_text.lower()
2141 else:
2142 sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
2143 self.to_screen('Sorting comments by %s' % sort_text)
2144 break
2145 return _continuation
2146
2147 def extract_thread(contents):
2148 if not parent:
2149 comment_counts[2] = 0
2150 for content in contents:
2151 comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
2152 comment_renderer = try_get(
2153 comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get(
2154 content, (lambda x: x['commentRenderer'], dict))
2155
2156 if not comment_renderer:
2157 continue
2158 comment = self._extract_comment(comment_renderer, parent)
2159 if not comment:
2160 continue
2161 comment_counts[0] += 1
2162 yield comment
2163 # Attempt to get the replies
2164 comment_replies_renderer = try_get(
2165 comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
2166
2167 if comment_replies_renderer:
2168 comment_counts[2] += 1
2169 comment_entries_iter = self._comment_entries(
2170 comment_replies_renderer, ytcfg, video_id,
2171 parent=comment.get('id'), comment_counts=comment_counts)
2172
2173 for reply_comment in comment_entries_iter:
2174 yield reply_comment
2175
2176 # YouTube comments have a max depth of 2
2177 max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf')
2178 if max_depth == 1 and parent:
2179 return
2180 if not comment_counts:
2181 # comments so far, est. total comments, current comment thread #
2182 comment_counts = [0, 0, 0]
2183
2184 continuation = self._extract_continuation(root_continuation_data)
2185 if continuation and len(continuation['continuation']) < 27:
2186 self.write_debug('Detected old API continuation token. Generating new API compatible token.')
2187 continuation_token = self._generate_comment_continuation(video_id)
2188 continuation = self._build_api_continuation_query(continuation_token, None)
2189
2190 message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
2191 if message and not parent:
2192 self.report_warning(message, video_id=video_id)
2193
2194 visitor_data = None
2195 is_first_continuation = parent is None
2196
2197 for page_num in itertools.count(0):
2198 if not continuation:
2199 break
2200 headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
2201 comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
2202 if page_num == 0:
2203 if is_first_continuation:
2204 note_prefix = 'Downloading comment section API JSON'
2205 else:
2206 note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
2207 comment_counts[2], comment_prog_str)
2208 else:
2209 note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
2210 ' ' if parent else '', ' replies' if parent else '',
2211 page_num, comment_prog_str)
2212
2213 response = self._extract_response(
2214 item_id=None, query=continuation,
2215 ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
2216 check_get_keys=('onResponseReceivedEndpoints', 'continuationContents'))
2217 if not response:
2218 break
2219 visitor_data = try_get(
2220 response,
2221 lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
2222 compat_str) or visitor_data
2223
2224 continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents'))
2225
2226 continuation = None
2227 if isinstance(continuation_contents, list):
2228 for continuation_section in continuation_contents:
2229 if not isinstance(continuation_section, dict):
2230 continue
2231 continuation_items = try_get(
2232 continuation_section,
2233 (lambda x: x['reloadContinuationItemsCommand']['continuationItems'],
2234 lambda x: x['appendContinuationItemsAction']['continuationItems']),
2235 list) or []
2236 if is_first_continuation:
2237 continuation = extract_header(continuation_items)
2238 is_first_continuation = False
2239 if continuation:
2240 break
2241 continue
2242 count = 0
2243 for count, entry in enumerate(extract_thread(continuation_items)):
2244 yield entry
2245 continuation = self._extract_continuation({'contents': continuation_items})
2246 if continuation:
2247 # Sometimes YouTube provides a continuation without any comments
2248 # In most cases we end up just downloading these with very few comments to come.
2249 if count == 0:
2250 if not parent:
2251 self.report_warning('No comments received - assuming end of comments')
2252 continuation = None
2253 break
2254
2255 # Deprecated response structure
2256 elif isinstance(continuation_contents, dict):
2257 known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation')
2258 for key, continuation_renderer in continuation_contents.items():
2259 if key not in known_continuation_renderers:
2260 continue
2261 if not isinstance(continuation_renderer, dict):
2262 continue
2263 if is_first_continuation:
2264 header_continuation_items = [continuation_renderer.get('header') or {}]
2265 continuation = extract_header(header_continuation_items)
2266 is_first_continuation = False
2267 if continuation:
2268 break
2269
2270 # Sometimes YouTube provides a continuation without any comments
2271 # In most cases we end up just downloading these with very few comments to come.
2272 count = 0
2273 for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {})):
2274 yield entry
2275 continuation = self._extract_continuation(continuation_renderer)
2276 if count == 0:
2277 if not parent:
2278 self.report_warning('No comments received - assuming end of comments')
2279 continuation = None
2280 break
2281
2282 @staticmethod
2283 def _generate_comment_continuation(video_id):
2284 """
2285 Generates initial comment section continuation token from given video id
2286 """
2287 b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8')))
2288 parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u')
2289 new_continuation_intlist = list(itertools.chain.from_iterable(
2290 [bytes_to_intlist(base64.b64decode(part)) for part in parts]))
2291 return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8')
2292
2293 def _get_comments(self, ytcfg, video_id, contents, webpage):
2294 """Entry for comment extraction"""
2295 def _real_comment_extract(contents):
2296 renderer = next((
2297 item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
2298 if item.get('sectionIdentifier') == 'comment-item-section'), None)
2299 yield from self._comment_entries(renderer, ytcfg, video_id)
2300
2301 max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
2302 # Force English regardless of account setting to prevent parsing issues
2303 # See: https://github.com/yt-dlp/yt-dlp/issues/532
2304 ytcfg = copy.deepcopy(ytcfg)
2305 traverse_obj(
2306 ytcfg, ('INNERTUBE_CONTEXT', 'client'), expected_type=dict, default={})['hl'] = 'en'
2307 return itertools.islice(_real_comment_extract(contents), 0, max_comments)
2308
2309 @staticmethod
2310 def _get_checkok_params():
2311 return {'contentCheckOk': True, 'racyCheckOk': True}
2312
2313 @classmethod
2314 def _generate_player_context(cls, sts=None):
2315 context = {
2316 'html5Preference': 'HTML5_PREF_WANTS',
2317 }
2318 if sts is not None:
2319 context['signatureTimestamp'] = sts
2320 return {
2321 'playbackContext': {
2322 'contentPlaybackContext': context
2323 },
2324 **cls._get_checkok_params()
2325 }
2326
2327 @staticmethod
2328 def _is_agegated(player_response):
2329 if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
2330 return True
2331
2332 reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
2333 AGE_GATE_REASONS = (
2334 'confirm your age', 'age-restricted', 'inappropriate', # reason
2335 'age_verification_required', 'age_check_required', # status
2336 )
2337 return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
2338
2339 @staticmethod
2340 def _is_unplayable(player_response):
2341 return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
2342
2343 def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
2344
2345 session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
2346 syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
2347 sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
2348 headers = self.generate_api_headers(
2349 ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
2350
2351 yt_query = {'videoId': video_id}
2352 yt_query.update(self._generate_player_context(sts))
2353 return self._extract_response(
2354 item_id=video_id, ep='player', query=yt_query,
2355 ytcfg=player_ytcfg, headers=headers, fatal=True,
2356 default_client=client,
2357 note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
2358 ) or None
2359
2360 def _get_requested_clients(self, url, smuggled_data):
2361 requested_clients = []
2362 default = ['android', 'web']
2363 allowed_clients = sorted(
2364 [client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'],
2365 key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
2366 for client in self._configuration_arg('player_client'):
2367 if client in allowed_clients:
2368 requested_clients.append(client)
2369 elif client == 'default':
2370 requested_clients.extend(default)
2371 elif client == 'all':
2372 requested_clients.extend(allowed_clients)
2373 else:
2374 self.report_warning(f'Skipping unsupported client {client}')
2375 if not requested_clients:
2376 requested_clients = default
2377
2378 if smuggled_data.get('is_music_url') or self.is_music_url(url):
2379 requested_clients.extend(
2380 f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
2381
2382 return orderedSet(requested_clients)
2383
2384 def _extract_player_ytcfg(self, client, video_id):
2385 url = {
2386 'web_music': 'https://music.youtube.com',
2387 'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
2388 }.get(client)
2389 if not url:
2390 return {}
2391 webpage = self._download_webpage(url, video_id, fatal=False, note=f'Downloading {client} config')
2392 return self.extract_ytcfg(video_id, webpage) or {}
2393
2394 def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
2395 initial_pr = None
2396 if webpage:
2397 initial_pr = self._extract_yt_initial_variable(
2398 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
2399 video_id, 'initial player response')
2400
2401 original_clients = clients
2402 clients = clients[::-1]
2403 prs = []
2404
2405 def append_client(client_name):
2406 if client_name in INNERTUBE_CLIENTS and client_name not in original_clients:
2407 clients.append(client_name)
2408
2409 # Android player_response does not have microFormats which are needed for
2410 # extraction of some data. So we return the initial_pr with formats
2411 # stripped out even if not requested by the user
2412 # See: https://github.com/yt-dlp/yt-dlp/issues/501
2413 if initial_pr:
2414 pr = dict(initial_pr)
2415 pr['streamingData'] = None
2416 prs.append(pr)
2417
2418 last_error = None
2419 tried_iframe_fallback = False
2420 player_url = None
2421 while clients:
2422 client = clients.pop()
2423 player_ytcfg = master_ytcfg if client == 'web' else {}
2424 if 'configs' not in self._configuration_arg('player_skip'):
2425 player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
2426
2427 player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
2428 require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
2429 if 'js' in self._configuration_arg('player_skip'):
2430 require_js_player = False
2431 player_url = None
2432
2433 if not player_url and not tried_iframe_fallback and require_js_player:
2434 player_url = self._download_player_url(video_id)
2435 tried_iframe_fallback = True
2436
2437 try:
2438 pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
2439 client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
2440 except ExtractorError as e:
2441 if last_error:
2442 self.report_warning(last_error)
2443 last_error = e
2444 continue
2445
2446 if pr:
2447 prs.append(pr)
2448
2449 # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
2450 if client.endswith('_agegate') and self._is_unplayable(pr) and self.is_authenticated:
2451 append_client(client.replace('_agegate', '_creator'))
2452 elif self._is_agegated(pr):
2453 append_client(f'{client}_agegate')
2454
2455 if last_error:
2456 if not len(prs):
2457 raise last_error
2458 self.report_warning(last_error)
2459 return prs, player_url
2460
2461 def _extract_formats(self, streaming_data, video_id, player_url, is_live):
2462 itags, stream_ids = {}, []
2463 itag_qualities, res_qualities = {}, {}
2464 q = qualities([
2465 # Normally tiny is the smallest video-only format. But
2466 # audio-only formats with unknown quality may get tagged as tiny
2467 'tiny',
2468 'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
2469 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
2470 ])
2471 streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
2472
2473 for fmt in streaming_formats:
2474 if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
2475 continue
2476
2477 itag = str_or_none(fmt.get('itag'))
2478 audio_track = fmt.get('audioTrack') or {}
2479 stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
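# streams are deduplicated on (itag, audio track id), so multi-audio videos keep one format per language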
2480 if stream_id in stream_ids:
2481 continue
2482
2483 quality = fmt.get('quality')
2484 height = int_or_none(fmt.get('height'))
2485 if quality == 'tiny' or not quality:
2486 quality = fmt.get('audioQuality', '').lower() or quality
2487 # The 3gp format (17) in android client has a quality of "small",
2488 # but is actually worse than other formats
2489 if itag == '17':
2490 quality = 'tiny'
2491 if quality:
2492 if itag:
2493 itag_qualities[itag] = quality
2494 if height:
2495 res_qualities[height] = quality
2496 # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
2497 # (adding `&sq=0` to the URL) and parsing emsg box to determine the
2498 # number of fragments that would subsequently be requested with (`&sq=N`)
2499 if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
2500 continue
2501
2502 fmt_url = fmt.get('url')
2503 if not fmt_url:
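# some formats ship a 'signatureCipher' blob instead of a plain url: an urlencoded triple
# of the url, the scrambled signature ('s') and the query parameter name ('sp') that the
# decrypted signature must be attached under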
2504 sc = compat_parse_qs(fmt.get('signatureCipher'))
2505 fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
2506 encrypted_sig = try_get(sc, lambda x: x['s'][0])
2507 if not (sc and fmt_url and encrypted_sig):
2508 continue
2509 if not player_url:
2510 continue
2511 signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
2512 sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
2513 fmt_url += '&' + sp + '=' + signature
2514
2515 query = parse_qs(fmt_url)
2516 throttled = False
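# formats carrying an 'n' parameter are speed-throttled unless it is descrambled
# with the player JS (see _decrypt_nsig)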
2517 if query.get('ratebypass') != ['yes'] and query.get('n'):
2518 try:
2519 fmt_url = update_url_query(fmt_url, {
2520 'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
2521 except ExtractorError as e:
2522 self.report_warning(
2523 f'nsig extraction failed: You may experience throttling for some formats\n'
2524 f'n = {query["n"][0]} ; player = {player_url}\n{e}', only_once=True)
2525 throttled = True
2526
2527 if itag:
2528 itags[itag] = 'https'
2529 stream_ids.append(stream_id)
2530
2531 tbr = float_or_none(
2532 fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
2533 dct = {
2534 'asr': int_or_none(fmt.get('audioSampleRate')),
2535 'filesize': int_or_none(fmt.get('contentLength')),
2536 'format_id': itag,
2537 'format_note': join_nonempty(
2538 '%s%s' % (audio_track.get('displayName') or '',
2539 ' (default)' if audio_track.get('audioIsDefault') else ''),
2540 fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
2541 throttled and 'THROTTLED', delim=', '),
2542 'source_preference': -10 if throttled else -1,
2543 'fps': int_or_none(fmt.get('fps')) or None,
2544 'height': height,
2545 'quality': q(quality),
2546 'tbr': tbr,
2547 'url': fmt_url,
2548 'width': int_or_none(fmt.get('width')),
2549 'language': audio_track.get('id', '').split('.')[0],
2550 'language_preference': 1 if audio_track.get('audioIsDefault') else -1,
2551 }
2552 mime_mobj = re.match(
2553 r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
2554 if mime_mobj:
2555 dct['ext'] = mimetype2ext(mime_mobj.group(1))
2556 dct.update(parse_codecs(mime_mobj.group(2)))
2557 no_audio = dct.get('acodec') == 'none'
2558 no_video = dct.get('vcodec') == 'none'
2559 if no_audio:
2560 dct['vbr'] = tbr
2561 if no_video:
2562 dct['abr'] = tbr
2563 if no_audio or no_video:
2564 dct['downloader_options'] = {
2565 # Youtube throttles chunks >~10M
2566 'http_chunk_size': 10485760,
2567 }
2568 if dct.get('ext'):
2569 dct['container'] = dct['ext'] + '_dash'
2570 yield dct
2571
2572 skip_manifests = self._configuration_arg('skip')
2573 get_dash = (
2574 (not is_live or self._configuration_arg('include_live_dash'))
2575 and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True))
2576 get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True)
2577
2578 def process_manifest_format(f, proto, itag):
2579 if itag in itags:
2580 if itags[itag] == proto or f'{itag}-{proto}' in itags:
2581 return False
2582 itag = f'{itag}-{proto}'
2583 if itag:
2584 f['format_id'] = itag
2585 itags[itag] = proto
2586
2587 f['quality'] = next((
2588 q(qdict[val])
2589 for val, qdict in ((f.get('format_id', '').split('-')[0], itag_qualities), (f.get('height'), res_qualities))
2590 if val in qdict), -1)
2591 return True
2592
2593 for sd in streaming_data:
2594 hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
2595 if hls_manifest_url:
2596 for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
2597 if process_manifest_format(f, 'hls', self._search_regex(
2598 r'/itag/(\d+)', f['url'], 'itag', default=None)):
2599 yield f
2600
2601 dash_manifest_url = get_dash and sd.get('dashManifestUrl')
2602 if dash_manifest_url:
2603 for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
2604 if process_manifest_format(f, 'dash', f['format_id']):
2605 f['filesize'] = int_or_none(self._search_regex(
2606 r'/clen/(\d+)', f.get('fragment_base_url') or f['url'], 'file size', default=None))
2607 yield f
2608
2609 def _extract_storyboard(self, player_responses, duration):
2610 spec = get_first(
2611 player_responses, ('storyboards', 'playerStoryboardSpecRenderer', 'spec'), default='').split('|')[::-1]
2612 if not spec:
2613 return
2614 base_url = spec.pop()
2615 L = len(spec) - 1
2616 for i, args in enumerate(spec):
2617 args = args.split('#')
2618 counts = list(map(int_or_none, args[:5]))
2619 if len(args) != 8 or not all(counts):
2620 self.report_warning(f'Malformed storyboard {i}: {"#".join(args)}{bug_reports_message()}')
2621 continue
2622 width, height, frame_count, cols, rows = counts
2623 N, sigh = args[6:]
2624
2625 url = base_url.replace('$L', str(L - i)).replace('$N', N) + f'&sigh={sigh}'
2626 fragment_count = frame_count / (cols * rows)
2627 fragment_duration = duration / fragment_count
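# each fragment ('$M' in the url below) is a cols x rows grid of frames; the last grid
# may be only partially filled, hence math.ceil when enumerating fragments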
2628 yield {
2629 'format_id': f'sb{i}',
2630 'format_note': 'storyboard',
2631 'ext': 'mhtml',
2632 'protocol': 'mhtml',
2633 'acodec': 'none',
2634 'vcodec': 'none',
2635 'url': url,
2636 'width': width,
2637 'height': height,
2638 'fragments': [{
2639 'path': url.replace('$M', str(j)),
2640 'duration': min(fragment_duration, duration - (j * fragment_duration)),
2641 } for j in range(math.ceil(fragment_count))],
2642 }
2643
2644 def _real_extract(self, url):
2645 url, smuggled_data = unsmuggle_url(url, {})
2646 video_id = self._match_id(url)
2647
2648 base_url = self.http_scheme() + '//www.youtube.com/'
2649 webpage_url = base_url + 'watch?v=' + video_id
2650 webpage = None
2651 if 'webpage' not in self._configuration_arg('player_skip'):
2652 webpage = self._download_webpage(
2653 webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
2654
2655 master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
2656
2657 player_responses, player_url = self._extract_player_responses(
2658 self._get_requested_clients(url, smuggled_data),
2659 video_id, webpage, master_ytcfg)
2660
2661 playability_statuses = traverse_obj(
2662 player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
2663
2664 trailer_video_id = get_first(
2665 playability_statuses,
2666 ('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
2667 expected_type=str)
2668 if trailer_video_id:
2669 return self.url_result(
2670 trailer_video_id, self.ie_key(), trailer_video_id)
2671
2672 search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
2673 if webpage else (lambda x: None))
2674
2675 video_details = traverse_obj(
2676 player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
2677 microformats = traverse_obj(
2678 player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
2679 expected_type=dict, default=[])
2680 video_title = (
2681 get_first(video_details, 'title')
2682 or self._get_text(microformats, (..., 'title'))
2683 or search_meta(['og:title', 'twitter:title', 'title']))
2684 video_description = get_first(video_details, 'shortDescription')
2685
2686 multifeed_metadata_list = get_first(
2687 player_responses,
2688 ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
2689 expected_type=str)
2690 if multifeed_metadata_list and not smuggled_data.get('force_singlefeed'):
2691 if self.get_param('noplaylist'):
2692 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2693 else:
2694 entries = []
2695 feed_ids = []
2696 for feed in multifeed_metadata_list.split(','):
2697 # Unquoting should take place before splitting on comma (,) since textual
2698 # fields may contain commas as well (see
2699 # https://github.com/ytdl-org/youtube-dl/issues/8536)
2700 feed_data = compat_parse_qs(
2701 compat_urllib_parse_unquote_plus(feed))
2702
2703 def feed_entry(name):
2704 return try_get(
2705 feed_data, lambda x: x[name][0], compat_str)
2706
2707 feed_id = feed_entry('id')
2708 if not feed_id:
2709 continue
2710 feed_title = feed_entry('title')
2711 title = video_title
2712 if feed_title:
2713 title += ' (%s)' % feed_title
2714 entries.append({
2715 '_type': 'url_transparent',
2716 'ie_key': 'Youtube',
2717 'url': smuggle_url(
2718 '%swatch?v=%s' % (base_url, feed_data['id'][0]),
2719 {'force_singlefeed': True}),
2720 'title': title,
2721 })
2722 feed_ids.append(feed_id)
2723 self.to_screen(
2724 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
2725 % (', '.join(feed_ids), video_id))
2726 return self.playlist_result(
2727 entries, video_id, video_title, video_description)
2728
2729 live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
2730 is_live = get_first(video_details, 'isLive')
2731 if is_live is None:
2732 is_live = get_first(live_broadcast_details, 'isLiveNow')
2733
2734 streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
2735 formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live))
2736
2737 if not formats:
2738 if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
2739 self.report_drm(video_id)
2740 pemr = get_first(
2741 playability_statuses,
2742 ('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
2743 reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
2744 subreason = clean_html(self._get_text(pemr, 'subreason') or '')
2745 if subreason:
2746 if subreason == 'The uploader has not made this video available in your country.':
2747 countries = get_first(microformats, 'availableCountries')
2748 if not countries:
2749 regions_allowed = search_meta('regionsAllowed')
2750 countries = regions_allowed.split(',') if regions_allowed else None
2751 self.raise_geo_restricted(subreason, countries, metadata_available=True)
2752 reason += f'. {subreason}'
2753 if reason:
2754 self.raise_no_formats(reason, expected=True)
2755
2756 keywords = get_first(video_details, 'keywords', expected_type=list) or []
2757 if not keywords and webpage:
2758 keywords = [
2759 unescapeHTML(m.group('content'))
2760 for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
2761 for keyword in keywords:
2762 if keyword.startswith('yt:stretch='):
2763 mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
2764 if mobj:
2765 # NB: float is intentional for forcing float division
2766 w, h = (float(v) for v in mobj.groups())
2767 if w > 0 and h > 0:
2768 ratio = w / h
2769 for f in formats:
2770 if f.get('vcodec') != 'none':
2771 f['stretched_ratio'] = ratio
2772 break
2773
2774 thumbnails = []
2775 thumbnail_dicts = traverse_obj(
2776 (video_details, microformats), (..., ..., 'thumbnail', 'thumbnails', ...),
2777 expected_type=dict, default=[])
2778 for thumbnail in thumbnail_dicts:
2779 thumbnail_url = thumbnail.get('url')
2780 if not thumbnail_url:
2781 continue
2782 # Sometimes youtube gives a wrong thumbnail URL. See:
2783 # https://github.com/yt-dlp/yt-dlp/issues/233
2784 # https://github.com/ytdl-org/youtube-dl/issues/28023
2785 if 'maxresdefault' in thumbnail_url:
2786 thumbnail_url = thumbnail_url.split('?')[0]
2787 thumbnails.append({
2788 'url': thumbnail_url,
2789 'height': int_or_none(thumbnail.get('height')),
2790 'width': int_or_none(thumbnail.get('width')),
2791 })
2792 thumbnail_url = search_meta(['og:image', 'twitter:image'])
2793 if thumbnail_url:
2794 thumbnails.append({
2795 'url': thumbnail_url,
2796 })
2797 original_thumbnails = thumbnails.copy()
2798
2799 # The best resolution thumbnail sometimes does not appear in the webpage
2800 # See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
2801 # List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
2802 thumbnail_names = [
2803 'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
2804 'hqdefault', 'hq1', 'hq2', 'hq3', '0',
2805 'mqdefault', 'mq1', 'mq2', 'mq3',
2806 'default', '1', '2', '3'
2807 ]
2808 n_thumbnail_names = len(thumbnail_names)
2809 thumbnails.extend({
2810 'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
2811 video_id=video_id, name=name, ext=ext,
2812 webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
2813 } for name in thumbnail_names for ext in ('webp', 'jpg'))
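# prefer webp over jpg for the same name, and earlier (higher quality) names over later ones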
2814 for thumb in thumbnails:
2815 i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
2816 thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
2817 self._remove_duplicate_formats(thumbnails)
2818 self._downloader._sort_thumbnails(original_thumbnails)
2819
2820 category = get_first(microformats, 'category') or search_meta('genre')
2821 channel_id = str_or_none(
2822 get_first(video_details, 'channelId')
2823 or get_first(microformats, 'externalChannelId')
2824 or search_meta('channelId'))
2825 duration = int_or_none(
2826 get_first(video_details, 'lengthSeconds')
2827 or get_first(microformats, 'lengthSeconds')
2828 or parse_duration(search_meta('duration'))) or None
2829 owner_profile_url = get_first(microformats, 'ownerProfileUrl')
2830
2831 live_content = get_first(video_details, 'isLiveContent')
2832 is_upcoming = get_first(video_details, 'isUpcoming')
2833 if is_live is None:
2834 if is_upcoming or live_content is False:
2835 is_live = False
2836 if is_upcoming is None and (live_content or is_live):
2837 is_upcoming = False
2838 live_starttime = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
2839 live_endtime = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
2840 if not duration and live_endtime and live_starttime:
2841 duration = live_endtime - live_starttime
2842
2843 formats.extend(self._extract_storyboard(player_responses, duration))
2844
2845 # Source is given priority since formats that throttle are given lower source_preference
2846 # When throttling issue is fully fixed, remove this
2847 self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang', 'proto'))
2848
2849 info = {
2850 'id': video_id,
2851 'title': self._live_title(video_title) if is_live else video_title,
2852 'formats': formats,
2853 'thumbnails': thumbnails,
2854 # The best thumbnail that we are sure exists. Prevents unnecessary
2855 # URL checking if the user doesn't care about getting the best possible thumbnail
2856 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
2857 'description': video_description,
2858 'upload_date': unified_strdate(
2859 get_first(microformats, 'uploadDate')
2860 or search_meta('uploadDate')),
2861 'uploader': get_first(video_details, 'author'),
2862 'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
2863 'uploader_url': owner_profile_url,
2864 'channel_id': channel_id,
2865 'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None,
2866 'duration': duration,
2867 'view_count': int_or_none(
2868 get_first((video_details, microformats), (..., 'viewCount'))
2869 or search_meta('interactionCount')),
2870 'average_rating': float_or_none(get_first(video_details, 'averageRating')),
2871 'age_limit': 18 if (
2872 get_first(microformats, 'isFamilySafe') is False
2873 or search_meta('isFamilyFriendly') == 'false'
2874 or search_meta('og:restrictions:age') == '18+') else 0,
2875 'webpage_url': webpage_url,
2876 'categories': [category] if category else None,
2877 'tags': keywords,
2878 'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
2879 'is_live': is_live,
2880 'was_live': (False if is_live or is_upcoming or live_content is False
2881 else None if is_live is None or is_upcoming is None
2882 else live_content),
2883 'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
2884 'release_timestamp': live_starttime,
2885 }
2886
2887 pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
2888 if pctr:
2889 def get_lang_code(track):
2890 return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
2891 or track.get('languageCode'))
2892
2893 # Converted into dicts to remove duplicates
2894 captions = {
2895 get_lang_code(sub): sub
2896 for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
2897 translation_languages = {
2898 lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
2899 for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
2900
2901 def process_language(container, base_url, lang_code, sub_name, query):
2902 lang_subs = container.setdefault(lang_code, [])
2903 for fmt in self._SUBTITLE_FORMATS:
2904 query.update({
2905 'fmt': fmt,
2906 })
2907 lang_subs.append({
2908 'ext': fmt,
2909 'url': update_url_query(base_url, query),
2910 'name': sub_name,
2911 })
2912
2913 subtitles, automatic_captions = {}, {}
2914 for lang_code, caption_track in captions.items():
2915 base_url = caption_track.get('baseUrl')
2916 if not base_url:
2917 continue
2918 lang_name = self._get_text(caption_track, 'name', max_runs=1)
2919 if caption_track.get('kind') != 'asr':
2920 if not lang_code:
2921 continue
2922 process_language(
2923 subtitles, base_url, lang_code, lang_name, {})
2924 if not caption_track.get('isTranslatable'):
2925 continue
2926 for trans_code, trans_name in translation_languages.items():
2927 if not trans_code:
2928 continue
2929 if caption_track.get('kind') != 'asr':
2930 trans_code += f'-{lang_code}'
2931 trans_name += format_field(lang_name, template=' from %s')
2932 process_language(
2933 automatic_captions, base_url, trans_code, trans_name, {'tlang': trans_code})
2934 info['automatic_captions'] = automatic_captions
2935 info['subtitles'] = subtitles
2936
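# Extract start/end times passed in the URL fragment or query,
# e.g. ...?t=1m30s or ...#start=30&end=90 (illustrative examples)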
2937 parsed_url = compat_urllib_parse_urlparse(url)
2938 for component in [parsed_url.fragment, parsed_url.query]:
2939 query = compat_parse_qs(component)
2940 for k, v in query.items():
2941 for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
2942 d_k += '_time'
2943 if d_k not in info and k in s_ks:
2944 info[d_k] = parse_duration(query[k][0])
2945
2946 # Youtube Music Auto-generated description
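# Such descriptions roughly look like (illustrative example, values are made up):
# 'Track Title · Artist Name\n\nAlbum Name\n\n℗ 2020 Some Label\n\nReleased on: 2020-01-01\n\n...\nAuto-generated by YouTube.'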
2947 if video_description:
2948 mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
2949 if mobj:
2950 release_year = mobj.group('release_year')
2951 release_date = mobj.group('release_date')
2952 if release_date:
2953 release_date = release_date.replace('-', '')
2954 if not release_year:
2955 release_year = release_date[:4]
2956 info.update({
2957 'album': mobj.group('album').strip(),
2958 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
2959 'track': mobj.group('track').strip(),
2960 'release_date': release_date,
2961 'release_year': int_or_none(release_year),
2962 })
2963
2964 initial_data = None
2965 if webpage:
2966 initial_data = self._extract_yt_initial_variable(
2967 webpage, self._YT_INITIAL_DATA_RE, video_id,
2968 'yt initial data')
2969 if not initial_data:
2970 query = {'videoId': video_id}
2971 query.update(self._get_checkok_params())
2972 initial_data = self._extract_response(
2973 item_id=video_id, ep='next', fatal=False,
2974 ytcfg=master_ytcfg, query=query,
2975 headers=self.generate_api_headers(ytcfg=master_ytcfg),
2976 note='Downloading initial data API JSON')
2977
2978 try:
2979 # This will error if there is no livechat
2980 initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
2981 info.setdefault('subtitles', {})['live_chat'] = [{
2982 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
2983 'video_id': video_id,
2984 'ext': 'json',
2985 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
2986 }]
2987 except (KeyError, IndexError, TypeError):
2988 pass
2989
2990 if initial_data:
2991 info['chapters'] = (
2992 self._extract_chapters_from_json(initial_data, duration)
2993 or self._extract_chapters_from_engagement_panel(initial_data, duration)
2994 or None)
2995
2996 contents = try_get(
2997 initial_data,
2998 lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
2999 list) or []
3000 for content in contents:
3001 vpir = content.get('videoPrimaryInfoRenderer')
3002 if vpir:
3003 stl = vpir.get('superTitleLink')
3004 if stl:
3005 stl = self._get_text(stl)
3006 if try_get(
3007 vpir,
3008 lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
3009 info['location'] = stl
3010 else:
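# Otherwise the super title is typically a series label,
# e.g. 'Some Show S1 • E3' (illustrative), from which series/season/episode are parsed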
3011 mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
3012 if mobj:
3013 info.update({
3014 'series': mobj.group(1),
3015 'season_number': int(mobj.group(2)),
3016 'episode_number': int(mobj.group(3)),
3017 })
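# Like/dislike counts are read from the accessibility labels of the top-level toggle
# buttons, e.g. '1,234 likes' or 'like this video along with 1,234 other people' (illustrative)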
3018 for tlb in (try_get(
3019 vpir,
3020 lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
3021 list) or []):
3022 tbr = tlb.get('toggleButtonRenderer') or {}
3023 for getter, regex in [(
3024 lambda x: x['defaultText']['accessibility']['accessibilityData'],
3025 r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
3026 lambda x: x['accessibility'],
3027 lambda x: x['accessibilityData']['accessibilityData'],
3028 ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
3029 label = (try_get(tbr, getter, dict) or {}).get('label')
3030 if label:
3031 mobj = re.match(regex, label)
3032 if mobj:
3033 info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
3034 break
3035 sbr_tooltip = try_get(
3036 vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
3037 if sbr_tooltip:
3038 like_count, dislike_count = sbr_tooltip.split(' / ')
3039 info.update({
3040 'like_count': str_to_int(like_count),
3041 'dislike_count': str_to_int(dislike_count),
3042 })
3043 vsir = content.get('videoSecondaryInfoRenderer')
3044 if vsir:
3045 info['channel'] = self._get_text(vsir, ('owner', 'videoOwnerRenderer', 'title'))
3046 rows = try_get(
3047 vsir,
3048 lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
3049 list) or []
3050 multiple_songs = False
3051 for row in rows:
3052 if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
3053 multiple_songs = True
3054 break
3055 for row in rows:
3056 mrr = row.get('metadataRowRenderer') or {}
3057 mrr_title = mrr.get('title')
3058 if not mrr_title:
3059 continue
3060 mrr_title = self._get_text(mrr, 'title')
3061 mrr_contents_text = self._get_text(mrr, ('contents', 0))
3062 if mrr_title == 'License':
3063 info['license'] = mrr_contents_text
3064 elif not multiple_songs:
3065 if mrr_title == 'Album':
3066 info['album'] = mrr_contents_text
3067 elif mrr_title == 'Artist':
3068 info['artist'] = mrr_contents_text
3069 elif mrr_title == 'Song':
3070 info['track'] = mrr_contents_text
3071
3072 fallbacks = {
3073 'channel': 'uploader',
3074 'channel_id': 'uploader_id',
3075 'channel_url': 'uploader_url',
3076 }
3077 for to, frm in fallbacks.items():
3078 if not info.get(to):
3079 info[to] = info.get(frm)
3080
3081 for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
3082 v = info.get(s_k)
3083 if v:
3084 info[d_k] = v
3085
3086 is_private = get_first(video_details, 'isPrivate', expected_type=bool)
3087 is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
3088 is_membersonly = None
3089 is_premium = None
3090 if initial_data and is_private is not None:
3091 is_membersonly = False
3092 is_premium = False
3093 contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
3094 badge_labels = set()
3095 for content in contents:
3096 if not isinstance(content, dict):
3097 continue
3098 badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
3099 for badge_label in badge_labels:
3100 if badge_label.lower() == 'members only':
3101 is_membersonly = True
3102 elif badge_label.lower() == 'premium':
3103 is_premium = True
3104 elif badge_label.lower() == 'unlisted':
3105 is_unlisted = True
3106
3107 info['availability'] = self._availability(
3108 is_private=is_private,
3109 needs_premium=is_premium,
3110 needs_subscription=is_membersonly,
3111 needs_auth=info['age_limit'] >= 18,
3112 is_unlisted=None if is_private is None else is_unlisted)
3113
3114 info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
3115
3116 self.mark_watched(video_id, player_responses)
3117
3118 return info
3119
3120class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
3121
3122 def _extract_channel_id(self, webpage):
3123 channel_id = self._html_search_meta(
3124 'channelId', webpage, 'channel id', default=None)
3125 if channel_id:
3126 return channel_id
3127 channel_url = self._html_search_meta(
3128 ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
3129 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
3130 'twitter:app:url:googleplay'), webpage, 'channel url')
3131 return self._search_regex(
3132 r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
3133 channel_url, 'channel id')
3134
3135 @staticmethod
3136 def _extract_basic_item_renderer(item):
3137 # Modified from _extract_grid_item_renderer
3138 known_basic_renderers = (
3139 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
3140 )
3141 for key, renderer in item.items():
3142 if not isinstance(renderer, dict):
3143 continue
3144 elif key in known_basic_renderers:
3145 return renderer
3146 elif key.startswith('grid') and key.endswith('Renderer'):
3147 return renderer
3148
3149 def _grid_entries(self, grid_renderer):
3150 for item in grid_renderer['items']:
3151 if not isinstance(item, dict):
3152 continue
3153 renderer = self._extract_basic_item_renderer(item)
3154 if not isinstance(renderer, dict):
3155 continue
3156 title = self._get_text(renderer, 'title')
3157
3158 # playlist
3159 playlist_id = renderer.get('playlistId')
3160 if playlist_id:
3161 yield self.url_result(
3162 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3163 ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
3164 video_title=title)
3165 continue
3166 # video
3167 video_id = renderer.get('videoId')
3168 if video_id:
3169 yield self._extract_video(renderer)
3170 continue
3171 # channel
3172 channel_id = renderer.get('channelId')
3173 if channel_id:
3174 yield self.url_result(
3175 'https://www.youtube.com/channel/%s' % channel_id,
3176 ie=YoutubeTabIE.ie_key(), video_title=title)
3177 continue
3178 # generic endpoint URL support
3179 ep_url = urljoin('https://www.youtube.com/', try_get(
3180 renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
3181 compat_str))
3182 if ep_url:
3183 for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
3184 if ie.suitable(ep_url):
3185 yield self.url_result(
3186 ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
3187 break
3188
3189 def _shelf_entries_from_content(self, shelf_renderer):
3190 content = shelf_renderer.get('content')
3191 if not isinstance(content, dict):
3192 return
3193 renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
3194 if renderer:
3195 # TODO: add support for nested playlists so each shelf is processed
3196 # as a separate playlist
3197 # TODO: this includes only first N items
3198 for entry in self._grid_entries(renderer):
3199 yield entry
3200 renderer = content.get('horizontalListRenderer')
3201 if renderer:
3202 # TODO
3203 pass
3204
3205 def _shelf_entries(self, shelf_renderer, skip_channels=False):
3206 ep = try_get(
3207 shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3208 compat_str)
3209 shelf_url = urljoin('https://www.youtube.com', ep)
3210 if shelf_url:
3211 # Skipping links to other channels; note that checking for
3212 # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
3213 # will not work
3214 if skip_channels and '/channels?' in shelf_url:
3215 return
3216 title = self._get_text(shelf_renderer, 'title')
3217 yield self.url_result(shelf_url, video_title=title)
3218 # Shelf may not contain shelf URL, fall back to extraction from content
3219 for entry in self._shelf_entries_from_content(shelf_renderer):
3220 yield entry
3221
3222 def _playlist_entries(self, video_list_renderer):
3223 for content in video_list_renderer['contents']:
3224 if not isinstance(content, dict):
3225 continue
3226 renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
3227 if not isinstance(renderer, dict):
3228 continue
3229 video_id = renderer.get('videoId')
3230 if not video_id:
3231 continue
3232 yield self._extract_video(renderer)
3233
3234 def _rich_entries(self, rich_grid_renderer):
3235 renderer = try_get(
3236 rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
3237 video_id = renderer.get('videoId')
3238 if not video_id:
3239 return
3240 yield self._extract_video(renderer)
3241
3242 def _video_entry(self, video_renderer):
3243 video_id = video_renderer.get('videoId')
3244 if video_id:
3245 return self._extract_video(video_renderer)
3246
3247 def _post_thread_entries(self, post_thread_renderer):
3248 post_renderer = try_get(
3249 post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
3250 if not post_renderer:
3251 return
3252 # video attachment
3253 video_renderer = try_get(
3254 post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
3255 video_id = video_renderer.get('videoId')
3256 if video_id:
3257 entry = self._extract_video(video_renderer)
3258 if entry:
3259 yield entry
3260 # playlist attachment
3261 playlist_id = try_get(
3262 post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
3263 if playlist_id:
3264 yield self.url_result(
3265 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3266 ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
3267 # inline video links
3268 runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
3269 for run in runs:
3270 if not isinstance(run, dict):
3271 continue
3272 ep_url = try_get(
3273 run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
3274 if not ep_url:
3275 continue
3276 if not YoutubeIE.suitable(ep_url):
3277 continue
3278 ep_video_id = YoutubeIE._match_id(ep_url)
3279 if video_id == ep_video_id:
3280 continue
3281 yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
3282
3283 def _post_thread_continuation_entries(self, post_thread_continuation):
3284 contents = post_thread_continuation.get('contents')
3285 if not isinstance(contents, list):
3286 return
3287 for content in contents:
3288 renderer = content.get('backstagePostThreadRenderer')
3289 if not isinstance(renderer, dict):
3290 continue
3291 for entry in self._post_thread_entries(renderer):
3292 yield entry
3293
3294 r''' # unused
3295 def _rich_grid_entries(self, contents):
3296 for content in contents:
3297 video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
3298 if video_renderer:
3299 entry = self._video_entry(video_renderer)
3300 if entry:
3301 yield entry
3302 '''
3303 def _extract_entries(self, parent_renderer, continuation_list):
3304 # continuation_list is modified in-place with continuation_list = [continuation_token]
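# (a single-element list acts as a crude out-parameter so the caller can read back
# the continuation token after the generator has been consumed)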
3305 continuation_list[:] = [None]
3306 contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
3307 for content in contents:
3308 if not isinstance(content, dict):
3309 continue
3310 is_renderer = try_get(content, lambda x: x['itemSectionRenderer'], dict)
3311 if not is_renderer:
3312 renderer = content.get('richItemRenderer')
3313 if renderer:
3314 for entry in self._rich_entries(renderer):
3315 yield entry
3316 continuation_list[0] = self._extract_continuation(parent_renderer)
3317 continue
3318 isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
3319 for isr_content in isr_contents:
3320 if not isinstance(isr_content, dict):
3321 continue
3322
3323 known_renderers = {
3324 'playlistVideoListRenderer': self._playlist_entries,
3325 'gridRenderer': self._grid_entries,
3326 'shelfRenderer': lambda x: self._shelf_entries(x),
3327 'backstagePostThreadRenderer': self._post_thread_entries,
3328 'videoRenderer': lambda x: [self._video_entry(x)],
3329 }
3330 for key, renderer in isr_content.items():
3331 if key not in known_renderers:
3332 continue
3333 for entry in known_renderers[key](renderer):
3334 if entry:
3335 yield entry
3336 continuation_list[0] = self._extract_continuation(renderer)
3337 break
3338
3339 if not continuation_list[0]:
3340 continuation_list[0] = self._extract_continuation(is_renderer)
3341
3342 if not continuation_list[0]:
3343 continuation_list[0] = self._extract_continuation(parent_renderer)
3344
3345 def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
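# Yields the entries of the selected tab, then keeps following continuation tokens
# page by page until no further continuation is returned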
3346 continuation_list = [None]
3347 extract_entries = lambda x: self._extract_entries(x, continuation_list)
3348 tab_content = try_get(tab, lambda x: x['content'], dict)
3349 if not tab_content:
3350 return
3351 parent_renderer = (
3352 try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
3353 or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
3354 for entry in extract_entries(parent_renderer):
3355 yield entry
3356 continuation = continuation_list[0]
3357
3358 for page_num in itertools.count(1):
3359 if not continuation:
3360 break
3361 headers = self.generate_api_headers(
3362 ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
3363 response = self._extract_response(
3364 item_id='%s page %s' % (item_id, page_num),
3365 query=continuation, headers=headers, ytcfg=ytcfg,
3366 check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3367
3368 if not response:
3369 break
3370 # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
3371 # See: https://github.com/ytdl-org/youtube-dl/issues/28702
3372 visitor_data = self._extract_visitor_data(response) or visitor_data
3373
3374 known_continuation_renderers = {
3375 'playlistVideoListContinuation': self._playlist_entries,
3376 'gridContinuation': self._grid_entries,
3377 'itemSectionContinuation': self._post_thread_continuation_entries,
3378 'sectionListContinuation': extract_entries, # for feeds
3379 }
3380 continuation_contents = try_get(
3381 response, lambda x: x['continuationContents'], dict) or {}
3382 continuation_renderer = None
3383 for key, value in continuation_contents.items():
3384 if key not in known_continuation_renderers:
3385 continue
3386 continuation_renderer = value
3387 continuation_list = [None]
3388 for entry in known_continuation_renderers[key](continuation_renderer):
3389 yield entry
3390 continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
3391 break
3392 if continuation_renderer:
3393 continue
3394
3395 known_renderers = {
3396 'gridPlaylistRenderer': (self._grid_entries, 'items'),
3397 'gridVideoRenderer': (self._grid_entries, 'items'),
3398 'gridChannelRenderer': (self._grid_entries, 'items'),
3399 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
3400 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
3401 'richItemRenderer': (extract_entries, 'contents'), # for hashtag
3402 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
3403 }
3404 on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3405 continuation_items = try_get(
3406 on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
3407 continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
3408 video_items_renderer = None
3409 for key, value in continuation_item.items():
3410 if key not in known_renderers:
3411 continue
3412 video_items_renderer = {known_renderers[key][1]: continuation_items}
3413 continuation_list = [None]
3414 for entry in known_renderers[key][0](video_items_renderer):
3415 yield entry
3416 continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
3417 break
3418 if video_items_renderer:
3419 continue
3420 break
3421
3422 @staticmethod
3423 def _extract_selected_tab(tabs):
3424 for tab in tabs:
3425 renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
3426 if renderer.get('selected') is True:
3427 return renderer
3428 else:
3429 raise ExtractorError('Unable to find selected tab')
3430
3431 @classmethod
3432 def _extract_uploader(cls, data):
3433 uploader = {}
3434 renderer = cls._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
3435 owner = try_get(
3436 renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
3437 if owner:
3438 uploader['uploader'] = owner.get('text')
3439 uploader['uploader_id'] = try_get(
3440 owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
3441 uploader['uploader_url'] = urljoin(
3442 'https://www.youtube.com/',
3443 try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
3444 return {k: v for k, v in uploader.items() if v is not None}
3445
3446 def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
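# Builds playlist metadata from the channel/playlist metadata renderers and delegates
# entry extraction for the selected tab to self._entries()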
3447 playlist_id = title = description = channel_url = channel_name = channel_id = None
3448 thumbnails_list = []
3449 tags = []
3450
3451 selected_tab = self._extract_selected_tab(tabs)
3452 renderer = try_get(
3453 data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
3454 if renderer:
3455 channel_name = renderer.get('title')
3456 channel_url = renderer.get('channelUrl')
3457 channel_id = renderer.get('externalId')
3458 else:
3459 renderer = try_get(
3460 data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
3461
3462 if renderer:
3463 title = renderer.get('title')
3464 description = renderer.get('description', '')
3465 playlist_id = channel_id
3466 tags = renderer.get('keywords', '').split()
3467 thumbnails_list = (
3468 try_get(renderer, lambda x: x['avatar']['thumbnails'], list)
3469 or try_get(
3470 self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer'),
3471 lambda x: x['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'],
3472 list)
3473 or [])
3474
3475 thumbnails = []
3476 for t in thumbnails_list:
3477 if not isinstance(t, dict):
3478 continue
3479 thumbnail_url = url_or_none(t.get('url'))
3480 if not thumbnail_url:
3481 continue
3482 thumbnails.append({
3483 'url': thumbnail_url,
3484 'width': int_or_none(t.get('width')),
3485 'height': int_or_none(t.get('height')),
3486 })
3487 if playlist_id is None:
3488 playlist_id = item_id
3489 if title is None:
3490 title = (
3491 try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
3492 or playlist_id)
3493 title += format_field(selected_tab, 'title', ' - %s')
3494 title += format_field(selected_tab, 'expandedText', ' - %s')
3495 metadata = {
3496 'playlist_id': playlist_id,
3497 'playlist_title': title,
3498 'playlist_description': description,
3499 'uploader': channel_name,
3500 'uploader_id': channel_id,
3501 'uploader_url': channel_url,
3502 'thumbnails': thumbnails,
3503 'tags': tags,
3504 }
3505 availability = self._extract_availability(data)
3506 if availability:
3507 metadata['availability'] = availability
3508 if not channel_id:
3509 metadata.update(self._extract_uploader(data))
3510 metadata.update({
3511 'channel': metadata['uploader'],
3512 'channel_id': metadata['uploader_id'],
3513 'channel_url': metadata['uploader_url']})
3514 return self.playlist_result(
3515 self._entries(
3516 selected_tab, playlist_id, ytcfg,
3517 self._extract_account_syncid(ytcfg, data),
3518 self._extract_visitor_data(data, ytcfg)),
3519 **metadata)
3520
3521 def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
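# Mixes are potentially infinite: keep requesting the next batch via the watch ('next')
# endpoint and stop once the first video shows up again or no new videos remain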
3522 first_id = last_id = response = None
3523 for page_num in itertools.count(1):
3524 videos = list(self._playlist_entries(playlist))
3525 if not videos:
3526 return
3527 start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
3528 if start >= len(videos):
3529 return
3530 for video in videos[start:]:
3531 if video['id'] == first_id:
3532 self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
3533 return
3534 yield video
3535 first_id = first_id or videos[0]['id']
3536 last_id = videos[-1]['id']
3537 watch_endpoint = try_get(
3538 playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
3539 headers = self.generate_api_headers(
3540 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
3541 visitor_data=self._extract_visitor_data(response, data, ytcfg))
3542 query = {
3543 'playlistId': playlist_id,
3544 'videoId': watch_endpoint.get('videoId') or last_id,
3545 'index': watch_endpoint.get('index') or len(videos),
3546 'params': watch_endpoint.get('params') or 'OAE%3D'
3547 }
3548 response = self._extract_response(
3549 item_id='%s page %d' % (playlist_id, page_num),
3550 query=query, ep='next', headers=headers, ytcfg=ytcfg,
3551 check_get_keys='contents'
3552 )
3553 playlist = try_get(
3554 response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
3555
3556 def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
3557 title = playlist.get('title') or try_get(
3558 data, lambda x: x['titleText']['simpleText'], compat_str)
3559 playlist_id = playlist.get('playlistId') or item_id
3560
3561 # Delegating everything except mix playlists to regular tab-based playlist URL
3562 playlist_url = urljoin(url, try_get(
3563 playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3564 compat_str))
3565 if playlist_url and playlist_url != url:
3566 return self.url_result(
3567 playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
3568 video_title=title)
3569
3570 return self.playlist_result(
3571 self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
3572 playlist_id=playlist_id, playlist_title=title)
3573
3574 def _extract_availability(self, data):
3575 """
3576 Gets the availability of a given playlist/tab.
3577 Note: Unless YouTube tells us explicitly, we do not assume it is public
3578 @param data: response
3579 """
3580 is_private = is_unlisted = None
3581 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
3582 badge_labels = self._extract_badges(renderer)
3583
3584 # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
3585 privacy_dropdown_entries = try_get(
3586 renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
3587 for renderer_dict in privacy_dropdown_entries:
3588 is_selected = try_get(
3589 renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
3590 if not is_selected:
3591 continue
3592 label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
3593 if label:
3594 badge_labels.add(label.lower())
3595 break
3596
3597 for badge_label in badge_labels:
3598 if badge_label == 'unlisted':
3599 is_unlisted = True
3600 elif badge_label == 'private':
3601 is_private = True
3602 elif badge_label == 'public':
3603 is_unlisted = is_private = False
3604 return self._availability(is_private, False, False, False, is_unlisted)
3605
3606 @staticmethod
3607 def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
3608 sidebar_renderer = try_get(
3609 data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
3610 for item in sidebar_renderer:
3611 renderer = try_get(item, lambda x: x[info_renderer], expected_type)
3612 if renderer:
3613 return renderer
3614
3615 def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
3616 """
3617 Get playlist with unavailable videos if the 'show unavailable videos' button exists.
3618 """
3619 browse_id = params = None
3620 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
3621 if not renderer:
3622 return
3623 menu_renderer = try_get(
3624 renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
3625 for menu_item in menu_renderer:
3626 if not isinstance(menu_item, dict):
3627 continue
3628 nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
3629 text = try_get(
3630 nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
3631 if not text or text.lower() != 'show unavailable videos':
3632 continue
3633 browse_endpoint = try_get(
3634 nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
3635 browse_id = browse_endpoint.get('browseId')
3636 params = browse_endpoint.get('params')
3637 break
3638
3639 headers = self.generate_api_headers(
3640 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
3641 visitor_data=self._extract_visitor_data(data, ytcfg))
3642 query = {
3643 'params': params or 'wgYCCAA=',
3644 'browseId': browse_id or 'VL%s' % item_id
3645 }
3646 return self._extract_response(
3647 item_id=item_id, headers=headers, query=query,
3648 check_get_keys='contents', fatal=False, ytcfg=ytcfg,
3649 note='Downloading API JSON with unavailable videos')
3650
3651 def _extract_webpage(self, url, item_id, fatal=True):
3652 retries = self.get_param('extractor_retries', 3)
3653 count = -1
3654 webpage = data = last_error = None
3655 while count < retries:
3656 count += 1
3657 # Sometimes youtube returns a webpage with incomplete ytInitialData
3658 # See: https://github.com/yt-dlp/yt-dlp/issues/116
3659 if last_error:
3660 self.report_warning('%s. Retrying ...' % last_error)
3661 try:
3662 webpage = self._download_webpage(
3663 url, item_id,
3664 note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
3665 data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
3666 except ExtractorError as e:
3667 if isinstance(e.cause, network_exceptions):
3668 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
3669 last_error = error_to_compat_str(e.cause or e.msg)
3670 if count < retries:
3671 continue
3672 if fatal:
3673 raise
3674 self.report_warning(error_to_compat_str(e))
3675 break
3676 else:
3677 try:
3678 self._extract_and_report_alerts(data)
3679 except ExtractorError as e:
3680 if fatal:
3681 raise
3682 self.report_warning(error_to_compat_str(e))
3683 break
3684
3685 if dict_get(data, ('contents', 'currentVideoEndpoint')):
3686 break
3687
3688 last_error = 'Incomplete yt initial data received'
3689 if count >= retries:
3690 if fatal:
3691 raise ExtractorError(last_error)
3692 self.report_warning(last_error)
3693 break
3694
3695 return webpage, data
3696
3697 def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
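# Tries to get ytInitialData from the webpage first (unless skipped via extractor-args),
# then falls back to resolving the URL through the InnerTube API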
3698 data = None
3699 if 'webpage' not in self._configuration_arg('skip'):
3700 webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
3701 ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
3702 if not data:
3703 if not ytcfg and self.is_authenticated:
3704 msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
3705 if 'authcheck' not in self._configuration_arg('skip') and fatal:
3706 raise ExtractorError(
3707 msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
3708 ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
3709 expected=True)
3710 self.report_warning(msg, only_once=True)
3711 data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
3712 return data, ytcfg
3713
3714 def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
3715 headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
3716 resolve_response = self._extract_response(
3717 item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
3718 ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
3719 endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
3720 for ep_key, ep in endpoints.items():
3721 params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
3722 if params:
3723 return self._extract_response(
3724 item_id=item_id, query=params, ep=ep, headers=headers,
3725 ytcfg=ytcfg, fatal=fatal, default_client=default_client,
3726 check_get_keys=('contents', 'currentVideoEndpoint'))
3727 err_note = 'Failed to resolve url (does the playlist exist?)'
3728 if fatal:
3729 raise ExtractorError(err_note, expected=True)
3730 self.report_warning(err_note, item_id)
3731
3732 @staticmethod
3733 def _smuggle_data(entries, data):
3734 for entry in entries:
3735 if data:
3736 entry['url'] = smuggle_url(entry['url'], data)
3737 yield entry
3738
3739 _SEARCH_PARAMS = None
3740
3741 def _search_results(self, query, params=NO_DEFAULT):
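# Pages through the InnerTube 'search' endpoint, following continuation tokens until
# no further continuation is found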
3742 data = {'query': query}
3743 if params is NO_DEFAULT:
3744 params = self._SEARCH_PARAMS
3745 if params:
3746 data['params'] = params
3747 continuation = {}
3748 for page_num in itertools.count(1):
3749 data.update(continuation)
3750 search = self._extract_response(
3751 item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
3752 check_get_keys=('contents', 'onResponseReceivedCommands')
3753 )
3754 if not search:
3755 break
3756 slr_contents = try_get(
3757 search,
3758 (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
3759 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
3760 list)
3761 if not slr_contents:
3762 break
3763
3764 # Youtube sometimes adds promoted content to searches,
3765 # changing the index location of videos and token.
3766 # So we search through all entries till we find them.
3767 continuation = None
3768 for slr_content in slr_contents:
3769 if not continuation:
3770 continuation = self._extract_continuation({'contents': [slr_content]})
3771
3772 isr_contents = try_get(
3773 slr_content,
3774 lambda x: x['itemSectionRenderer']['contents'],
3775 list)
3776 if not isr_contents:
3777 continue
3778 for content in isr_contents:
3779 if not isinstance(content, dict):
3780 continue
3781 video = content.get('videoRenderer')
3782 if not isinstance(video, dict):
3783 continue
3784 video_id = video.get('videoId')
3785 if not video_id:
3786 continue
3787
3788 yield self._extract_video(video)
3789
3790 if not continuation:
3791 break
3792
3793
3794class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
3795 IE_DESC = 'YouTube Tabs'
3796 _VALID_URL = r'''(?x:
3797 https?://
3798 (?:\w+\.)?
3799 (?:
3800 youtube(?:kids)?\.com|
3801 %(invidious)s
3802 )/
3803 (?:
3804 (?P<channel_type>channel|c|user|browse)/|
3805 (?P<not_channel>
3806 feed/|hashtag/|
3807 (?:playlist|watch)\?.*?\blist=
3808 )|
3809 (?!(?:%(reserved_names)s)\b) # Direct URLs
3810 )
3811 (?P<id>[^/?\#&]+)
3812 )''' % {
3813 'reserved_names': YoutubeBaseInfoExtractor._RESERVED_NAMES,
3814 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
3815 }
3816 IE_NAME = 'youtube:tab'
3817
3818 _TESTS = [{
3819 'note': 'playlists, multipage',
3820 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
3821 'playlist_mincount': 94,
3822 'info_dict': {
3823 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3824 'title': 'Игорь Клейнер - Playlists',
3825 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3826 'uploader': 'Игорь Клейнер',
3827 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3828 },
3829 }, {
3830 'note': 'playlists, multipage, different order',
3831 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
3832 'playlist_mincount': 94,
3833 'info_dict': {
3834 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3835 'title': 'Игорь Клейнер - Playlists',
3836 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3837 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3838 'uploader': 'Игорь Клейнер',
3839 },
3840 }, {
3841 'note': 'playlists, series',
3842 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
3843 'playlist_mincount': 5,
3844 'info_dict': {
3845 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3846 'title': '3Blue1Brown - Playlists',
3847 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3848 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3849 'uploader': '3Blue1Brown',
3850 },
3851 }, {
3852 'note': 'playlists, singlepage',
3853 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
3854 'playlist_mincount': 4,
3855 'info_dict': {
3856 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3857 'title': 'ThirstForScience - Playlists',
3858 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
3859 'uploader': 'ThirstForScience',
3860 'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3861 }
3862 }, {
3863 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
3864 'only_matching': True,
3865 }, {
3866 'note': 'basic, single video playlist',
3867 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3868 'info_dict': {
3869 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3870 'uploader': 'Sergey M.',
3871 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3872 'title': 'youtube-dl public playlist',
3873 },
3874 'playlist_count': 1,
3875 }, {
3876 'note': 'empty playlist',
3877 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3878 'info_dict': {
3879 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3880 'uploader': 'Sergey M.',
3881 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3882 'title': 'youtube-dl empty playlist',
3883 },
3884 'playlist_count': 0,
3885 }, {
3886 'note': 'Home tab',
3887 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
3888 'info_dict': {
3889 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3890 'title': 'lex will - Home',
3891 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3892 'uploader': 'lex will',
3893 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3894 },
3895 'playlist_mincount': 2,
3896 }, {
3897 'note': 'Videos tab',
3898 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
3899 'info_dict': {
3900 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3901 'title': 'lex will - Videos',
3902 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3903 'uploader': 'lex will',
3904 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3905 },
3906 'playlist_mincount': 975,
3907 }, {
3908 'note': 'Videos tab, sorted by popular',
3909 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
3910 'info_dict': {
3911 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3912 'title': 'lex will - Videos',
3913 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3914 'uploader': 'lex will',
3915 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3916 },
3917 'playlist_mincount': 199,
3918 }, {
3919 'note': 'Playlists tab',
3920 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
3921 'info_dict': {
3922 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3923 'title': 'lex will - Playlists',
3924 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3925 'uploader': 'lex will',
3926 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3927 },
3928 'playlist_mincount': 17,
3929 }, {
3930 'note': 'Community tab',
3931 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
3932 'info_dict': {
3933 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3934 'title': 'lex will - Community',
3935 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3936 'uploader': 'lex will',
3937 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3938 },
3939 'playlist_mincount': 18,
3940 }, {
3941 'note': 'Channels tab',
3942 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
3943 'info_dict': {
3944 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3945 'title': 'lex will - Channels',
3946 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3947 'uploader': 'lex will',
3948 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3949 },
3950 'playlist_mincount': 12,
3951 }, {
3952 'note': 'Search tab',
3953 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
3954 'playlist_mincount': 40,
3955 'info_dict': {
3956 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3957 'title': '3Blue1Brown - Search - linear algebra',
3958 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3959 'uploader': '3Blue1Brown',
3960 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3961 },
3962 }, {
3963 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3964 'only_matching': True,
3965 }, {
3966 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3967 'only_matching': True,
3968 }, {
3969 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3970 'only_matching': True,
3971 }, {
3972 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
3973 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3974 'info_dict': {
3975 'title': '29C3: Not my department',
3976 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3977 'uploader': 'Christiaan008',
3978 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
3979 'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
3980 },
3981 'playlist_count': 96,
3982 }, {
3983 'note': 'Large playlist',
3984 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
3985 'info_dict': {
3986 'title': 'Uploads from Cauchemar',
3987 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
3988 'uploader': 'Cauchemar',
3989 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
3990 },
3991 'playlist_mincount': 1123,
3992 }, {
3993 'note': 'even larger playlist, 8832 videos',
3994 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
3995 'only_matching': True,
3996 }, {
3997 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
3998 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
3999 'info_dict': {
4000 'title': 'Uploads from Interstellar Movie',
4001 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
4002 'uploader': 'Interstellar Movie',
4003 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
4004 },
4005 'playlist_mincount': 21,
4006 }, {
4007 'note': 'Playlist with "show unavailable videos" button',
4008 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
4009 'info_dict': {
4010 'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
4011 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
4012 'uploader': 'Phim Siêu Nhân Nhật Bản',
4013 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
4014 },
4015 'playlist_mincount': 200,
4016 }, {
4017 'note': 'Playlist with unavailable videos in page 7',
4018 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
4019 'info_dict': {
4020 'title': 'Uploads from BlankTV',
4021 'id': 'UU8l9frL61Yl5KFOl87nIm2w',
4022 'uploader': 'BlankTV',
4023 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
4024 },
4025 'playlist_mincount': 1000,
4026 }, {
4027 'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
4028 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
4029 'info_dict': {
4030 'title': 'Data Analysis with Dr Mike Pound',
4031 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
4032 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
4033 'uploader': 'Computerphile',
4034 'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
4035 },
4036 'playlist_mincount': 11,
4037 }, {
4038 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
4039 'only_matching': True,
4040 }, {
4041 'note': 'Playlist URL that does not actually serve a playlist',
4042 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
4043 'info_dict': {
4044 'id': 'FqZTN594JQw',
4045 'ext': 'webm',
4046 'title': "Smiley's People 01 detective, Adventure Series, Action",
4047 'uploader': 'STREEM',
4048 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
4049 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
4050 'upload_date': '20150526',
4051 'license': 'Standard YouTube License',
4052 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
4053 'categories': ['People & Blogs'],
4054 'tags': list,
4055 'view_count': int,
4056 'like_count': int,
4057 'dislike_count': int,
4058 },
4059 'params': {
4060 'skip_download': True,
4061 },
4062 'skip': 'This video is not available.',
4063 'add_ie': [YoutubeIE.ie_key()],
4064 }, {
4065 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
4066 'only_matching': True,
4067 }, {
4068 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
4069 'only_matching': True,
4070 }, {
4071 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
4072 'info_dict': {
4073 'id': '3yImotZU3tw', # This will keep changing
4074 'ext': 'mp4',
4075 'title': compat_str,
4076 'uploader': 'Sky News',
4077 'uploader_id': 'skynews',
4078 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
4079 'upload_date': r're:\d{8}',
4080 'description': compat_str,
4081 'categories': ['News & Politics'],
4082 'tags': list,
4083 'like_count': int,
4084 'dislike_count': int,
4085 },
4086 'params': {
4087 'skip_download': True,
4088 },
4089 'expected_warnings': ['Downloading just video ', 'Ignoring subtitle tracks found in '],
4090 }, {
4091 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
4092 'info_dict': {
4093 'id': 'a48o2S1cPoo',
4094 'ext': 'mp4',
4095 'title': 'The Young Turks - Live Main Show',
4096 'uploader': 'The Young Turks',
4097 'uploader_id': 'TheYoungTurks',
4098 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
4099 'upload_date': '20150715',
4100 'license': 'Standard YouTube License',
4101 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
4102 'categories': ['News & Politics'],
4103 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
4104 'like_count': int,
4105 'dislike_count': int,
4106 },
4107 'params': {
4108 'skip_download': True,
4109 },
4110 'only_matching': True,
4111 }, {
4112 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
4113 'only_matching': True,
4114 }, {
4115 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
4116 'only_matching': True,
4117 }, {
4118 'note': 'A channel that is not live. Should raise error',
4119 'url': 'https://www.youtube.com/user/numberphile/live',
4120 'only_matching': True,
4121 }, {
4122 'url': 'https://www.youtube.com/feed/trending',
4123 'only_matching': True,
4124 }, {
4125 'url': 'https://www.youtube.com/feed/library',
4126 'only_matching': True,
4127 }, {
4128 'url': 'https://www.youtube.com/feed/history',
4129 'only_matching': True,
4130 }, {
4131 'url': 'https://www.youtube.com/feed/subscriptions',
4132 'only_matching': True,
4133 }, {
4134 'url': 'https://www.youtube.com/feed/watch_later',
4135 'only_matching': True,
4136 }, {
4137 'note': 'Recommended - redirects to home page.',
4138 'url': 'https://www.youtube.com/feed/recommended',
4139 'only_matching': True,
4140 }, {
4141 'note': 'inline playlist with not always working continuations',
4142 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
4143 'only_matching': True,
4144 }, {
4145 'url': 'https://www.youtube.com/course',
4146 'only_matching': True,
4147 }, {
4148 'url': 'https://www.youtube.com/zsecurity',
4149 'only_matching': True,
4150 }, {
4151 'url': 'http://www.youtube.com/NASAgovVideo/videos',
4152 'only_matching': True,
4153 }, {
4154 'url': 'https://www.youtube.com/TheYoungTurks/live',
4155 'only_matching': True,
4156 }, {
4157 'url': 'https://www.youtube.com/hashtag/cctv9',
4158 'info_dict': {
4159 'id': 'cctv9',
4160 'title': '#cctv9',
4161 },
4162 'playlist_mincount': 350,
4163 }, {
4164 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
4165 'only_matching': True,
4166 }, {
4167 'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
4168 'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
4169 'only_matching': True
4170 }, {
4171 'note': '/browse/ should redirect to /channel/',
4172 'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
4173 'only_matching': True
4174 }, {
4175 'note': 'VLPL, should redirect to playlist?list=PL...',
4176 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
4177 'info_dict': {
4178 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
4179 'uploader': 'NoCopyrightSounds',
4180 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
4181 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
4182 'title': 'NCS Releases',
4183 },
4184 'playlist_mincount': 166,
4185 }, {
4186 'note': 'Topic, should redirect to playlist?list=UU...',
4187 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
4188 'info_dict': {
4189 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
4190 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
4191 'title': 'Uploads from Royalty Free Music - Topic',
4192 'uploader': 'Royalty Free Music - Topic',
4193 },
4194 'expected_warnings': [
4195 'A channel/user page was given',
4196 'The URL does not have a videos tab',
4197 ],
4198 'playlist_mincount': 101,
4199 }, {
4200 'note': 'Topic without a UU playlist',
4201 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
4202 'info_dict': {
4203 'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
4204 'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
4205 },
4206 'expected_warnings': [
4207 'A channel/user page was given',
4208 'The URL does not have a videos tab',
4209 'Falling back to channel URL',
4210 ],
4211 'playlist_mincount': 9,
4212 }, {
4213 'note': 'Youtube music Album',
4214 'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
4215 'info_dict': {
4216 'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
4217 'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
4218 },
4219 'playlist_count': 50,
4220 }, {
4221 'note': 'unlisted single video playlist',
4222 'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
4223 'info_dict': {
4224 'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
4225 'uploader': 'colethedj',
4226 'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
4227 'title': 'yt-dlp unlisted playlist test',
4228 'availability': 'unlisted'
4229 },
4230 'playlist_count': 1,
4231 }, {
4232 'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
4233 'url': 'https://www.youtube.com/feed/recommended',
4234 'info_dict': {
4235 'id': 'recommended',
4236 'title': 'recommended',
4237 },
4238 'playlist_mincount': 50,
4239 'params': {
4240 'skip_download': True,
4241 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
4242 },
4243 }, {
4244 'note': 'API Fallback: /videos tab, sorted by oldest first',
4245 'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
4246 'info_dict': {
4247 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
4248 'title': 'Cody\'sLab - Videos',
4249 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
4250 'uploader': 'Cody\'sLab',
4251 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
4252 },
4253 'playlist_mincount': 650,
4254 'params': {
4255 'skip_download': True,
4256 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
4257 },
4258 }, {
4259 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
4260 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
4261 'info_dict': {
4262 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
4263 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
4264 'title': 'Uploads from Royalty Free Music - Topic',
4265 'uploader': 'Royalty Free Music - Topic',
4266 },
4267 'expected_warnings': [
4268 'A channel/user page was given',
4269 'The URL does not have a videos tab',
4270 ],
4271 'playlist_mincount': 101,
4272 'params': {
4273 'skip_download': True,
4274 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
4275 },
4276 }]
4277
4278 @classmethod
4279 def suitable(cls, url):
4280 return False if YoutubeIE.suitable(url) else super(
4281 YoutubeTabIE, cls).suitable(url)
4282
4283 def _real_extract(self, url):
4284 url, smuggled_data = unsmuggle_url(url, {})
4285 if self.is_music_url(url):
4286 smuggled_data['is_music_url'] = True
4287 info_dict = self.__real_extract(url, smuggled_data)
4288 if info_dict.get('entries'):
4289 info_dict['entries'] = self._smuggle_data(info_dict['entries'], smuggled_data)
4290 return info_dict
4291
4292 _url_re = re.compile(r'(?P<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _VALID_URL)
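# Roughly splits a tab URL into its base, tab and trailing parts, e.g. (illustrative example)
# 'https://www.youtube.com/c/somechannel/videos?view=0' gives
# pre='https://www.youtube.com/c/somechannel', tab='/videos', post='?view=0'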
4293
4294 def __real_extract(self, url, smuggled_data):
4295 item_id = self._match_id(url)
4296 url = compat_urlparse.urlunparse(
4297 compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
4298 compat_opts = self.get_param('compat_opts', [])
4299
4300 def get_mobj(url):
4301 mobj = self._url_re.match(url).groupdict()
4302 mobj.update((k, '') for k, v in mobj.items() if v is None)
4303 return mobj
4304
4305 mobj = get_mobj(url)
4306 # Youtube returns incomplete data if tabname is not lower case
4307 pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
4308 if is_channel:
4309 if smuggled_data.get('is_music_url'):
4310 if item_id[:2] == 'VL':
4311 # Youtube music VL channels have an equivalent playlist
4312 item_id = item_id[2:]
4313 pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
4314 elif item_id[:2] == 'MP':
4315 # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
4316 mdata = self._extract_tab_endpoint(
4317 'https://music.youtube.com/channel/%s' % item_id, item_id, default_client='web_music')
4318 murl = traverse_obj(
4319 mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'), get_all=False, expected_type=compat_str)
4320 if not murl:
4321 raise ExtractorError('Failed to resolve album to playlist.')
4322 return self.url_result(murl, ie=YoutubeTabIE.ie_key())
4323 elif mobj['channel_type'] == 'browse':
4324 # Youtube music /browse/ should be changed to /channel/
4325 pre = 'https://www.youtube.com/channel/%s' % item_id
4326 if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
4327 # Home URLs should redirect to /videos/
4328 self.report_warning(
4329 'A channel/user page was given. All the channel\'s videos will be downloaded. '
4330 'To download only the videos in the home page, add a "/featured" to the URL')
4331 tab = '/videos'
4332
4333 url = ''.join((pre, tab, post))
4334 mobj = get_mobj(url)
4335
4336 # Handle both video/playlist URLs
4337 qs = parse_qs(url)
4338 video_id = qs.get('v', [None])[0]
4339 playlist_id = qs.get('list', [None])[0]
4340
4341 if not video_id and mobj['not_channel'].startswith('watch'):
4342 if not playlist_id:
4343 # If there is neither a video nor a playlist id, youtube redirects to the home page, which is undesirable
4344 raise ExtractorError('Unable to recognize tab page')
4345 # Common mistake: https://www.youtube.com/watch?list=playlist_id
4346 self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
4347 url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
4348 mobj = get_mobj(url)
4349
4350 if video_id and playlist_id:
4351 if self.get_param('noplaylist'):
4352 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
4353 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4354 self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
4355
4356 data, ytcfg = self._extract_data(url, item_id)
4357
4358 tabs = try_get(
4359 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4360 if tabs:
4361 selected_tab = self._extract_selected_tab(tabs)
4362 tab_name = selected_tab.get('title', '')
4363 if 'no-youtube-channel-redirect' not in compat_opts:
4364 if mobj['tab'] == '/live':
4365 # Live tab should have redirected to the video
4366 raise ExtractorError('The channel is not currently live', expected=True)
4367 if mobj['tab'] == '/videos' and tab_name.lower() != mobj['tab'][1:]:
4368 if not mobj['not_channel'] and item_id[:2] == 'UC':
4369 # Topic channels don't have /videos. Use the equivalent playlist instead
4370 self.report_warning('The URL does not have a %s tab. Trying to redirect to playlist UU%s instead' % (mobj['tab'][1:], item_id[2:]))
4371 pl_id = 'UU%s' % item_id[2:]
4372 pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
4373 try:
4374 data, ytcfg, item_id, url = *self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True), pl_id, pl_url
4375 except ExtractorError:
4376 self.report_warning('The playlist gave an error. Falling back to channel URL')
4377 else:
4378 self.report_warning('The URL does not have a %s tab. %s is being downloaded instead' % (mobj['tab'][1:], tab_name))
4379
4380 self.write_debug('Final URL: %s' % url)
4381
4382 # YouTube sometimes provides a button to reload playlist with unavailable videos.
4383 if 'no-youtube-unavailable-videos' not in compat_opts:
4384 data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
4385 self._extract_and_report_alerts(data, only_once=True)
4386 tabs = try_get(
4387 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4388 if tabs:
4389 return self._extract_from_tabs(item_id, ytcfg, data, tabs)
4390
4391 playlist = try_get(
4392 data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
4393 if playlist:
4394 return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
4395
4396 video_id = try_get(
4397 data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
4398 compat_str) or video_id
4399 if video_id:
4400 if mobj['tab'] != '/live': # live tab is expected to redirect to video
4401 self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
4402 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4403
4404 raise ExtractorError('Unable to recognize tab page')
4405
4406
4407class YoutubePlaylistIE(InfoExtractor):
4408 IE_DESC = 'YouTube playlists'
4409 _VALID_URL = r'''(?x)(?:
4410 (?:https?://)?
4411 (?:\w+\.)?
4412 (?:
4413 (?:
4414 youtube(?:kids)?\.com|
4415 %(invidious)s
4416 )
4417 /.*?\?.*?\blist=
4418 )?
4419 (?P<id>%(playlist_id)s)
4420 )''' % {
4421 'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE,
4422 'invidious': '|'.join(YoutubeBaseInfoExtractor._INVIDIOUS_SITES),
4423 }
4424 IE_NAME = 'youtube:playlist'
4425 _TESTS = [{
4426 'note': 'issue #673',
4427 'url': 'PLBB231211A4F62143',
4428 'info_dict': {
4429 'title': '[OLD]Team Fortress 2 (Class-based LP)',
4430 'id': 'PLBB231211A4F62143',
4431 'uploader': 'Wickydoo',
4432 'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
4433 'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
4434 },
4435 'playlist_mincount': 29,
4436 }, {
4437 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4438 'info_dict': {
4439 'title': 'YDL_safe_search',
4440 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4441 },
4442 'playlist_count': 2,
4443 'skip': 'This playlist is private',
4444 }, {
4445 'note': 'embedded',
4446 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4447 'playlist_count': 4,
4448 'info_dict': {
4449 'title': 'JODA15',
4450 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4451 'uploader': 'milan',
4452 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
4453 }
4454 }, {
4455 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4456 'playlist_mincount': 654,
4457 'info_dict': {
4458 'title': '2018 Chinese New Singles (11/6 updated)',
4459 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4460 'uploader': 'LBK',
4461 'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
4462 'description': 'md5:da521864744d60a198e3a88af4db0d9d',
4463 }
4464 }, {
4465 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
4466 'only_matching': True,
4467 }, {
4468 # music album playlist
4469 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
4470 'only_matching': True,
4471 }]
4472
4473 @classmethod
4474 def suitable(cls, url):
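# Defer to YoutubeTabIE for anything it matches, and bail out when an explicit
# video id ("v" parameter) is present; this IE should only catch bare playlist
# ids and plain /playlist URLs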
4475 if YoutubeTabIE.suitable(url):
4476 return False
4478 qs = parse_qs(url)
4479 if qs.get('v', [None])[0]:
4480 return False
4481 return super(YoutubePlaylistIE, cls).suitable(url)
4482
4483 def _real_extract(self, url):
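# Roughly: normalise whatever matched (possibly a bare playlist id such as the
# 'PLBB231211A4F62143' test above) into a full /playlist?list=... URL and hand
# it to YoutubeTabIE, preserving any music-URL smuggling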
4484 playlist_id = self._match_id(url)
4485 is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
4486 url = update_url_query(
4487 'https://www.youtube.com/playlist',
4488 parse_qs(url) or {'list': playlist_id})
4489 if is_music_url:
4490 url = smuggle_url(url, {'is_music_url': True})
4491 return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4492
4493
4494class YoutubeYtBeIE(InfoExtractor):
4495 IE_DESC = 'youtu.be'
4496 _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
4497 _TESTS = [{
4498 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
4499 'info_dict': {
4500 'id': 'yeWKywCrFtk',
4501 'ext': 'mp4',
4502 'title': 'Small Scale Baler and Braiding Rugs',
4503 'uploader': 'Backus-Page House Museum',
4504 'uploader_id': 'backuspagemuseum',
4505 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
4506 'upload_date': '20161008',
4507 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
4508 'categories': ['Nonprofits & Activism'],
4509 'tags': list,
4510 'like_count': int,
4511 'dislike_count': int,
4512 },
4513 'params': {
4514 'noplaylist': True,
4515 'skip_download': True,
4516 },
4517 }, {
4518 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
4519 'only_matching': True,
4520 }]
4521
4522 def _real_extract(self, url):
4523 mobj = self._match_valid_url(url)
4524 video_id = mobj.group('id')
4525 playlist_id = mobj.group('playlist_id')
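# Rebuild the canonical watch URL (keeping both the video and playlist ids) and
# let YoutubeTabIE apply the usual playlist/--no-playlist handling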
4526 return self.url_result(
4527 update_url_query('https://www.youtube.com/watch', {
4528 'v': video_id,
4529 'list': playlist_id,
4530 'feature': 'youtu.be',
4531 }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4532
4533
4534class YoutubeYtUserIE(InfoExtractor):
4535 IE_DESC = 'YouTube user videos; "ytuser:" prefix'
4536 _VALID_URL = r'ytuser:(?P<id>.+)'
4537 _TESTS = [{
4538 'url': 'ytuser:phihag',
4539 'only_matching': True,
4540 }]
4541
4542 def _real_extract(self, url):
4543 user_id = self._match_id(url)
4544 return self.url_result(
4545 'https://www.youtube.com/user/%s/videos' % user_id,
4546 ie=YoutubeTabIE.ie_key(), video_id=user_id)
4547
4548
4549class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
4550 IE_NAME = 'youtube:favorites'
4551 IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
4552 _VALID_URL = r':ytfav(?:ou?rite)?s?'
4553 _LOGIN_REQUIRED = True
4554 _TESTS = [{
4555 'url': ':ytfav',
4556 'only_matching': True,
4557 }, {
4558 'url': ':ytfavorites',
4559 'only_matching': True,
4560 }]
4561
4562 def _real_extract(self, url):
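# 'LL' is the list id of the logged-in account's liked-videos playlist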
4563 return self.url_result(
4564 'https://www.youtube.com/playlist?list=LL',
4565 ie=YoutubeTabIE.ie_key())
4566
4567
4568class YoutubeSearchIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
4569 IE_DESC = 'YouTube search'
4570 IE_NAME = 'youtube:search'
4571 _SEARCH_KEY = 'ytsearch'
4572 _SEARCH_PARAMS = None
4573 _TESTS = []
4574 

4575 class YoutubeSearchDateIE(YoutubeTabBaseInfoExtractor, SearchInfoExtractor):
4576 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
4577 _SEARCH_KEY = 'ytsearchdate'
4578 IE_DESC = 'YouTube search, newest videos first'
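# 'CAI%3D' is the url-encoded search-params blob corresponding to "Sort by upload date"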
4579 _SEARCH_PARAMS = 'CAI%3D'
4580
4581
4582class YoutubeSearchURLIE(YoutubeTabBaseInfoExtractor):
4583 IE_DESC = 'YouTube search URLs with sorting and filter support'
4584 IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
4585 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
4586 _TESTS = [{
4587 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
4588 'playlist_mincount': 5,
4589 'info_dict': {
4590 'id': 'youtube-dl test video',
4591 'title': 'youtube-dl test video',
4592 }
4593 }, {
4594 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
4595 'only_matching': True,
4596 }]
4597
4598 def _real_extract(self, url):
4599 qs = parse_qs(url)
4600 query = (qs.get('search_query') or qs.get('q'))[0]
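# 'sp' (if present) carries YouTube's encoded sort/filter parameters,
# e.g. the 'EgQIBBgB' value in the test above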
4601 return self.playlist_result(self._search_results(query, qs.get('sp', (None,))[0]), query, query)
4602
4603
4604class YoutubeFeedsInfoExtractor(YoutubeTabIE):
4605 """
4606 Base class for feed extractors
4607 Subclasses must define the _FEED_NAME property.
4608 """
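# A minimal concrete feed, as the subclasses below illustrate, looks roughly like:
#   _FEED_NAME = 'history'
#   _VALID_URL = r':ythis(?:tory)?'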
4609 _LOGIN_REQUIRED = True
4610 _TESTS = []
4611
4612 @property
4613 def IE_NAME(self):
4614 return 'youtube:%s' % self._FEED_NAME
4615
4616 def _real_extract(self, url):
4617 return self.url_result(
4618 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
4619 ie=YoutubeTabIE.ie_key())
4620
4621
4622class YoutubeWatchLaterIE(InfoExtractor):
4623 IE_NAME = 'youtube:watchlater'
4624 IE_DESC = 'YouTube watch later list; ":ytwatchlater" keyword (requires cookies)'
4625 _VALID_URL = r':ytwatchlater'
4626 _TESTS = [{
4627 'url': ':ytwatchlater',
4628 'only_matching': True,
4629 }]
4630
4631 def _real_extract(self, url):
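# 'WL' is the list id of the logged-in account's Watch Later playlist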
4632 return self.url_result(
4633 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
4634
4635
4636class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
4637 IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
4638 _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
4639 _FEED_NAME = 'recommended'
4640 _LOGIN_REQUIRED = False
4641 _TESTS = [{
4642 'url': ':ytrec',
4643 'only_matching': True,
4644 }, {
4645 'url': ':ytrecommended',
4646 'only_matching': True,
4647 }, {
4648 'url': 'https://youtube.com',
4649 'only_matching': True,
4650 }]
4651
4652
4653class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
4654 IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
4655 _VALID_URL = r':ytsub(?:scription)?s?'
4656 _FEED_NAME = 'subscriptions'
4657 _TESTS = [{
4658 'url': ':ytsubs',
4659 'only_matching': True,
4660 }, {
4661 'url': ':ytsubscriptions',
4662 'only_matching': True,
4663 }]
4664
4665
4666class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
4667 IE_DESC = 'YouTube watch history; ":ythis" keyword (requires cookies)'
4668 _VALID_URL = r':ythis(?:tory)?'
4669 _FEED_NAME = 'history'
4670 _TESTS = [{
4671 'url': ':ythistory',
4672 'only_matching': True,
4673 }]
4674
4675
4676class YoutubeTruncatedURLIE(InfoExtractor):
4677 IE_NAME = 'youtube:truncated_url'
4678 IE_DESC = False # Do not list
4679 _VALID_URL = r'''(?x)
4680 (?:https?://)?
4681 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
4682 (?:watch\?(?:
4683 feature=[a-z_]+|
4684 annotation_id=annotation_[^&]+|
4685 x-yt-cl=[0-9]+|
4686 hl=[^&]*|
4687 t=[0-9]+
4688 )?
4689 |
4690 attribution_link\?a=[^&]+
4691 )
4692 $
4693 '''
4694
4695 _TESTS = [{
4696 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
4697 'only_matching': True,
4698 }, {
4699 'url': 'https://www.youtube.com/watch?',
4700 'only_matching': True,
4701 }, {
4702 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
4703 'only_matching': True,
4704 }, {
4705 'url': 'https://www.youtube.com/watch?feature=foo',
4706 'only_matching': True,
4707 }, {
4708 'url': 'https://www.youtube.com/watch?hl=en-GB',
4709 'only_matching': True,
4710 }, {
4711 'url': 'https://www.youtube.com/watch?t=2372',
4712 'only_matching': True,
4713 }]
4714
4715 def _real_extract(self, url):
4716 raise ExtractorError(
4717 'Did you forget to quote the URL? Remember that & is a meta '
4718 'character in most shells, so you want to put the URL in quotes, '
4719 'like youtube-dl '
4720 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
4721 'or simply youtube-dl BaW_jenozKc.',
4722 expected=True)
4723
4724
4725class YoutubeClipIE(InfoExtractor):
4726 IE_NAME = 'youtube:clip'
4727 IE_DESC = False # Do not list
4728 _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
4729
4730 def _real_extract(self, url):
4731 self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
4732 return self.url_result(url, 'Generic')
4733
4734
4735class YoutubeTruncatedIDIE(InfoExtractor):
4736 IE_NAME = 'youtube:truncated_id'
4737 IE_DESC = False # Do not list
4738 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
4739
4740 _TESTS = [{
4741 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
4742 'only_matching': True,
4743 }]
4744
4745 def _real_extract(self, url):
4746 video_id = self._match_id(url)
4747 raise ExtractorError(
4748 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
4749 expected=True)