[youtube] Fix throttling by decrypting n-sig (#1437)
[yt-dlp.git] / yt_dlp / extractor / youtube.py
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5 import base64
6 import calendar
7 import copy
8 import datetime
9 import hashlib
10 import itertools
11 import json
12 import os.path
13 import random
14 import re
15 import time
16 import traceback
17
18 from .common import InfoExtractor, SearchInfoExtractor
19 from ..compat import (
20 compat_chr,
21 compat_HTTPError,
22 compat_parse_qs,
23 compat_str,
24 compat_urllib_parse_unquote_plus,
25 compat_urllib_parse_urlencode,
26 compat_urllib_parse_urlparse,
27 compat_urlparse,
28 )
29 from ..jsinterp import JSInterpreter
30 from ..utils import (
31 bytes_to_intlist,
32 clean_html,
33 datetime_from_str,
34 dict_get,
35 error_to_compat_str,
36 ExtractorError,
37 float_or_none,
38 format_field,
39 int_or_none,
40 intlist_to_bytes,
41 is_html,
42 mimetype2ext,
43 network_exceptions,
44 orderedSet,
45 parse_codecs,
46 parse_count,
47 parse_duration,
48 parse_iso8601,
49 parse_qs,
50 qualities,
51 remove_end,
52 remove_start,
53 smuggle_url,
54 str_or_none,
55 str_to_int,
56 traverse_obj,
57 try_get,
58 unescapeHTML,
59 unified_strdate,
60 unsmuggle_url,
61 update_url_query,
62 url_or_none,
63 urljoin,
64 variadic,
65 )
66
67
68 # any clients starting with _ cannot be explicitly requested by the user
69 INNERTUBE_CLIENTS = {
70 'web': {
71 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
72 'INNERTUBE_CONTEXT': {
73 'client': {
74 'clientName': 'WEB',
75 'clientVersion': '2.20210622.10.00',
76 }
77 },
78 'INNERTUBE_CONTEXT_CLIENT_NAME': 1
79 },
80 'web_embedded': {
81 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
82 'INNERTUBE_CONTEXT': {
83 'client': {
84 'clientName': 'WEB_EMBEDDED_PLAYER',
85 'clientVersion': '1.20210620.0.1',
86 },
87 },
88 'INNERTUBE_CONTEXT_CLIENT_NAME': 56
89 },
90 'web_music': {
91 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
92 'INNERTUBE_HOST': 'music.youtube.com',
93 'INNERTUBE_CONTEXT': {
94 'client': {
95 'clientName': 'WEB_REMIX',
96 'clientVersion': '1.20210621.00.00',
97 }
98 },
99 'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
100 },
101 'web_creator': {
102 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
103 'INNERTUBE_CONTEXT': {
104 'client': {
105 'clientName': 'WEB_CREATOR',
106 'clientVersion': '1.20210621.00.00',
107 }
108 },
109 'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
110 },
111 'android': {
112 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
113 'INNERTUBE_CONTEXT': {
114 'client': {
115 'clientName': 'ANDROID',
116 'clientVersion': '16.20',
117 }
118 },
119 'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
120 'REQUIRE_JS_PLAYER': False
121 },
122 'android_embedded': {
123 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
124 'INNERTUBE_CONTEXT': {
125 'client': {
126 'clientName': 'ANDROID_EMBEDDED_PLAYER',
127 'clientVersion': '16.20',
128 },
129 },
130 'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
131 'REQUIRE_JS_PLAYER': False
132 },
133 'android_music': {
134 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
135 'INNERTUBE_HOST': 'music.youtube.com',
136 'INNERTUBE_CONTEXT': {
137 'client': {
138 'clientName': 'ANDROID_MUSIC',
139 'clientVersion': '4.32',
140 }
141 },
142 'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
143 'REQUIRE_JS_PLAYER': False
144 },
145 'android_creator': {
146 'INNERTUBE_CONTEXT': {
147 'client': {
148 'clientName': 'ANDROID_CREATOR',
149 'clientVersion': '21.24.100',
150 },
151 },
152 'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
153 'REQUIRE_JS_PLAYER': False
154 },
155 # ios has HLS live streams
156 # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680
157 'ios': {
158 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
159 'INNERTUBE_CONTEXT': {
160 'client': {
161 'clientName': 'IOS',
162 'clientVersion': '16.20',
163 }
164 },
165 'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
166 'REQUIRE_JS_PLAYER': False
167 },
168 'ios_embedded': {
169 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
170 'INNERTUBE_CONTEXT': {
171 'client': {
172 'clientName': 'IOS_MESSAGES_EXTENSION',
173 'clientVersion': '16.20',
174 },
175 },
176 'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
177 'REQUIRE_JS_PLAYER': False
178 },
179 'ios_music': {
180 'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og',
181 'INNERTUBE_HOST': 'music.youtube.com',
182 'INNERTUBE_CONTEXT': {
183 'client': {
184 'clientName': 'IOS_MUSIC',
185 'clientVersion': '4.32',
186 },
187 },
188 'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
189 'REQUIRE_JS_PLAYER': False
190 },
191 'ios_creator': {
192 'INNERTUBE_CONTEXT': {
193 'client': {
194 'clientName': 'IOS_CREATOR',
195 'clientVersion': '21.24.100',
196 },
197 },
198 'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
199 'REQUIRE_JS_PLAYER': False
200 },
201 # mweb has 'ultralow' formats
202 # See: https://github.com/yt-dlp/yt-dlp/pull/557
203 'mweb': {
204 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
205 'INNERTUBE_CONTEXT': {
206 'client': {
207 'clientName': 'MWEB',
208 'clientVersion': '2.20210721.07.00',
209 }
210 },
211 'INNERTUBE_CONTEXT_CLIENT_NAME': 2
212 },
213 }
214
215
216 def build_innertube_clients():
217 third_party = {
218 'embedUrl': 'https://google.com', # Can be any valid URL
219 }
220 base_clients = ('android', 'web', 'ios', 'mweb')
221 priority = qualities(base_clients[::-1])
222
223 for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
224 ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
225 ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
226 ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
227 ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
228 ytcfg['priority'] = 10 * priority(client.split('_', 1)[0])
229
230 if client in base_clients:
231 INNERTUBE_CLIENTS[f'{client}_agegate'] = agegate_ytcfg = copy.deepcopy(ytcfg)
232 agegate_ytcfg['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
233 agegate_ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
234 agegate_ytcfg['priority'] -= 1
235 elif client.endswith('_embedded'):
236 ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
237 ytcfg['priority'] -= 2
238 else:
239 ytcfg['priority'] -= 3
240
241
242 build_innertube_clients()
243
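# Illustrative sketch (comments only, not part of the module): after build_innertube_clients()
# runs, every entry has its defaults filled in and each base client also gains a derived
# '<client>_agegate' variant. For example, INNERTUBE_CLIENTS['android_agegate'] would look
# roughly like the dict below (API key and version copied from the 'android' entry above;
# the priority value comes from qualities(('mweb', 'ios', 'web', 'android'))):
#
#   {
#       'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
#       'INNERTUBE_HOST': 'www.youtube.com',
#       'REQUIRE_JS_PLAYER': False,
#       'INNERTUBE_CONTEXT': {
#           'client': {
#               'clientName': 'ANDROID',
#               'clientVersion': '16.20',
#               'hl': 'en',
#               'clientScreen': 'EMBED',
#           },
#           'thirdParty': {'embedUrl': 'https://google.com'},
#       },
#       'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
#       'priority': 29,  # 10 * rank of 'android' (3) minus 1 for the agegate variant
#   }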
244
245 class YoutubeBaseInfoExtractor(InfoExtractor):
246 """Provide base functions for Youtube extractors"""
247
248 _RESERVED_NAMES = (
249 r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
250 r'shorts|movies|results|shared|hashtag|trending|feed|feeds|'
251 r'browse|oembed|get_video_info|iframe_api|s/player|'
252 r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
253
254 _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
255
256 _NETRC_MACHINE = 'youtube'
257
258 # If True it will raise an error if no login info is provided
259 _LOGIN_REQUIRED = False
260
261 def _login(self):
262 """
263 Attempt to log in to YouTube.
264 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
265 """
266
267 if (self._LOGIN_REQUIRED
268 and self.get_param('cookiefile') is None
269 and self.get_param('cookiesfrombrowser') is None):
270 self.raise_login_required(
271 'Login details are needed to download this content', method='cookies')
272 username, password = self._get_login_info()
273 if username:
274 self.report_warning(f'Cannot log in to YouTube using username and password. {self._LOGIN_HINTS["cookies"]}')
275
276 def _initialize_consent(self):
277 cookies = self._get_cookies('https://www.youtube.com/')
278 if cookies.get('__Secure-3PSID'):
279 return
280 consent_id = None
281 consent = cookies.get('CONSENT')
282 if consent:
283 if 'YES' in consent.value:
284 return
285 consent_id = self._search_regex(
286 r'PENDING\+(\d+)', consent.value, 'consent', default=None)
287 if not consent_id:
288 consent_id = random.randint(100, 999)
289 self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
290
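# Illustrative sketch of the consent handling above (hypothetical cookie values):
# the EU consent interstitial sets a cookie such as CONSENT=PENDING+262, and the code
# rewrites it into an accepted form so content pages are served directly, e.g.:
#
#   # before: CONSENT=PENDING+262
#   # after:  CONSENT=YES+cb.20210328-17-p0.en+FX+262
#
# If CONSENT already contains 'YES', or a __Secure-3PSID login cookie exists,
# nothing is changed.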
291 def _real_initialize(self):
292 self._initialize_consent()
293 self._login()
294
295 _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
296 _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
297 _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
298
299 def _get_default_ytcfg(self, client='web'):
300 return copy.deepcopy(INNERTUBE_CLIENTS[client])
301
302 def _get_innertube_host(self, client='web'):
303 return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
304
305 def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
306 # try_get but with fallback to default ytcfg client values when present
307 _func = lambda y: try_get(y, getter, expected_type)
308 return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
309
310 def _extract_client_name(self, ytcfg, default_client='web'):
311 return self._ytcfg_get_safe(
312 ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
313 lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
314
315 def _extract_client_version(self, ytcfg, default_client='web'):
316 return self._ytcfg_get_safe(
317 ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
318 lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
319
320 def _extract_api_key(self, ytcfg=None, default_client='web'):
321 return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
322
323 def _extract_context(self, ytcfg=None, default_client='web'):
324 _get_context = lambda y: try_get(y, lambda x: x['INNERTUBE_CONTEXT'], dict)
325 context = _get_context(ytcfg)
326 if context:
327 return context
328
329 context = _get_context(self._get_default_ytcfg(default_client))
330 if not ytcfg:
331 return context
332
333 # Recreate the client context (required)
334 context['client'].update({
335 'clientVersion': self._extract_client_version(ytcfg, default_client),
336 'clientName': self._extract_client_name(ytcfg, default_client),
337 })
338 visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
339 if visitor_data:
340 context['client']['visitorData'] = visitor_data
341 return context
342
343 _SAPISID = None
344
345 def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
346 time_now = round(time.time())
347 if self._SAPISID is None:
348 yt_cookies = self._get_cookies('https://www.youtube.com')
349 # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
350 # See: https://github.com/yt-dlp/yt-dlp/issues/393
351 sapisid_cookie = dict_get(
352 yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
353 if sapisid_cookie and sapisid_cookie.value:
354 self._SAPISID = sapisid_cookie.value
355 self.write_debug('Extracted SAPISID cookie')
356 # The SAPISID cookie must also be set; copy it from __Secure-3PAPISID if missing
357 if not yt_cookies.get('SAPISID'):
358 self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
359 self._set_cookie(
360 '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
361 else:
362 self._SAPISID = False
363 if not self._SAPISID:
364 return None
365 # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
366 sapisidhash = hashlib.sha1(
367 f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
368 return f'SAPISIDHASH {time_now}_{sapisidhash}'
369
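# Illustrative sketch (standalone, hypothetical cookie value): the Authorization header
# produced above has the form 'SAPISIDHASH <timestamp>_<sha1(timestamp SAPISID origin)>',
# following the algorithm referenced in the comment:
#
#   import hashlib, time
#   sapisid = 'EXAMPLE_SAPISID_VALUE'        # value of the SAPISID / __Secure-3PAPISID cookie
#   origin = 'https://www.youtube.com'
#   ts = round(time.time())
#   digest = hashlib.sha1(f'{ts} {sapisid} {origin}'.encode('utf-8')).hexdigest()
#   header = f'SAPISIDHASH {ts}_{digest}'    # e.g. 'SAPISIDHASH 1634000000_3f2c...'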
370 def _call_api(self, ep, query, video_id, fatal=True, headers=None,
371 note='Downloading API JSON', errnote='Unable to download API page',
372 context=None, api_key=None, api_hostname=None, default_client='web'):
373
374 data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
375 data.update(query)
376 real_headers = self.generate_api_headers(default_client=default_client)
377 real_headers.update({'content-type': 'application/json'})
378 if headers:
379 real_headers.update(headers)
380 return self._download_json(
381 'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
382 video_id=video_id, fatal=fatal, note=note, errnote=errnote,
383 data=json.dumps(data).encode('utf8'), headers=real_headers,
384 query={'key': api_key or self._extract_api_key()})
385
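# Illustrative sketch (hypothetical video id): _call_api() above POSTs a JSON body of the
# form below to https://www.youtube.com/youtubei/v1/<ep>?key=<api_key>, with the innertube
# context placed in front of the endpoint-specific query:
#
#   {
#       'context': {
#           'client': {'clientName': 'WEB', 'clientVersion': '2.20210622.10.00', 'hl': 'en'}
#       },
#       'videoId': 'BaW_jenozKc',   # example query for ep='player'
#   }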
386 def extract_yt_initial_data(self, item_id, webpage, fatal=True):
387 data = self._search_regex(
388 (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
389 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
390 if data:
391 return self._parse_json(data, item_id, fatal=fatal)
392
393 @staticmethod
394 def _extract_session_index(*data):
395 """
396 Index of current account in account list.
397 See: https://github.com/yt-dlp/yt-dlp/pull/519
398 """
399 for ytcfg in data:
400 session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
401 if session_index is not None:
402 return session_index
403
404 # Deprecated?
405 def _extract_identity_token(self, ytcfg=None, webpage=None):
406 if ytcfg:
407 token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
408 if token:
409 return token
410 if webpage:
411 return self._search_regex(
412 r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
413 'identity token', default=None, fatal=False)
414
415 @staticmethod
416 def _extract_account_syncid(*args):
417 """
418 Extract syncId required to download private playlists of secondary channels
419 @params response and/or ytcfg
420 """
421 for data in args:
422 # ytcfg includes channel_syncid if on secondary channel
423 delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
424 if delegated_sid:
425 return delegated_sid
426 sync_ids = (try_get(
427 data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
428 lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
429 if len(sync_ids) >= 2 and sync_ids[1]:
430 # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
431 # and just "user_syncid||" for primary channel. We only want the channel_syncid
432 return sync_ids[0]
433
434 @staticmethod
435 def _extract_visitor_data(*args):
436 """
437 Extracts visitorData from an API response or ytcfg
438 Appears to be used to track session state
439 """
440 return traverse_obj(
441 args, (..., ('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))),
442 expected_type=compat_str, get_all=False)
443
444 @property
445 def is_authenticated(self):
446 return bool(self._generate_sapisidhash_header())
447
448 def extract_ytcfg(self, video_id, webpage):
449 if not webpage:
450 return {}
451 return self._parse_json(
452 self._search_regex(
453 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
454 default='{}'), video_id, fatal=False) or {}
455
456 def generate_api_headers(
457 self, *, ytcfg=None, account_syncid=None, session_index=None,
458 visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
459
460 origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
461 headers = {
462 'X-YouTube-Client-Name': compat_str(
463 self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
464 'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
465 'Origin': origin,
466 'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
467 'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
468 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
469 }
470 if session_index is None:
471 session_index = self._extract_session_index(ytcfg)
472 if account_syncid or session_index is not None:
473 headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
474
475 auth = self._generate_sapisidhash_header(origin)
476 if auth is not None:
477 headers['Authorization'] = auth
478 headers['X-Origin'] = origin
479 return {h: v for h, v in headers.items() if v is not None}
480
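# Illustrative sketch (hypothetical values): for an authenticated 'web' client,
# generate_api_headers() above returns something along these lines; None-valued
# headers (and, when not available, the identity token / page id) are dropped by
# the final dict comprehension:
#
#   {
#       'X-YouTube-Client-Name': '1',
#       'X-YouTube-Client-Version': '2.20210622.10.00',
#       'Origin': 'https://www.youtube.com',
#       'X-Goog-Visitor-Id': 'CgtW...',                     # visitor_data, if known
#       'X-Goog-AuthUser': 0,                               # session index
#       'Authorization': 'SAPISIDHASH 1634000000_3f2c...',
#       'X-Origin': 'https://www.youtube.com',
#   }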
481 @staticmethod
482 def _build_api_continuation_query(continuation, ctp=None):
483 query = {
484 'continuation': continuation
485 }
486 # TODO: Inconsistency with clickTrackingParams.
487 # Currently we have a fixed ctp contained within context (from ytcfg)
488 # and a ctp in root query for continuation.
489 if ctp:
490 query['clickTracking'] = {'clickTrackingParams': ctp}
491 return query
492
493 @classmethod
494 def _extract_next_continuation_data(cls, renderer):
495 next_continuation = try_get(
496 renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
497 lambda x: x['continuation']['reloadContinuationData']), dict)
498 if not next_continuation:
499 return
500 continuation = next_continuation.get('continuation')
501 if not continuation:
502 return
503 ctp = next_continuation.get('clickTrackingParams')
504 return cls._build_api_continuation_query(continuation, ctp)
505
506 @classmethod
507 def _extract_continuation_ep_data(cls, continuation_ep: dict):
508 if isinstance(continuation_ep, dict):
509 continuation = try_get(
510 continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
511 if not continuation:
512 return
513 ctp = continuation_ep.get('clickTrackingParams')
514 return cls._build_api_continuation_query(continuation, ctp)
515
516 @classmethod
517 def _extract_continuation(cls, renderer):
518 next_continuation = cls._extract_next_continuation_data(renderer)
519 if next_continuation:
520 return next_continuation
521
522 contents = []
523 for key in ('contents', 'items'):
524 contents.extend(try_get(renderer, lambda x: x[key], list) or [])
525
526 for content in contents:
527 if not isinstance(content, dict):
528 continue
529 continuation_ep = try_get(
530 content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
531 lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
532 dict)
533 continuation = cls._extract_continuation_ep_data(continuation_ep)
534 if continuation:
535 return continuation
536
537 @classmethod
538 def _extract_alerts(cls, data):
539 for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
540 if not isinstance(alert_dict, dict):
541 continue
542 for alert in alert_dict.values():
543 alert_type = alert.get('type')
544 if not alert_type:
545 continue
546 message = cls._get_text(alert, 'text')
547 if message:
548 yield alert_type, message
549
550 def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
551 errors = []
552 warnings = []
553 for alert_type, alert_message in alerts:
554 if alert_type.lower() == 'error' and fatal:
555 errors.append([alert_type, alert_message])
556 else:
557 warnings.append([alert_type, alert_message])
558
559 for alert_type, alert_message in (warnings + errors[:-1]):
560 self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
561 if errors:
562 raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
563
564 def _extract_and_report_alerts(self, data, *args, **kwargs):
565 return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
566
567 def _extract_badges(self, renderer: dict):
568 badges = set()
569 for badge in try_get(renderer, lambda x: x['badges'], list) or []:
570 label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
571 if label:
572 badges.add(label.lower())
573 return badges
574
575 @staticmethod
576 def _get_text(data, *path_list, max_runs=None):
577 for path in path_list or [None]:
578 if path is None:
579 obj = [data]
580 else:
581 obj = traverse_obj(data, path, default=[])
582 if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
583 obj = [obj]
584 for item in obj:
585 text = try_get(item, lambda x: x['simpleText'], compat_str)
586 if text:
587 return text
588 runs = try_get(item, lambda x: x['runs'], list) or []
589 if not runs and isinstance(item, list):
590 runs = item
591
592 runs = runs[:min(len(runs), max_runs or len(runs))]
593 text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
594 if text:
595 return text
596
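# Illustrative sketch: _get_text() above handles both text representations used in
# innertube responses, e.g. (hypothetical renderer snippets):
#
#   _get_text({'title': {'simpleText': 'Video title'}}, 'title')
#       # -> 'Video title'
#   _get_text({'title': {'runs': [{'text': 'Video '}, {'text': 'title'}]}}, 'title')
#       # -> 'Video title'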
597 def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
598 ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
599 default_client='web'):
600 response = None
601 last_error = None
602 count = -1
603 retries = self.get_param('extractor_retries', 3)
604 if check_get_keys is None:
605 check_get_keys = []
606 while count < retries:
607 count += 1
608 if last_error:
609 self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
610 try:
611 response = self._call_api(
612 ep=ep, fatal=True, headers=headers,
613 video_id=item_id, query=query,
614 context=self._extract_context(ytcfg, default_client),
615 api_key=self._extract_api_key(ytcfg, default_client),
616 api_hostname=api_hostname, default_client=default_client,
617 note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
618 except ExtractorError as e:
619 if isinstance(e.cause, network_exceptions):
620 if isinstance(e.cause, compat_HTTPError) and not is_html(e.cause.read(512)):
621 e.cause.seek(0)
622 yt_error = try_get(
623 self._parse_json(e.cause.read().decode(), item_id, fatal=False),
624 lambda x: x['error']['message'], compat_str)
625 if yt_error:
626 self._report_alerts([('ERROR', yt_error)], fatal=False)
627 # Downloading the page may result in an intermittent 5xx HTTP error
628 # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
629 # We also want to catch all other network exceptions since errors in later pages can be troublesome
630 # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
631 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
632 last_error = error_to_compat_str(e.cause or e.msg)
633 if count < retries:
634 continue
635 if fatal:
636 raise
637 else:
638 self.report_warning(error_to_compat_str(e))
639 return
640
641 else:
642 try:
643 self._extract_and_report_alerts(response, only_once=True)
644 except ExtractorError as e:
645 # YouTube servers may return errors we want to retry on in a 200 OK response
646 # See: https://github.com/yt-dlp/yt-dlp/issues/839
647 if 'unknown error' in e.msg.lower():
648 last_error = e.msg
649 continue
650 if fatal:
651 raise
652 self.report_warning(error_to_compat_str(e))
653 return
654 if not check_get_keys or dict_get(response, check_get_keys):
655 break
656 # YouTube sometimes sends incomplete data
657 # See: https://github.com/ytdl-org/youtube-dl/issues/28194
658 last_error = 'Incomplete data received'
659 if count >= retries:
660 if fatal:
661 raise ExtractorError(last_error)
662 else:
663 self.report_warning(last_error)
664 return
665 return response
666
667 @staticmethod
668 def is_music_url(url):
669 return re.match(r'https?://music\.youtube\.com/', url) is not None
670
671 def _extract_video(self, renderer):
672 video_id = renderer.get('videoId')
673 title = self._get_text(renderer, 'title')
674 description = self._get_text(renderer, 'descriptionSnippet')
675 duration = parse_duration(self._get_text(
676 renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
677 view_count_text = self._get_text(renderer, 'viewCountText') or ''
678 view_count = str_to_int(self._search_regex(
679 r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
680 'view count', default=None))
681
682 uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
683
684 return {
685 '_type': 'url',
686 'ie_key': YoutubeIE.ie_key(),
687 'id': video_id,
688 'url': f'https://www.youtube.com/watch?v={video_id}',
689 'title': title,
690 'description': description,
691 'duration': duration,
692 'view_count': view_count,
693 'uploader': uploader,
694 }
695
696
697 class YoutubeIE(YoutubeBaseInfoExtractor):
698 IE_DESC = 'YouTube'
699 _INVIDIOUS_SITES = (
700 # invidious-redirect websites
701 r'(?:www\.)?redirect\.invidious\.io',
702 r'(?:(?:www|dev)\.)?invidio\.us',
703 # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
704 r'(?:www\.)?invidious\.pussthecat\.org',
705 r'(?:www\.)?invidious\.zee\.li',
706 r'(?:www\.)?invidious\.ethibox\.fr',
707 r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
708 # youtube-dl invidious instances list
709 r'(?:(?:www|no)\.)?invidiou\.sh',
710 r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
711 r'(?:www\.)?invidious\.kabi\.tk',
712 r'(?:www\.)?invidious\.mastodon\.host',
713 r'(?:www\.)?invidious\.zapashcanon\.fr',
714 r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
715 r'(?:www\.)?invidious\.tinfoil-hat\.net',
716 r'(?:www\.)?invidious\.himiko\.cloud',
717 r'(?:www\.)?invidious\.reallyancient\.tech',
718 r'(?:www\.)?invidious\.tube',
719 r'(?:www\.)?invidiou\.site',
720 r'(?:www\.)?invidious\.site',
721 r'(?:www\.)?invidious\.xyz',
722 r'(?:www\.)?invidious\.nixnet\.xyz',
723 r'(?:www\.)?invidious\.048596\.xyz',
724 r'(?:www\.)?invidious\.drycat\.fr',
725 r'(?:www\.)?inv\.skyn3t\.in',
726 r'(?:www\.)?tube\.poal\.co',
727 r'(?:www\.)?tube\.connect\.cafe',
728 r'(?:www\.)?vid\.wxzm\.sx',
729 r'(?:www\.)?vid\.mint\.lgbt',
730 r'(?:www\.)?vid\.puffyan\.us',
731 r'(?:www\.)?yewtu\.be',
732 r'(?:www\.)?yt\.elukerio\.org',
733 r'(?:www\.)?yt\.lelux\.fi',
734 r'(?:www\.)?invidious\.ggc-project\.de',
735 r'(?:www\.)?yt\.maisputain\.ovh',
736 r'(?:www\.)?ytprivate\.com',
737 r'(?:www\.)?invidious\.13ad\.de',
738 r'(?:www\.)?invidious\.toot\.koeln',
739 r'(?:www\.)?invidious\.fdn\.fr',
740 r'(?:www\.)?watch\.nettohikari\.com',
741 r'(?:www\.)?invidious\.namazso\.eu',
742 r'(?:www\.)?invidious\.silkky\.cloud',
743 r'(?:www\.)?invidious\.exonip\.de',
744 r'(?:www\.)?invidious\.riverside\.rocks',
745 r'(?:www\.)?invidious\.blamefran\.net',
746 r'(?:www\.)?invidious\.moomoo\.de',
747 r'(?:www\.)?ytb\.trom\.tf',
748 r'(?:www\.)?yt\.cyberhost\.uk',
749 r'(?:www\.)?kgg2m7yk5aybusll\.onion',
750 r'(?:www\.)?qklhadlycap4cnod\.onion',
751 r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
752 r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
753 r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
754 r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
755 r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
756 r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
757 r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
758 r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
759 r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
760 r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
761 )
762 _VALID_URL = r"""(?x)^
763 (
764 (?:https?://|//) # http(s):// or protocol-independent URL
765 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
766 (?:www\.)?deturl\.com/www\.youtube\.com|
767 (?:www\.)?pwnyoutube\.com|
768 (?:www\.)?hooktube\.com|
769 (?:www\.)?yourepeat\.com|
770 tube\.majestyc\.net|
771 %(invidious)s|
772 youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
773 (?:.*?\#/)? # handle anchor (#/) redirect urls
774 (?: # the various things that can precede the ID:
775 (?:(?:v|embed|e|shorts)/(?!videoseries)) # v/ or embed/ or e/ or shorts/
776 |(?: # or the v= param in all its forms
777 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
778 (?:\?|\#!?) # the params delimiter ? or # or #!
779 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
780 v=
781 )
782 ))
783 |(?:
784 youtu\.be| # just youtu.be/xxxx
785 vid\.plus| # or vid.plus/xxxx
786 zwearz\.com/watch| # or zwearz.com/watch/xxxx
787 %(invidious)s
788 )/
789 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
790 )
791 )? # all until now is optional -> you can pass the naked ID
792 (?P<id>[0-9A-Za-z_-]{11}) # here it is! the YouTube video ID
793 (?(1).+)? # if we found the ID, everything can follow
794 (?:\#|$)""" % {
795 'invidious': '|'.join(_INVIDIOUS_SITES),
796 }
797 _PLAYER_INFO_RE = (
798 r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
799 r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
800 r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
801 )
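# Illustrative sketch (hypothetical player id): player JS URLs matched by _PLAYER_INFO_RE
# above look like the following; the captured group identifies the player build, which
# later code can use when caching the extracted signature functions:
#
#   /s/player/f1ca6900/player_ias.vflset/en_US/base.js                 # -> id 'f1ca6900'
#   /f1ca6900/player-plasma-ias-phone-en_US.vflset/base.js             # -> id 'f1ca6900'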
802 _formats = {
803 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
804 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
805 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
806 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
807 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
808 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
809 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
810 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
811 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
812 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
813 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
814 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
815 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
816 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
817 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
818 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
819 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
820 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
821
822
823 # 3D videos
824 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
825 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
826 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
827 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
828 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
829 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
830 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
831
832 # Apple HTTP Live Streaming
833 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
834 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
835 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
836 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
837 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
838 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
839 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
840 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
841
842 # DASH mp4 video
843 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
844 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
845 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
846 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
847 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
848 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
849 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
850 '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
851 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
852 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
853 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
854 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
855
856 # Dash mp4 audio
857 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
858 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
859 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
860 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
861 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
862 '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
863 '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
864
865 # Dash webm
866 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
867 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
868 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
869 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
870 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
871 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
872 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
873 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
874 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
875 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
876 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
877 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
878 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
879 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
880 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
881 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
882 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
883 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
884 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
885 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
886 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
887 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
888
889 # Dash webm audio
890 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
891 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
892
893 # Dash webm audio with opus inside
894 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
895 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
896 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
897
898 # RTMP (unnamed)
899 '_rtmp': {'protocol': 'rtmp'},
900
901 # av01 video only formats sometimes served with "unknown" codecs
902 '394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
903 '395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
904 '396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
905 '397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
906 '398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
907 '399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
908 '400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
909 '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
910 }
911 _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
912
913 _GEO_BYPASS = False
914
915 IE_NAME = 'youtube'
916 _TESTS = [
917 {
918 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
919 'info_dict': {
920 'id': 'BaW_jenozKc',
921 'ext': 'mp4',
922 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
923 'uploader': 'Philipp Hagemeister',
924 'uploader_id': 'phihag',
925 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
926 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
927 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
928 'upload_date': '20121002',
929 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
930 'categories': ['Science & Technology'],
931 'tags': ['youtube-dl'],
932 'duration': 10,
933 'view_count': int,
934 'like_count': int,
935 'dislike_count': int,
936 'start_time': 1,
937 'end_time': 9,
938 }
939 },
940 {
941 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
942 'note': 'Embed-only video (#1746)',
943 'info_dict': {
944 'id': 'yZIXLfi8CZQ',
945 'ext': 'mp4',
946 'upload_date': '20120608',
947 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
948 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
949 'uploader': 'SET India',
950 'uploader_id': 'setindia',
951 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
952 'age_limit': 18,
953 },
954 'skip': 'Private video',
955 },
956 {
957 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
958 'note': 'Use the first video ID in the URL',
959 'info_dict': {
960 'id': 'BaW_jenozKc',
961 'ext': 'mp4',
962 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
963 'uploader': 'Philipp Hagemeister',
964 'uploader_id': 'phihag',
965 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
966 'upload_date': '20121002',
967 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
968 'categories': ['Science & Technology'],
969 'tags': ['youtube-dl'],
970 'duration': 10,
971 'view_count': int,
972 'like_count': int,
973 'dislike_count': int,
974 },
975 'params': {
976 'skip_download': True,
977 },
978 },
979 {
980 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
981 'note': '256k DASH audio (format 141) via DASH manifest',
982 'info_dict': {
983 'id': 'a9LDPn-MO4I',
984 'ext': 'm4a',
985 'upload_date': '20121002',
986 'uploader_id': '8KVIDEO',
987 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
988 'description': '',
989 'uploader': '8KVIDEO',
990 'title': 'UHDTV TEST 8K VIDEO.mp4'
991 },
992 'params': {
993 'youtube_include_dash_manifest': True,
994 'format': '141',
995 },
996 'skip': 'format 141 not served anymore',
997 },
998 # DASH manifest with encrypted signature
999 {
1000 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
1001 'info_dict': {
1002 'id': 'IB3lcPjvWLA',
1003 'ext': 'm4a',
1004 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
1005 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
1006 'duration': 244,
1007 'uploader': 'AfrojackVEVO',
1008 'uploader_id': 'AfrojackVEVO',
1009 'upload_date': '20131011',
1010 'abr': 129.495,
1011 },
1012 'params': {
1013 'youtube_include_dash_manifest': True,
1014 'format': '141/bestaudio[ext=m4a]',
1015 },
1016 },
1017 # Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
1018 {
1019 'note': 'Embed allowed age-gate video',
1020 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
1021 'info_dict': {
1022 'id': 'HtVdAasjOgU',
1023 'ext': 'mp4',
1024 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
1025 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
1026 'duration': 142,
1027 'uploader': 'The Witcher',
1028 'uploader_id': 'WitcherGame',
1029 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
1030 'upload_date': '20140605',
1031 'age_limit': 18,
1032 },
1033 },
1034 {
1035 'note': 'Age-gate video with embed allowed in public site',
1036 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
1037 'info_dict': {
1038 'id': 'HsUATh_Nc2U',
1039 'ext': 'mp4',
1040 'title': 'Godzilla 2 (Official Video)',
1041 'description': 'md5:bf77e03fcae5529475e500129b05668a',
1042 'upload_date': '20200408',
1043 'uploader_id': 'FlyingKitty900',
1044 'uploader': 'FlyingKitty',
1045 'age_limit': 18,
1046 },
1047 },
1048 {
1049 'note': 'Age-gate video embeddable only with clientScreen=EMBED',
1050 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
1051 'info_dict': {
1052 'id': 'Tq92D6wQ1mg',
1053 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
1054 'ext': 'mp4',
1055 'upload_date': '20191227',
1056 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
1057 'uploader': 'Projekt Melody',
1058 'description': 'md5:17eccca93a786d51bc67646756894066',
1059 'age_limit': 18,
1060 },
1061 },
1062 {
1063 'note': 'Non-age-gated non-embeddable video',
1064 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
1065 'info_dict': {
1066 'id': 'MeJVWBSsPAY',
1067 'ext': 'mp4',
1068 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
1069 'uploader': 'Herr Lurik',
1070 'uploader_id': 'st3in234',
1071 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
1072 'upload_date': '20130730',
1073 },
1074 },
1075 {
1076 'note': 'Non-bypassable age-gated video',
1077 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
1078 'only_matching': True,
1079 },
1080 # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
1081 # YouTube Red ad is not captured for creator
1082 {
1083 'url': '__2ABJjxzNo',
1084 'info_dict': {
1085 'id': '__2ABJjxzNo',
1086 'ext': 'mp4',
1087 'duration': 266,
1088 'upload_date': '20100430',
1089 'uploader_id': 'deadmau5',
1090 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
1091 'creator': 'deadmau5',
1092 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
1093 'uploader': 'deadmau5',
1094 'title': 'Deadmau5 - Some Chords (HD)',
1095 'alt_title': 'Some Chords',
1096 },
1097 'expected_warnings': [
1098 'DASH manifest missing',
1099 ]
1100 },
1101 # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
1102 {
1103 'url': 'lqQg6PlCWgI',
1104 'info_dict': {
1105 'id': 'lqQg6PlCWgI',
1106 'ext': 'mp4',
1107 'duration': 6085,
1108 'upload_date': '20150827',
1109 'uploader_id': 'olympic',
1110 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
1111 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
1112 'uploader': 'Olympics',
1113 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
1114 },
1115 'params': {
1116 'skip_download': 'requires avconv',
1117 }
1118 },
1119 # Non-square pixels
1120 {
1121 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
1122 'info_dict': {
1123 'id': '_b-2C3KPAM0',
1124 'ext': 'mp4',
1125 'stretched_ratio': 16 / 9.,
1126 'duration': 85,
1127 'upload_date': '20110310',
1128 'uploader_id': 'AllenMeow',
1129 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
1130 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
1131 'uploader': '孫ᄋᄅ',
1132 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
1133 },
1134 },
1135 # url_encoded_fmt_stream_map is empty string
1136 {
1137 'url': 'qEJwOuvDf7I',
1138 'info_dict': {
1139 'id': 'qEJwOuvDf7I',
1140 'ext': 'webm',
1141 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
1142 'description': '',
1143 'upload_date': '20150404',
1144 'uploader_id': 'spbelect',
1145 'uploader': 'Наблюдатели Петербурга',
1146 },
1147 'params': {
1148 'skip_download': 'requires avconv',
1149 },
1150 'skip': 'This live event has ended.',
1151 },
1152 # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
1153 {
1154 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
1155 'info_dict': {
1156 'id': 'FIl7x6_3R5Y',
1157 'ext': 'webm',
1158 'title': 'md5:7b81415841e02ecd4313668cde88737a',
1159 'description': 'md5:116377fd2963b81ec4ce64b542173306',
1160 'duration': 220,
1161 'upload_date': '20150625',
1162 'uploader_id': 'dorappi2000',
1163 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
1164 'uploader': 'dorappi2000',
1165 'formats': 'mincount:31',
1166 },
1167 'skip': 'not actual anymore',
1168 },
1169 # DASH manifest with segment_list
1170 {
1171 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
1172 'md5': '8ce563a1d667b599d21064e982ab9e31',
1173 'info_dict': {
1174 'id': 'CsmdDsKjzN8',
1175 'ext': 'mp4',
1176 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
1177 'uploader': 'Airtek',
1178 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
1179 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
1180 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
1181 },
1182 'params': {
1183 'youtube_include_dash_manifest': True,
1184 'format': '135', # bestvideo
1185 },
1186 'skip': 'This live event has ended.',
1187 },
1188 {
1189 # Multifeed videos (multiple cameras), URL is for Main Camera
1190 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
1191 'info_dict': {
1192 'id': 'jvGDaLqkpTg',
1193 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
1194 'description': 'md5:e03b909557865076822aa169218d6a5d',
1195 },
1196 'playlist': [{
1197 'info_dict': {
1198 'id': 'jvGDaLqkpTg',
1199 'ext': 'mp4',
1200 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
1201 'description': 'md5:e03b909557865076822aa169218d6a5d',
1202 'duration': 10643,
1203 'upload_date': '20161111',
1204 'uploader': 'Team PGP',
1205 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1206 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1207 },
1208 }, {
1209 'info_dict': {
1210 'id': '3AKt1R1aDnw',
1211 'ext': 'mp4',
1212 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
1213 'description': 'md5:e03b909557865076822aa169218d6a5d',
1214 'duration': 10991,
1215 'upload_date': '20161111',
1216 'uploader': 'Team PGP',
1217 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1218 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1219 },
1220 }, {
1221 'info_dict': {
1222 'id': 'RtAMM00gpVc',
1223 'ext': 'mp4',
1224 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
1225 'description': 'md5:e03b909557865076822aa169218d6a5d',
1226 'duration': 10995,
1227 'upload_date': '20161111',
1228 'uploader': 'Team PGP',
1229 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1230 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1231 },
1232 }, {
1233 'info_dict': {
1234 'id': '6N2fdlP3C5U',
1235 'ext': 'mp4',
1236 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
1237 'description': 'md5:e03b909557865076822aa169218d6a5d',
1238 'duration': 10990,
1239 'upload_date': '20161111',
1240 'uploader': 'Team PGP',
1241 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1242 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1243 },
1244 }],
1245 'params': {
1246 'skip_download': True,
1247 },
1248 'skip': 'Not multifeed anymore',
1249 },
1250 {
1251 # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
1252 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
1253 'info_dict': {
1254 'id': 'gVfLd0zydlo',
1255 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
1256 },
1257 'playlist_count': 2,
1258 'skip': 'Not multifeed anymore',
1259 },
1260 {
1261 'url': 'https://vid.plus/FlRa-iH7PGw',
1262 'only_matching': True,
1263 },
1264 {
1265 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
1266 'only_matching': True,
1267 },
1268 {
1269 # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1270 # Also tests cut-off URL expansion in video description (see
1271 # https://github.com/ytdl-org/youtube-dl/issues/1892,
1272 # https://github.com/ytdl-org/youtube-dl/issues/8164)
1273 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
1274 'info_dict': {
1275 'id': 'lsguqyKfVQg',
1276 'ext': 'mp4',
1277 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
1278 'alt_title': 'Dark Walk',
1279 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
1280 'duration': 133,
1281 'upload_date': '20151119',
1282 'uploader_id': 'IronSoulElf',
1283 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
1284 'uploader': 'IronSoulElf',
1285 'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1286 'track': 'Dark Walk',
1287 'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1288 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
1289 },
1290 'params': {
1291 'skip_download': True,
1292 },
1293 },
1294 {
1295 # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1296 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
1297 'only_matching': True,
1298 },
1299 {
1300 # Video with yt:stretch=17:0
1301 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
1302 'info_dict': {
1303 'id': 'Q39EVAstoRM',
1304 'ext': 'mp4',
1305 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
1306 'description': 'md5:ee18a25c350637c8faff806845bddee9',
1307 'upload_date': '20151107',
1308 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
1309 'uploader': 'CH GAMER DROID',
1310 },
1311 'params': {
1312 'skip_download': True,
1313 },
1314 'skip': 'This video does not exist.',
1315 },
1316 {
1317 # Video with incomplete 'yt:stretch=16:'
1318 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
1319 'only_matching': True,
1320 },
1321 {
1322 # Video licensed under Creative Commons
1323 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
1324 'info_dict': {
1325 'id': 'M4gD1WSo5mA',
1326 'ext': 'mp4',
1327 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
1328 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
1329 'duration': 721,
1330 'upload_date': '20150127',
1331 'uploader_id': 'BerkmanCenter',
1332 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
1333 'uploader': 'The Berkman Klein Center for Internet & Society',
1334 'license': 'Creative Commons Attribution license (reuse allowed)',
1335 },
1336 'params': {
1337 'skip_download': True,
1338 },
1339 },
1340 {
1341 # Channel-like uploader_url
1342 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
1343 'info_dict': {
1344 'id': 'eQcmzGIKrzg',
1345 'ext': 'mp4',
1346 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
1347 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
1348 'duration': 4060,
1349 'upload_date': '20151119',
1350 'uploader': 'Bernie Sanders',
1351 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
1352 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
1353 'license': 'Creative Commons Attribution license (reuse allowed)',
1354 },
1355 'params': {
1356 'skip_download': True,
1357 },
1358 },
1359 {
1360 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
1361 'only_matching': True,
1362 },
1363 {
1364 # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
1365 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
1366 'only_matching': True,
1367 },
1368 {
1369 # Rental video preview
1370 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
1371 'info_dict': {
1372 'id': 'uGpuVWrhIzE',
1373 'ext': 'mp4',
1374 'title': 'Piku - Trailer',
1375 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
1376 'upload_date': '20150811',
1377 'uploader': 'FlixMatrix',
1378 'uploader_id': 'FlixMatrixKaravan',
1379 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
1380 'license': 'Standard YouTube License',
1381 },
1382 'params': {
1383 'skip_download': True,
1384 },
1385 'skip': 'This video is not available.',
1386 },
1387 {
1388 # YouTube Red video with episode data
1389 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
1390 'info_dict': {
1391 'id': 'iqKdEhx-dD4',
1392 'ext': 'mp4',
1393 'title': 'Isolation - Mind Field (Ep 1)',
1394 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
1395 'duration': 2085,
1396 'upload_date': '20170118',
1397 'uploader': 'Vsauce',
1398 'uploader_id': 'Vsauce',
1399 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
1400 'series': 'Mind Field',
1401 'season_number': 1,
1402 'episode_number': 1,
1403 },
1404 'params': {
1405 'skip_download': True,
1406 },
1407 'expected_warnings': [
1408 'Skipping DASH manifest',
1409 ],
1410 },
1411 {
1412 # The following content has been identified by the YouTube community
1413 # as inappropriate or offensive to some audiences.
1414 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
1415 'info_dict': {
1416 'id': '6SJNVb0GnPI',
1417 'ext': 'mp4',
1418 'title': 'Race Differences in Intelligence',
1419 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
1420 'duration': 965,
1421 'upload_date': '20140124',
1422 'uploader': 'New Century Foundation',
1423 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
1424 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
1425 },
1426 'params': {
1427 'skip_download': True,
1428 },
1429 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
1430 },
1431 {
1432 # itag 212
1433 'url': '1t24XAntNCY',
1434 'only_matching': True,
1435 },
1436 {
1437 # geo restricted to JP
1438 'url': 'sJL6WA-aGkQ',
1439 'only_matching': True,
1440 },
1441 {
1442 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
1443 'only_matching': True,
1444 },
1445 {
1446 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
1447 'only_matching': True,
1448 },
1449 {
1450 # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
1451 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
1452 'only_matching': True,
1453 },
1454 {
1455 # DRM protected
1456 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
1457 'only_matching': True,
1458 },
1459 {
1460 # Video with unsupported adaptive stream type formats
1461 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
1462 'info_dict': {
1463 'id': 'Z4Vy8R84T1U',
1464 'ext': 'mp4',
1465 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
1466 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
1467 'duration': 433,
1468 'upload_date': '20130923',
1469 'uploader': 'Amelia Putri Harwita',
1470 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
1471 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
1472 'formats': 'maxcount:10',
1473 },
1474 'params': {
1475 'skip_download': True,
1476 'youtube_include_dash_manifest': False,
1477 },
1478 'skip': 'not actual anymore',
1479 },
1480 {
1481 # Youtube Music Auto-generated description
1482 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1483 'info_dict': {
1484 'id': 'MgNrAu2pzNs',
1485 'ext': 'mp4',
1486 'title': 'Voyeur Girl',
1487 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
1488 'upload_date': '20190312',
1489 'uploader': 'Stephen - Topic',
1490 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
1491 'artist': 'Stephen',
1492 'track': 'Voyeur Girl',
1493 'album': 'it\'s too much love to know my dear',
1494 'release_date': '20190313',
1495 'release_year': 2019,
1496 },
1497 'params': {
1498 'skip_download': True,
1499 },
1500 },
1501 {
1502 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
1503 'only_matching': True,
1504 },
1505 {
1506 # invalid -> valid video id redirection
1507 'url': 'DJztXj2GPfl',
1508 'info_dict': {
1509 'id': 'DJztXj2GPfk',
1510 'ext': 'mp4',
1511 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
1512 'description': 'md5:bf577a41da97918e94fa9798d9228825',
1513 'upload_date': '20090125',
1514 'uploader': 'Prochorowka',
1515 'uploader_id': 'Prochorowka',
1516 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
1517 'artist': 'Panjabi MC',
1518 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
1519 'album': 'Beware of the Boys (Mundian To Bach Ke)',
1520 },
1521 'params': {
1522 'skip_download': True,
1523 },
1524 'skip': 'Video unavailable',
1525 },
1526 {
1527 # empty description results in an empty string
1528 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
1529 'info_dict': {
1530 'id': 'x41yOUIvK2k',
1531 'ext': 'mp4',
1532 'title': 'IMG 3456',
1533 'description': '',
1534 'upload_date': '20170613',
1535 'uploader_id': 'ElevageOrVert',
1536 'uploader': 'ElevageOrVert',
1537 },
1538 'params': {
1539 'skip_download': True,
1540 },
1541 },
1542 {
1543 # with '};' inside yt initial data (see [1])
1544 # see [2] for an example with '};' inside ytInitialPlayerResponse
1545 # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
1546 # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
1547 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
1548 'info_dict': {
1549 'id': 'CHqg6qOn4no',
1550 'ext': 'mp4',
1551 'title': 'Part 77 Sort a list of simple types in c#',
1552 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
1553 'upload_date': '20130831',
1554 'uploader_id': 'kudvenkat',
1555 'uploader': 'kudvenkat',
1556 },
1557 'params': {
1558 'skip_download': True,
1559 },
1560 },
1561 {
1562 # another example of '};' in ytInitialData
1563 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
1564 'only_matching': True,
1565 },
1566 {
1567 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
1568 'only_matching': True,
1569 },
1570 {
1571 # https://github.com/ytdl-org/youtube-dl/pull/28094
1572 'url': 'OtqTfy26tG0',
1573 'info_dict': {
1574 'id': 'OtqTfy26tG0',
1575 'ext': 'mp4',
1576 'title': 'Burn Out',
1577 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
1578 'upload_date': '20141120',
1579 'uploader': 'The Cinematic Orchestra - Topic',
1580 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
1581 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
1582 'artist': 'The Cinematic Orchestra',
1583 'track': 'Burn Out',
1584 'album': 'Every Day',
1585 'release_date': None,
1586 'release_year': None,
1587 },
1588 'params': {
1589 'skip_download': True,
1590 },
1591 },
1592 {
1593 # controversial video, only works with bpctr when authenticated with cookies
1594 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
1595 'only_matching': True,
1596 },
1597 {
1598 # controversial video, requires bpctr/contentCheckOk
1599 'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
1600 'info_dict': {
1601 'id': 'SZJvDhaSDnc',
1602 'ext': 'mp4',
1603 'title': 'San Diego teen commits suicide after bullying over embarrassing video',
1604 'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
1605 'uploader': 'CBS This Morning',
1606 'uploader_id': 'CBSThisMorning',
1607 'upload_date': '20140716',
1608 'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7'
1609 }
1610 },
1611 {
1612 # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
1613 'url': 'cBvYw8_A0vQ',
1614 'info_dict': {
1615 'id': 'cBvYw8_A0vQ',
1616 'ext': 'mp4',
1617 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
1618 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
1619 'upload_date': '20201120',
1620 'uploader': 'Walk around Japan',
1621 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
1622 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
1623 },
1624 'params': {
1625 'skip_download': True,
1626 },
1627 }, {
1628 # Has multiple audio streams
1629 'url': 'WaOKSUlf4TM',
1630 'only_matching': True
1631 }, {
1632 # Requires Premium: has format 141 when requested using YTM url
1633 'url': 'https://music.youtube.com/watch?v=XclachpHxis',
1634 'only_matching': True
1635 }, {
1636 # multiple subtitles with same lang_code
1637 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
1638 'only_matching': True,
1639 }, {
1640 # Force use android client fallback
1641 'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
1642 'info_dict': {
1643 'id': 'YOelRv7fMxY',
1644 'title': 'DIGGING A SECRET TUNNEL Part 1',
1645 'ext': '3gp',
1646 'upload_date': '20210624',
1647 'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
1648 'uploader': 'colinfurze',
1649 'uploader_id': 'colinfurze',
1650 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
1651 'description': 'md5:b5096f56af7ccd7a555c84db81738b22'
1652 },
1653 'params': {
1654 'format': '17', # 3gp format available on android
1655 'extractor_args': {'youtube': {'player_client': ['android']}},
1656 },
1657 },
1658 {
1659 # Skip download of additional client configs (remix client config in this case)
1660 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1661 'only_matching': True,
1662 'params': {
1663 'extractor_args': {'youtube': {'player_skip': ['configs']}},
1664 },
1665 }, {
1666 # shorts
1667 'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
1668 'only_matching': True,
1669 },
1670 ]
1671
1672 @classmethod
1673 def suitable(cls, url):
1674 from ..utils import parse_qs
1675
1676 qs = parse_qs(url)
1677 if qs.get('list', [None])[0]:
1678 return False
1679 return super(YoutubeIE, cls).suitable(url)
1680
1681 def __init__(self, *args, **kwargs):
1682 super(YoutubeIE, self).__init__(*args, **kwargs)
1683 self._code_cache = {}
1684 self._player_cache = {}
1685
1686 def _extract_player_url(self, *ytcfgs, webpage=None):
1687 player_url = traverse_obj(
1688 ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
1689 get_all=False, expected_type=compat_str)
1690 if not player_url:
1691 return
1692 if player_url.startswith('//'):
1693 player_url = 'https:' + player_url
1694 elif not re.match(r'https?://', player_url):
1695 player_url = compat_urlparse.urljoin(
1696 'https://www.youtube.com', player_url)
1697 return player_url
1698
1699 def _download_player_url(self, video_id, fatal=False):
1700 res = self._download_webpage(
1701 'https://www.youtube.com/iframe_api',
1702 note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
1703 if res:
1704 player_version = self._search_regex(
1705 r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
1706 if player_version:
1707 return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
1708
1709 def _signature_cache_id(self, example_sig):
1710 """ Return a string representation of a signature """
1711 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
1712
1713 @classmethod
1714 def _extract_player_info(cls, player_url):
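# Rough example (URL is illustrative only): for a player_url such as
# https://www.youtube.com/s/player/f1ca6900/player_ias.vflset/en_US/base.js
# the extracted player id would be 'f1ca6900'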
1715 for player_re in cls._PLAYER_INFO_RE:
1716 id_m = re.search(player_re, player_url)
1717 if id_m:
1718 break
1719 else:
1720 raise ExtractorError('Cannot identify player %r' % player_url)
1721 return id_m.group('id')
1722
1723 def _load_player(self, video_id, player_url, fatal=True):
1724 player_id = self._extract_player_info(player_url)
1725 if player_id not in self._code_cache:
1726 code = self._download_webpage(
1727 player_url, video_id, fatal=fatal,
1728 note='Downloading player ' + player_id,
1729 errnote='Download of %s failed' % player_url)
1730 if code:
1731 self._code_cache[player_id] = code
1732 return self._code_cache.get(player_id)
1733
1734 def _extract_signature_function(self, video_id, player_url, example_sig):
1735 player_id = self._extract_player_info(player_url)
1736
1737 # Read from filesystem cache
1738 func_id = 'js_%s_%s' % (
1739 player_id, self._signature_cache_id(example_sig))
1740 assert os.path.basename(func_id) == func_id
1741
1742 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
1743 if cache_spec is not None:
1744 return lambda s: ''.join(s[i] for i in cache_spec)
1745
1746 code = self._load_player(video_id, player_url)
1747 if code:
1748 res = self._parse_sig_js(code)
1749
1750 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1751 cache_res = res(test_string)
1752 cache_spec = [ord(c) for c in cache_res]
1753
1754 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1755 return res
1756
1757 def _print_sig_code(self, func, example_sig):
1758 if not self.get_param('youtube_print_sig_code'):
1759 return
1760
1761 def gen_sig_code(idxs):
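# Sketch of the output: consecutive indices are folded into slice syntax, so a
# cache_spec of [2, 3, 4, 5] becomes 's[2:6]' and [5, 4, 3, 2] becomes
# 's[5:1:-1]', while isolated indices are emitted as plain 's[i]' terms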
1762 def _genslice(start, end, step):
1763 starts = '' if start == 0 else str(start)
1764 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1765 steps = '' if step == 1 else (':%d' % step)
1766 return 's[%s%s%s]' % (starts, ends, steps)
1767
1768 step = None
1769 # Quell pyflakes warnings - start will be set when step is set
1770 start = '(Never used)'
1771 for i, prev in zip(idxs[1:], idxs[:-1]):
1772 if step is not None:
1773 if i - prev == step:
1774 continue
1775 yield _genslice(start, prev, step)
1776 step = None
1777 continue
1778 if i - prev in [-1, 1]:
1779 step = i - prev
1780 start = prev
1781 continue
1782 else:
1783 yield 's[%d]' % prev
1784 if step is None:
1785 yield 's[%d]' % i
1786 else:
1787 yield _genslice(start, i, step)
1788
1789 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1790 cache_res = func(test_string)
1791 cache_spec = [ord(c) for c in cache_res]
1792 expr_code = ' + '.join(gen_sig_code(cache_spec))
1793 signature_id_tuple = '(%s)' % (
1794 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1795 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1796 ' return %s\n') % (signature_id_tuple, expr_code)
1797 self.to_screen('Extracted signature function:\n' + code)
1798
1799 def _parse_sig_js(self, jscode):
1800 funcname = self._search_regex(
1801 (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1802 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1803 r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
1804 r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
1805 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
1806 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1807 r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1808 # Obsolete patterns
1809 r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1810 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
1811 r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1812 r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1813 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1814 r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1815 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1816 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
1817 jscode, 'Initial JS player signature function name', group='sig')
1818
1819 jsi = JSInterpreter(jscode)
1820 initial_function = jsi.extract_function(funcname)
1821 return lambda s: initial_function([s])
1822
1823 def _decrypt_signature(self, s, video_id, player_url):
1824 """Turn the encrypted s field into a working signature"""
1825
1826 if player_url is None:
1827 raise ExtractorError('Cannot decrypt signature without player_url')
1828
1829 try:
1830 player_id = (player_url, self._signature_cache_id(s))
1831 if player_id not in self._player_cache:
1832 func = self._extract_signature_function(
1833 video_id, player_url, s
1834 )
1835 self._player_cache[player_id] = func
1836 func = self._player_cache[player_id]
1837 self._print_sig_code(func, s)
1838 return func(s)
1839 except Exception as e:
1840 raise ExtractorError('Signature extraction failed: ' + traceback.format_exc(), cause=e)
1841
1842 def _decrypt_nsig(self, s, video_id, player_url):
1843 """Turn the encrypted n field into a working signature"""
1844 if player_url is None:
1845 raise ExtractorError('Cannot decrypt nsig without player_url')
1846 if player_url.startswith('//'):
1847 player_url = 'https:' + player_url
1848 elif not re.match(r'https?://', player_url):
1849 player_url = compat_urlparse.urljoin(
1850 'https://www.youtube.com', player_url)
1851
1852 sig_id = ('nsig_value', s)
1853 if sig_id in self._player_cache:
1854 return self._player_cache[sig_id]
1855
1856 try:
1857 player_id = ('nsig', player_url)
1858 if player_id not in self._player_cache:
1859 self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
1860 func = self._player_cache[player_id]
1861 self._player_cache[sig_id] = func(s)
1862 self.write_debug(f'Decrypted nsig {s} => {self._player_cache[sig_id]}')
1863 return self._player_cache[sig_id]
1864 except Exception as e:
1865 raise ExtractorError(traceback.format_exc(), cause=e)
1866
1867 def _extract_n_function_name(self, jscode):
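# The pattern below targets player JS of roughly this shape (the function name
# 'lha' is purely illustrative): ...a.get("n"))&&(b=lha(b)...
# i.e. the three-character function that rewrites the "n" parameter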
1868 return self._search_regex(
1869 (r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z0-9$]{3})\([a-zA-Z0-9]\)',),
1870 jscode, 'Initial JS player n function name', group='nfunc')
1871
1872 def _extract_n_function(self, video_id, player_url):
1873 player_id = self._extract_player_info(player_url)
1874 func_code = self._downloader.cache.load('youtube-nsig', player_id)
1875
1876 if func_code:
1877 jsi = JSInterpreter(func_code)
1878 else:
1879 jscode = self._load_player(video_id, player_url)
1880 funcname = self._extract_n_function_name(jscode)
1881 jsi = JSInterpreter(jscode)
1882 func_code = jsi.extract_function_code(funcname)
1883 self._downloader.cache.store('youtube-nsig', player_id, func_code)
1884
1885 if self.get_param('youtube_print_sig_code'):
1886 self.to_screen(f'Extracted nsig function from {player_id}:\n{func_code[1]}\n')
1887
1888 return lambda s: jsi.extract_function_from_code(*func_code)([s])
1889
1890 def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
1891 """
1892 Extract signatureTimestamp (sts)
1893 Required to tell API what sig/player version is in use.
1894 """
1895 sts = None
1896 if isinstance(ytcfg, dict):
1897 sts = int_or_none(ytcfg.get('STS'))
1898
1899 if not sts:
1900 # Attempt to extract from player
1901 if player_url is None:
1902 error_msg = 'Cannot extract signature timestamp without player_url.'
1903 if fatal:
1904 raise ExtractorError(error_msg)
1905 self.report_warning(error_msg)
1906 return
1907 code = self._load_player(video_id, player_url, fatal=fatal)
1908 if code:
1909 sts = int_or_none(self._search_regex(
1910 r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
1911 'JS player signature timestamp', group='sts', fatal=fatal))
1912 return sts
1913
1914 def _mark_watched(self, video_id, player_responses):
1915 playback_url = traverse_obj(
1916 player_responses, (..., 'playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
1917 expected_type=url_or_none, get_all=False)
1918 if not playback_url:
1919 self.report_warning('Unable to mark watched')
1920 return
1921 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1922 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1923
1924 # cpn generation algorithm is reverse engineered from base.js.
1925 # In fact it works even with dummy cpn.
1926 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1927 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
1928
1929 qs.update({
1930 'ver': ['2'],
1931 'cpn': [cpn],
1932 })
1933 playback_url = compat_urlparse.urlunparse(
1934 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1935
1936 self._download_webpage(
1937 playback_url, video_id, 'Marking watched',
1938 'Unable to mark watched', fatal=False)
1939
1940 @staticmethod
1941 def _extract_urls(webpage):
1942 # Embedded YouTube player
1943 entries = [
1944 unescapeHTML(mobj.group('url'))
1945 for mobj in re.finditer(r'''(?x)
1946 (?:
1947 <iframe[^>]+?src=|
1948 data-video-url=|
1949 <embed[^>]+?src=|
1950 embedSWF\(?:\s*|
1951 <object[^>]+data=|
1952 new\s+SWFObject\(
1953 )
1954 (["\'])
1955 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
1956 (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
1957 \1''', webpage)]
1958
1959 # lazyYT YouTube embed
1960 entries.extend(list(map(
1961 unescapeHTML,
1962 re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
1963
1964 # Wordpress "YouTube Video Importer" plugin
1965 matches = re.findall(r'''(?x)<div[^>]+
1966 class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
1967 data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
1968 entries.extend(m[-1] for m in matches)
1969
1970 return entries
1971
1972 @staticmethod
1973 def _extract_url(webpage):
1974 urls = YoutubeIE._extract_urls(webpage)
1975 return urls[0] if urls else None
1976
1977 @classmethod
1978 def extract_id(cls, url):
1979 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1980 if mobj is None:
1981 raise ExtractorError('Invalid URL: %s' % url)
1982 return mobj.group('id')
1983
1984 def _extract_chapters_from_json(self, data, duration):
1985 chapter_list = traverse_obj(
1986 data, (
1987 'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
1988 'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
1989 ), expected_type=list)
1990
1991 return self._extract_chapters(
1992 chapter_list,
1993 chapter_time=lambda chapter: float_or_none(
1994 traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
1995 chapter_title=lambda chapter: traverse_obj(
1996 chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
1997 duration=duration)
1998
1999 def _extract_chapters_from_engagement_panel(self, data, duration):
2000 content_list = traverse_obj(
2001 data,
2002 ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
2003 expected_type=list, default=[])
2004 chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
2005 chapter_title = lambda chapter: self._get_text(chapter, 'title')
2006
2007 return next((
2008 filter(None, (
2009 self._extract_chapters(
2010 traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
2011 chapter_time, chapter_title, duration)
2012 for contents in content_list
2013 ))), [])
2014
2015 def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
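# Each chapter entry only carries its start time; a chapter's end time is filled
# in once the next chapter is seen, and the final chapter's end time is set to
# the video duration. An out-of-order start time triggers a warning and one of
# the conflicting chapters is dropped.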
2016 chapters = []
2017 last_chapter = {'start_time': 0}
2018 for idx, chapter in enumerate(chapter_list or []):
2019 title = chapter_title(chapter)
2020 start_time = chapter_time(chapter)
2021 if start_time is None:
2022 continue
2023 last_chapter['end_time'] = start_time
2024 if start_time < last_chapter['start_time']:
2025 if idx == 1:
2026 chapters.pop()
2027 self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
2028 else:
2029 self.report_warning(f'Invalid start time for chapter "{title}"')
2030 continue
2031 last_chapter = {'start_time': start_time, 'title': title}
2032 chapters.append(last_chapter)
2033 last_chapter['end_time'] = duration
2034 return chapters
2035
2036 def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
2037 return self._parse_json(self._search_regex(
2038 (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
2039 regex), webpage, name, default='{}'), video_id, fatal=False)
2040
2041 @staticmethod
2042 def parse_time_text(time_text):
2043 """
2044 Parse the comment time text
2045 time_text is in the format 'X units ago (edited)'
2046 """
2047 time_text_split = time_text.split(' ')
2048 if len(time_text_split) >= 3:
2049 try:
2050 return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
2051 except ValueError:
2052 return None
2053
2054 def _extract_comment(self, comment_renderer, parent=None):
2055 comment_id = comment_renderer.get('commentId')
2056 if not comment_id:
2057 return
2058
2059 text = self._get_text(comment_renderer, 'contentText')
2060
2061 # note: timestamp is an estimate calculated from the current time and time_text
2062 time_text = self._get_text(comment_renderer, 'publishedTimeText') or ''
2063 time_text_dt = self.parse_time_text(time_text)
2064 if isinstance(time_text_dt, datetime.datetime):
2065 timestamp = calendar.timegm(time_text_dt.timetuple())
2066 author = self._get_text(comment_renderer, 'authorText')
2067 author_id = try_get(comment_renderer,
2068 lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
2069
2070 votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
2071 lambda x: x['likeCount']), compat_str)) or 0
2072 author_thumbnail = try_get(comment_renderer,
2073 lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
2074
2075 author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
2076 is_favorited = 'creatorHeart' in (try_get(
2077 comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
2078 return {
2079 'id': comment_id,
2080 'text': text,
2081 'timestamp': timestamp,
2082 'time_text': time_text,
2083 'like_count': votes,
2084 'is_favorited': is_favorited,
2085 'author': author,
2086 'author_id': author_id,
2087 'author_thumbnail': author_thumbnail,
2088 'author_is_uploader': author_is_uploader,
2089 'parent': parent or 'root'
2090 }
2091
2092 def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, comment_counts=None):
2093
2094 def extract_header(contents):
2095 _continuation = None
2096 for content in contents:
2097 comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
2098 expected_comment_count = parse_count(self._get_text(
2099 comments_header_renderer, 'countText', 'commentsCount', max_runs=1))
2100
2101 if expected_comment_count:
2102 comment_counts[1] = expected_comment_count
2103 self.to_screen('Downloading ~%d comments' % expected_comment_count)
2104 sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
2105 comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top
2106
2107 sort_menu_item = try_get(
2108 comments_header_renderer,
2109 lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
2110 sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
2111
2112 _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
2113 if not _continuation:
2114 continue
2115
2116 sort_text = sort_menu_item.get('title')
2117 if isinstance(sort_text, compat_str):
2118 sort_text = sort_text.lower()
2119 else:
2120 sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
2121 self.to_screen('Sorting comments by %s' % sort_text)
2122 break
2123 return _continuation
2124
2125 def extract_thread(contents):
2126 if not parent:
2127 comment_counts[2] = 0
2128 for content in contents:
2129 comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
2130 comment_renderer = try_get(
2131 comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get(
2132 content, (lambda x: x['commentRenderer'], dict))
2133
2134 if not comment_renderer:
2135 continue
2136 comment = self._extract_comment(comment_renderer, parent)
2137 if not comment:
2138 continue
2139 comment_counts[0] += 1
2140 yield comment
2141 # Attempt to get the replies
2142 comment_replies_renderer = try_get(
2143 comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
2144
2145 if comment_replies_renderer:
2146 comment_counts[2] += 1
2147 comment_entries_iter = self._comment_entries(
2148 comment_replies_renderer, ytcfg, video_id,
2149 parent=comment.get('id'), comment_counts=comment_counts)
2150
2151 for reply_comment in comment_entries_iter:
2152 yield reply_comment
2153
2154 # YouTube comments have a max depth of 2
2155 max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf')
2156 if max_depth == 1 and parent:
2157 return
2158 if not comment_counts:
2159 # comments so far, est. total comments, current comment thread #
2160 comment_counts = [0, 0, 0]
2161
2162 continuation = self._extract_continuation(root_continuation_data)
2163 if continuation and len(continuation['continuation']) < 27:
2164 self.write_debug('Detected old API continuation token. Generating new API compatible token.')
2165 continuation_token = self._generate_comment_continuation(video_id)
2166 continuation = self._build_api_continuation_query(continuation_token, None)
2167
2168 message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
2169 if message and not parent:
2170 self.report_warning(message, video_id=video_id)
2171
2172 visitor_data = None
2173 is_first_continuation = parent is None
2174
2175 for page_num in itertools.count(0):
2176 if not continuation:
2177 break
2178 headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
2179 comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
2180 if page_num == 0:
2181 if is_first_continuation:
2182 note_prefix = 'Downloading comment section API JSON'
2183 else:
2184 note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
2185 comment_counts[2], comment_prog_str)
2186 else:
2187 note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
2188 ' ' if parent else '', ' replies' if parent else '',
2189 page_num, comment_prog_str)
2190
2191 response = self._extract_response(
2192 item_id=None, query=continuation,
2193 ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
2194 check_get_keys=('onResponseReceivedEndpoints', 'continuationContents'))
2195 if not response:
2196 break
2197 visitor_data = try_get(
2198 response,
2199 lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
2200 compat_str) or visitor_data
2201
2202 continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents'))
2203
2204 continuation = None
2205 if isinstance(continuation_contents, list):
2206 for continuation_section in continuation_contents:
2207 if not isinstance(continuation_section, dict):
2208 continue
2209 continuation_items = try_get(
2210 continuation_section,
2211 (lambda x: x['reloadContinuationItemsCommand']['continuationItems'],
2212 lambda x: x['appendContinuationItemsAction']['continuationItems']),
2213 list) or []
2214 if is_first_continuation:
2215 continuation = extract_header(continuation_items)
2216 is_first_continuation = False
2217 if continuation:
2218 break
2219 continue
2220 count = 0
2221 for count, entry in enumerate(extract_thread(continuation_items)):
2222 yield entry
2223 continuation = self._extract_continuation({'contents': continuation_items})
2224 if continuation:
2225 # Sometimes YouTube provides a continuation without any comments
2226 # In most cases these yield very few further comments, so an empty page is treated as the end
2227 if count == 0:
2228 if not parent:
2229 self.report_warning('No comments received - assuming end of comments')
2230 continuation = None
2231 break
2232
2233 # Deprecated response structure
2234 elif isinstance(continuation_contents, dict):
2235 known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation')
2236 for key, continuation_renderer in continuation_contents.items():
2237 if key not in known_continuation_renderers:
2238 continue
2239 if not isinstance(continuation_renderer, dict):
2240 continue
2241 if is_first_continuation:
2242 header_continuation_items = [continuation_renderer.get('header') or {}]
2243 continuation = extract_header(header_continuation_items)
2244 is_first_continuation = False
2245 if continuation:
2246 break
2247
2248 # Sometimes YouTube provides a continuation without any comments
2249 # In most cases these yield very few further comments, so an empty page is treated as the end
2250 count = 0
2251 for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {})):
2252 yield entry
2253 continuation = self._extract_continuation(continuation_renderer)
2254 if count == 0:
2255 if not parent:
2256 self.report_warning('No comments received - assuming end of comments')
2257 continuation = None
2258 break
2259
2260 @staticmethod
2261 def _generate_comment_continuation(video_id):
2262 """
2263 Generates initial comment section continuation token from given video id
2264 """
2265 b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8')))
2266 parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u')
2267 new_continuation_intlist = list(itertools.chain.from_iterable(
2268 [bytes_to_intlist(base64.b64decode(part)) for part in parts]))
2269 return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8')
2270
2271 def _get_comments(self, ytcfg, video_id, contents, webpage):
2272 """Entry for comment extraction"""
2273 def _real_comment_extract(contents):
2274 renderer = next((
2275 item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
2276 if item.get('sectionIdentifier') == 'comment-item-section'), None)
2277 yield from self._comment_entries(renderer, ytcfg, video_id)
2278
2279 max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
2280 # Force English regardless of account setting to prevent parsing issues
2281 # See: https://github.com/yt-dlp/yt-dlp/issues/532
2282 ytcfg = copy.deepcopy(ytcfg)
2283 traverse_obj(
2284 ytcfg, ('INNERTUBE_CONTEXT', 'client'), expected_type=dict, default={})['hl'] = 'en'
2285 return itertools.islice(_real_comment_extract(contents), 0, max_comments)
2286
2287 @staticmethod
2288 def _get_checkok_params():
2289 return {'contentCheckOk': True, 'racyCheckOk': True}
2290
2291 @classmethod
2292 def _generate_player_context(cls, sts=None):
2293 context = {
2294 'html5Preference': 'HTML5_PREF_WANTS',
2295 }
2296 if sts is not None:
2297 context['signatureTimestamp'] = sts
2298 return {
2299 'playbackContext': {
2300 'contentPlaybackContext': context
2301 },
2302 **cls._get_checkok_params()
2303 }
2304
2305 @staticmethod
2306 def _is_agegated(player_response):
2307 if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
2308 return True
2309
2310 reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
2311 AGE_GATE_REASONS = (
2312 'confirm your age', 'age-restricted', 'inappropriate', # reason
2313 'age_verification_required', 'age_check_required', # status
2314 )
2315 return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
2316
2317 @staticmethod
2318 def _is_unplayable(player_response):
2319 return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
2320
2321 def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
2322
2323 session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
2324 syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
2325 sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
2326 headers = self.generate_api_headers(
2327 ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
2328
2329 yt_query = {'videoId': video_id}
2330 yt_query.update(self._generate_player_context(sts))
2331 return self._extract_response(
2332 item_id=video_id, ep='player', query=yt_query,
2333 ytcfg=player_ytcfg, headers=headers, fatal=True,
2334 default_client=client,
2335 note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
2336 ) or None
2337
2338 def _get_requested_clients(self, url, smuggled_data):
2339 requested_clients = []
2340 allowed_clients = sorted(
2341 [client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'],
2342 key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
2343 for client in self._configuration_arg('player_client'):
2344 if client in allowed_clients:
2345 requested_clients.append(client)
2346 elif client == 'all':
2347 requested_clients.extend(allowed_clients)
2348 else:
2349 self.report_warning(f'Skipping unsupported client {client}')
2350 if not requested_clients:
2351 requested_clients = ['android', 'web']
2352
2353 if smuggled_data.get('is_music_url') or self.is_music_url(url):
2354 requested_clients.extend(
2355 f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
2356
2357 return orderedSet(requested_clients)
2358
2359 def _extract_player_ytcfg(self, client, video_id):
2360 url = {
2361 'web_music': 'https://music.youtube.com',
2362 'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
2363 }.get(client)
2364 if not url:
2365 return {}
2366 webpage = self._download_webpage(url, video_id, fatal=False, note=f'Downloading {client} config')
2367 return self.extract_ytcfg(video_id, webpage) or {}
2368
2369 def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
2370 initial_pr = None
2371 if webpage:
2372 initial_pr = self._extract_yt_initial_variable(
2373 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
2374 video_id, 'initial player response')
2375
2376 original_clients = clients
2377 clients = clients[::-1]
2378 prs = []
2379
2380 def append_client(client_name):
2381 if client_name in INNERTUBE_CLIENTS and client_name not in original_clients:
2382 clients.append(client_name)
2383
2384 # Android player_response does not have microFormats which are needed for
2385 # extraction of some data. So we return the initial_pr with formats
2386 # stripped out even if not requested by the user
2387 # See: https://github.com/yt-dlp/yt-dlp/issues/501
2388 if initial_pr:
2389 pr = dict(initial_pr)
2390 pr['streamingData'] = None
2391 prs.append(pr)
2392
2393 last_error = None
2394 tried_iframe_fallback = False
2395 player_url = None
2396 while clients:
2397 client = clients.pop()
2398 player_ytcfg = master_ytcfg if client == 'web' else {}
2399 if 'configs' not in self._configuration_arg('player_skip'):
2400 player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
2401
2402 player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
2403 require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
2404 if 'js' in self._configuration_arg('player_skip'):
2405 require_js_player = False
2406 player_url = None
2407
2408 if not player_url and not tried_iframe_fallback and require_js_player:
2409 player_url = self._download_player_url(video_id)
2410 tried_iframe_fallback = True
2411
2412 try:
2413 pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
2414 client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
2415 except ExtractorError as e:
2416 if last_error:
2417 self.report_warning(last_error)
2418 last_error = e
2419 continue
2420
2421 if pr:
2422 prs.append(pr)
2423
2424 # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
2425 if client.endswith('_agegate') and self._is_unplayable(pr) and self.is_authenticated:
2426 append_client(client.replace('_agegate', '_creator'))
2427 elif self._is_agegated(pr):
2428 append_client(f'{client}_agegate')
2429
2430 if last_error:
2431 if not len(prs):
2432 raise last_error
2433 self.report_warning(last_error)
2434 return prs, player_url
2435
2436 def _extract_formats(self, streaming_data, video_id, player_url, is_live):
2437 itags, stream_ids = [], []
2438 itag_qualities, res_qualities = {}, {}
2439 q = qualities([
2440 # Normally 'tiny' is the smallest video-only format, but
2441 # audio-only formats with unknown quality may also get tagged as tiny
2442 'tiny',
2443 'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
2444 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
2445 ])
2446 streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
2447
2448 for fmt in streaming_formats:
2449 if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
2450 continue
2451
2452 itag = str_or_none(fmt.get('itag'))
2453 audio_track = fmt.get('audioTrack') or {}
2454 stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
2455 if stream_id in stream_ids:
2456 continue
2457
2458 quality = fmt.get('quality')
2459 height = int_or_none(fmt.get('height'))
2460 if quality == 'tiny' or not quality:
2461 quality = fmt.get('audioQuality', '').lower() or quality
2462 # The 3gp format (17) in the android client has a quality of "small",
2463 # but is actually worse than other formats
2464 if itag == '17':
2465 quality = 'tiny'
2466 if quality:
2467 if itag:
2468 itag_qualities[itag] = quality
2469 if height:
2470 res_qualities[height] = quality
2471 # FORMAT_STREAM_TYPE_OTF (otf=1) requires downloading the init fragment
2472 # (adding `&sq=0` to the URL) and parsing the emsg box to determine the
2473 # number of fragments that would subsequently be requested with `&sq=N`
2474 if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
2475 continue
2476
2477 fmt_url = fmt.get('url')
2478 if not fmt_url:
2479 sc = compat_parse_qs(fmt.get('signatureCipher'))
2480 fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
2481 encrypted_sig = try_get(sc, lambda x: x['s'][0])
2482 if not (sc and fmt_url and encrypted_sig):
2483 continue
2484 if not player_url:
2485 continue
2486 signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
2487 sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
2488 fmt_url += '&' + sp + '=' + signature
2489
2490 query = parse_qs(fmt_url)
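# The 'n' query parameter is a challenge that has to be transformed by a
# function from the player JS (see _decrypt_nsig); if it is left untouched,
# the servers appear to throttle the download speed, so failures here are
# reported and the format is tagged as THROTTLED below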
2491 throttled = False
2492 if query.get('ratebypass') != ['yes'] and query.get('n'):
2493 try:
2494 fmt_url = update_url_query(fmt_url, {
2495 'n': self._decrypt_nsig(query['n'][0], video_id, player_url)})
2496 except ExtractorError as e:
2497 self.report_warning(f'nsig extraction failed: You may experience throttling for some formats\n{e}', only_once=True)
2498 throttled = True
2499
2500 if itag:
2501 itags.append(itag)
2502 stream_ids.append(stream_id)
2503
2504 tbr = float_or_none(
2505 fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
2506 dct = {
2507 'asr': int_or_none(fmt.get('audioSampleRate')),
2508 'filesize': int_or_none(fmt.get('contentLength')),
2509 'format_id': itag,
2510 'format_note': ', '.join(filter(None, (
2511 '%s%s' % (audio_track.get('displayName') or '',
2512 ' (default)' if audio_track.get('audioIsDefault') else ''),
2513 fmt.get('qualityLabel') or quality.replace('audio_quality_', ''),
2514 throttled and 'THROTTLED'))),
2515 'source_preference': -1 if not throttled else -10,
2516 'fps': int_or_none(fmt.get('fps')),
2517 'height': height,
2518 'quality': q(quality),
2519 'tbr': tbr,
2520 'url': fmt_url,
2521 'width': int_or_none(fmt.get('width')),
2522 'language': audio_track.get('id', '').split('.')[0],
2523 'language_preference': 1 if audio_track.get('audioIsDefault') else -1,
2524 }
2525 mime_mobj = re.match(
2526 r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
2527 if mime_mobj:
2528 dct['ext'] = mimetype2ext(mime_mobj.group(1))
2529 dct.update(parse_codecs(mime_mobj.group(2)))
2530 no_audio = dct.get('acodec') == 'none'
2531 no_video = dct.get('vcodec') == 'none'
2532 if no_audio:
2533 dct['vbr'] = tbr
2534 if no_video:
2535 dct['abr'] = tbr
2536 if no_audio or no_video:
2537 dct['downloader_options'] = {
2538 # Youtube throttles chunks >~10M
2539 'http_chunk_size': 10485760,
2540 }
2541 if dct.get('ext'):
2542 dct['container'] = dct['ext'] + '_dash'
2543 yield dct
2544
2545 skip_manifests = self._configuration_arg('skip')
2546 get_dash = (
2547 (not is_live or self._configuration_arg('include_live_dash'))
2548 and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True))
2549 get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True)
2550
2551 def guess_quality(f):
2552 for val, qdict in ((f.get('format_id'), itag_qualities), (f.get('height'), res_qualities)):
2553 if val in qdict:
2554 return q(qdict[val])
2555 return -1
2556
2557 for sd in streaming_data:
2558 hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
2559 if hls_manifest_url:
2560 for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
2561 itag = self._search_regex(
2562 r'/itag/(\d+)', f['url'], 'itag', default=None)
2563 if itag in itags:
2564 itag += '-hls'
2565 if itag in itags:
2566 continue
2567 if itag:
2568 f['format_id'] = itag
2569 itags.append(itag)
2570 f['quality'] = guess_quality(f)
2571 yield f
2572
2573 dash_manifest_url = get_dash and sd.get('dashManifestUrl')
2574 if dash_manifest_url:
2575 for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
2576 itag = f['format_id']
2577 if itag in itags:
2578 itag += '-dash'
2579 if itag in itags:
2580 continue
2581 if itag:
2582 f['format_id'] = itag
2583 itags.append(itag)
2584 f['quality'] = guess_quality(f)
2585 filesize = int_or_none(self._search_regex(
2586 r'/clen/(\d+)', f.get('fragment_base_url')
2587 or f['url'], 'file size', default=None))
2588 if filesize:
2589 f['filesize'] = filesize
2590 yield f
2591
2592 def _real_extract(self, url):
2593 url, smuggled_data = unsmuggle_url(url, {})
2594 video_id = self._match_id(url)
2595
2596 base_url = self.http_scheme() + '//www.youtube.com/'
2597 webpage_url = base_url + 'watch?v=' + video_id
2598 webpage = None
2599 if 'webpage' not in self._configuration_arg('player_skip'):
2600 webpage = self._download_webpage(
2601 webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
2602
2603 master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
2604
2605 player_responses, player_url = self._extract_player_responses(
2606 self._get_requested_clients(url, smuggled_data),
2607 video_id, webpage, master_ytcfg)
2608
2609 get_first = lambda obj, keys, **kwargs: traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
2610
2611 playability_statuses = traverse_obj(
2612 player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
2613
2614 trailer_video_id = get_first(
2615 playability_statuses,
2616 ('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
2617 expected_type=str)
2618 if trailer_video_id:
2619 return self.url_result(
2620 trailer_video_id, self.ie_key(), trailer_video_id)
2621
2622 search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
2623 if webpage else (lambda x: None))
2624
2625 video_details = traverse_obj(
2626 player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
2627 microformats = traverse_obj(
2628 player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
2629 expected_type=dict, default=[])
2630 video_title = (
2631 get_first(video_details, 'title')
2632 or self._get_text(microformats, (..., 'title'))
2633 or search_meta(['og:title', 'twitter:title', 'title']))
2634 video_description = get_first(video_details, 'shortDescription')
2635
2636 if not smuggled_data.get('force_singlefeed', False):
2637 if not self.get_param('noplaylist'):
2638 multifeed_metadata_list = get_first(
2639 player_responses,
2640 ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
2641 expected_type=str)
2642 if multifeed_metadata_list:
2643 entries = []
2644 feed_ids = []
2645 for feed in multifeed_metadata_list.split(','):
2646 # Unquote should take place before split on comma (,) since textual
2647 # fields may contain comma as well (see
2648 # https://github.com/ytdl-org/youtube-dl/issues/8536)
2649 feed_data = compat_parse_qs(
2650 compat_urllib_parse_unquote_plus(feed))
2651
2652 def feed_entry(name):
2653 return try_get(
2654 feed_data, lambda x: x[name][0], compat_str)
2655
2656 feed_id = feed_entry('id')
2657 if not feed_id:
2658 continue
2659 feed_title = feed_entry('title')
2660 title = video_title
2661 if feed_title:
2662 title += ' (%s)' % feed_title
2663 entries.append({
2664 '_type': 'url_transparent',
2665 'ie_key': 'Youtube',
2666 'url': smuggle_url(
2667 '%swatch?v=%s' % (base_url, feed_data['id'][0]),
2668 {'force_singlefeed': True}),
2669 'title': title,
2670 })
2671 feed_ids.append(feed_id)
2672 self.to_screen(
2673 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
2674 % (', '.join(feed_ids), video_id))
2675 return self.playlist_result(
2676 entries, video_id, video_title, video_description)
2677 else:
2678 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2679
2680 live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
2681 is_live = get_first(video_details, 'isLive')
2682 if is_live is None:
2683 is_live = get_first(live_broadcast_details, 'isLiveNow')
2684
2685 streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
2686 formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live))
2687
2688 if not formats:
2689 if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
2690 self.report_drm(video_id)
2691 pemr = get_first(
2692 playability_statuses,
2693 ('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
2694 reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
2695 subreason = clean_html(self._get_text(pemr, 'subreason') or '')
2696 if subreason:
2697 if subreason == 'The uploader has not made this video available in your country.':
2698 countries = get_first(microformats, 'availableCountries')
2699 if not countries:
2700 regions_allowed = search_meta('regionsAllowed')
2701 countries = regions_allowed.split(',') if regions_allowed else None
2702 self.raise_geo_restricted(subreason, countries, metadata_available=True)
2703 reason += f'. {subreason}'
2704 if reason:
2705 self.raise_no_formats(reason, expected=True)
2706
2707 # 'source' is given priority in the sort order since formats that throttle are given a lower source_preference
2708 # When the throttling issue is fully fixed, remove this
2709 self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang'))
2710
2711 keywords = get_first(video_details, 'keywords', expected_type=list) or []
2712 if not keywords and webpage:
2713 keywords = [
2714 unescapeHTML(m.group('content'))
2715 for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
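# A 'yt:stretch=W:H' keyword (e.g. 'yt:stretch=16:9') requests a forced display
# aspect ratio; the ratio W/H (about 1.78 in the example) is attached to each
# video format as 'stretched_ratio'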
2716 for keyword in keywords:
2717 if keyword.startswith('yt:stretch='):
2718 mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
2719 if mobj:
2720 # NB: float is intentional for forcing float division
2721 w, h = (float(v) for v in mobj.groups())
2722 if w > 0 and h > 0:
2723 ratio = w / h
2724 for f in formats:
2725 if f.get('vcodec') != 'none':
2726 f['stretched_ratio'] = ratio
2727 break
2728
2729 thumbnails = []
2730 thumbnail_dicts = traverse_obj(
2731 (video_details, microformats), (..., ..., 'thumbnail', 'thumbnails', ...),
2732 expected_type=dict, default=[])
2733 for thumbnail in thumbnail_dicts:
2734 thumbnail_url = thumbnail.get('url')
2735 if not thumbnail_url:
2736 continue
2737 # Sometimes youtube gives a wrong thumbnail URL. See:
2738 # https://github.com/yt-dlp/yt-dlp/issues/233
2739 # https://github.com/ytdl-org/youtube-dl/issues/28023
2740 if 'maxresdefault' in thumbnail_url:
2741 thumbnail_url = thumbnail_url.split('?')[0]
2742 thumbnails.append({
2743 'url': thumbnail_url,
2744 'height': int_or_none(thumbnail.get('height')),
2745 'width': int_or_none(thumbnail.get('width')),
2746 })
2747 thumbnail_url = search_meta(['og:image', 'twitter:image'])
2748 if thumbnail_url:
2749 thumbnails.append({
2750 'url': thumbnail_url,
2751 })
2752 original_thumbnails = thumbnails.copy()
2753
2754 # The best resolution thumbnail sometimes does not appear in the webpage
2755 # See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
2756 # List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
2757 thumbnail_names = [
2758 'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
2759 'hqdefault', 'hq1', 'hq2', 'hq3', '0',
2760 'mqdefault', 'mq1', 'mq2', 'mq3',
2761 'default', '1', '2', '3'
2762 ]
2763 n_thumbnail_names = len(thumbnail_names)
2764 thumbnails.extend({
2765 'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
2766 video_id=video_id, name=name, ext=ext,
2767 webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
2768 } for name in thumbnail_names for ext in ('webp', 'jpg'))
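# Preference sketch for the guessed URLs above: entries are ranked by how early
# their name appears in thumbnail_names (maxresdefault first), with the webp
# variant of a name preferred over the jpg one; URLs matching no known name get
# the lowest rank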
2769 for thumb in thumbnails:
2770 i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
2771 thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
2772 self._remove_duplicate_formats(thumbnails)
2773 self._downloader._sort_thumbnails(original_thumbnails)
2774
2775 category = get_first(microformats, 'category') or search_meta('genre')
2776 channel_id = str_or_none(
2777 get_first(video_details, 'channelId')
2778 or get_first(microformats, 'externalChannelId')
2779 or search_meta('channelId'))
2780 duration = int_or_none(
2781 get_first(video_details, 'lengthSeconds')
2782 or get_first(microformats, 'lengthSeconds')
2783 or parse_duration(search_meta('duration'))) or None
2784 owner_profile_url = get_first(microformats, 'ownerProfileUrl')
2785
2786 live_content = get_first(video_details, 'isLiveContent')
2787 is_upcoming = get_first(video_details, 'isUpcoming')
2788 if is_live is None:
2789 if is_upcoming or live_content is False:
2790 is_live = False
2791 if is_upcoming is None and (live_content or is_live):
2792 is_upcoming = False
2793 live_starttime = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
2794 live_endtime = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
2795 if not duration and live_endtime and live_starttime:
2796 duration = live_endtime - live_starttime
2797
2798 info = {
2799 'id': video_id,
2800 'title': self._live_title(video_title) if is_live else video_title,
2801 'formats': formats,
2802 'thumbnails': thumbnails,
2803 # The best thumbnail that we are sure exists. Prevents unnecessary
2804 # URL checking if the user doesn't care about getting the best possible thumbnail
2805 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
2806 'description': video_description,
2807 'upload_date': unified_strdate(
2808 get_first(microformats, 'uploadDate')
2809 or search_meta('uploadDate')),
2810 'uploader': get_first(video_details, 'author'),
2811 'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
2812 'uploader_url': owner_profile_url,
2813 'channel_id': channel_id,
2814 'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None,
2815 'duration': duration,
2816 'view_count': int_or_none(
2817 get_first((video_details, microformats), (..., 'viewCount'))
2818 or search_meta('interactionCount')),
2819 'average_rating': float_or_none(get_first(video_details, 'averageRating')),
2820 'age_limit': 18 if (
2821 get_first(microformats, 'isFamilySafe') is False
2822 or search_meta('isFamilyFriendly') == 'false'
2823 or search_meta('og:restrictions:age') == '18+') else 0,
2824 'webpage_url': webpage_url,
2825 'categories': [category] if category else None,
2826 'tags': keywords,
2827 'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
2828 'is_live': is_live,
2829 'was_live': (False if is_live or is_upcoming or live_content is False
2830 else None if is_live is None or is_upcoming is None
2831 else live_content),
2832 'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
2833 'release_timestamp': live_starttime,
2834 }
2835
2836 pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
2837 if pctr:
2838 def get_lang_code(track):
2839 return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
2840 or track.get('languageCode'))
2841
2842 # Converted into dicts to remove duplicates
2843 captions = {
2844 get_lang_code(sub): sub
2845 for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
2846 translation_languages = {
2847 lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
2848 for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
2849
2850 def process_language(container, base_url, lang_code, sub_name, query):
2851 lang_subs = container.setdefault(lang_code, [])
2852 for fmt in self._SUBTITLE_FORMATS:
2853 query.update({
2854 'fmt': fmt,
2855 })
2856 lang_subs.append({
2857 'ext': fmt,
2858 'url': update_url_query(base_url, query),
2859 'name': sub_name,
2860 })
2861
2862 subtitles, automatic_captions = {}, {}
2863 for lang_code, caption_track in captions.items():
2864 base_url = caption_track.get('baseUrl')
2865 if not base_url:
2866 continue
2867 lang_name = self._get_text(caption_track, 'name', max_runs=1)
2868 if caption_track.get('kind') != 'asr':
2869 if not lang_code:
2870 continue
2871 process_language(
2872 subtitles, base_url, lang_code, lang_name, {})
2873 if not caption_track.get('isTranslatable'):
2874 continue
2875 for trans_code, trans_name in translation_languages.items():
2876 if not trans_code:
2877 continue
2878 if caption_track.get('kind') != 'asr':
2879 trans_code += f'-{lang_code}'
2880 trans_name += format_field(lang_name, template=' from %s')
2881 process_language(
2882 automatic_captions, base_url, trans_code, trans_name, {'tlang': trans_code})
2883 info['automatic_captions'] = automatic_captions
2884 info['subtitles'] = subtitles
2885
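# Clip boundaries may also be given in the URL itself, e.g. '#t=1m30s' or
# '?start=90&end=120'; parse_duration converts such values to seconds (90.0 for
# the start in both of these examples) and they are stored as start_time/end_time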
2886 parsed_url = compat_urllib_parse_urlparse(url)
2887 for component in [parsed_url.fragment, parsed_url.query]:
2888 query = compat_parse_qs(component)
2889 for k, v in query.items():
2890 for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
2891 d_k += '_time'
2892 if d_k not in info and k in s_ks:
2893 info[d_k] = parse_duration(query[k][0])
2894
2895 # Youtube Music Auto-generated description
2896 if video_description:
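# The regex below expects the layout of auto-generated YouTube Music descriptions,
# roughly (values are illustrative, borrowed from the MgNrAu2pzNs test above):
#   Provided to YouTube by ...
#   Voyeur Girl · Stephen
#   it's too much love to know my dear
#   ℗ 2019 ...
#   Released on: 2019-03-13
#   Auto-generated by YouTube.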
2897 mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
2898 if mobj:
2899 release_year = mobj.group('release_year')
2900 release_date = mobj.group('release_date')
2901 if release_date:
2902 release_date = release_date.replace('-', '')
2903 if not release_year:
2904 release_year = release_date[:4]
2905 info.update({
2906 'album': mobj.group('album').strip(),
2907 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
2908 'track': mobj.group('track').strip(),
2909 'release_date': release_date,
2910 'release_year': int_or_none(release_year),
2911 })
2912
2913 initial_data = None
2914 if webpage:
2915 initial_data = self._extract_yt_initial_variable(
2916 webpage, self._YT_INITIAL_DATA_RE, video_id,
2917 'yt initial data')
2918 if not initial_data:
2919 query = {'videoId': video_id}
2920 query.update(self._get_checkok_params())
2921 initial_data = self._extract_response(
2922 item_id=video_id, ep='next', fatal=False,
2923 ytcfg=master_ytcfg, query=query,
2924 headers=self.generate_api_headers(ytcfg=master_ytcfg),
2925 note='Downloading initial data API JSON')
2926
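# Expose the live chat as a pseudo-subtitle track when the watch page has a conversation bar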
2927 try:
2928 # This will raise if there is no live chat
2929 initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
2930 info.setdefault('subtitles', {})['live_chat'] = [{
2931 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
2932 'video_id': video_id,
2933 'ext': 'json',
2934 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
2935 }]
2936 except (KeyError, IndexError, TypeError):
2937 pass
2938
2939 if initial_data:
2940 info['chapters'] = (
2941 self._extract_chapters_from_json(initial_data, duration)
2942 or self._extract_chapters_from_engagement_panel(initial_data, duration)
2943 or None)
2944
2945 contents = try_get(
2946 initial_data,
2947 lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
2948 list) or []
2949 for content in contents:
2950 vpir = content.get('videoPrimaryInfoRenderer')
2951 if vpir:
2952 stl = vpir.get('superTitleLink')
2953 if stl:
2954 stl = self._get_text(stl)
2955 if try_get(
2956 vpir,
2957 lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
2958 info['location'] = stl
2959 else:
2960 mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
2961 if mobj:
2962 info.update({
2963 'series': mobj.group(1),
2964 'season_number': int(mobj.group(2)),
2965 'episode_number': int(mobj.group(3)),
2966 })
2967 for tlb in (try_get(
2968 vpir,
2969 lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
2970 list) or []):
2971 tbr = tlb.get('toggleButtonRenderer') or {}
2972 for getter, regex in [(
2973 lambda x: x['defaultText']['accessibility']['accessibilityData'],
2974 r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
2975 lambda x: x['accessibility'],
2976 lambda x: x['accessibilityData']['accessibilityData'],
2977 ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
2978 label = (try_get(tbr, getter, dict) or {}).get('label')
2979 if label:
2980 mobj = re.match(regex, label)
2981 if mobj:
2982 info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
2983 break
2984 sbr_tooltip = try_get(
2985 vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
2986 if sbr_tooltip:
2987 like_count, dislike_count = sbr_tooltip.split(' / ')
2988 info.update({
2989 'like_count': str_to_int(like_count),
2990 'dislike_count': str_to_int(dislike_count),
2991 })
2992 vsir = content.get('videoSecondaryInfoRenderer')
2993 if vsir:
2994 info['channel'] = self._get_text(vsir, ('owner', 'videoOwnerRenderer', 'title'))
2995 rows = try_get(
2996 vsir,
2997 lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
2998 list) or []
2999 multiple_songs = False
3000 for row in rows:
3001 if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
3002 multiple_songs = True
3003 break
3004 for row in rows:
3005 mrr = row.get('metadataRowRenderer') or {}
3006 mrr_title = mrr.get('title')
3007 if not mrr_title:
3008 continue
3009 mrr_title = self._get_text(mrr, 'title')
3010 mrr_contents_text = self._get_text(mrr, ('contents', 0))
3011 if mrr_title == 'License':
3012 info['license'] = mrr_contents_text
3013 elif not multiple_songs:
3014 if mrr_title == 'Album':
3015 info['album'] = mrr_contents_text
3016 elif mrr_title == 'Artist':
3017 info['artist'] = mrr_contents_text
3018 elif mrr_title == 'Song':
3019 info['track'] = mrr_contents_text
3020
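# Fill in missing channel fields from the corresponding uploader fields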
3021 fallbacks = {
3022 'channel': 'uploader',
3023 'channel_id': 'uploader_id',
3024 'channel_url': 'uploader_url',
3025 }
3026 for to, frm in fallbacks.items():
3027 if not info.get(to):
3028 info[to] = info.get(frm)
3029
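# Mirror music metadata into the generic creator/alt_title fields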
3030 for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
3031 v = info.get(s_k)
3032 if v:
3033 info[d_k] = v
3034
3035 is_private = get_first(video_details, 'isPrivate', expected_type=bool)
3036 is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
3037 is_membersonly = None
3038 is_premium = None
3039 if initial_data and is_private is not None:
3040 is_membersonly = False
3041 is_premium = False
3042 contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
3043 badge_labels = set()
3044 for content in contents:
3045 if not isinstance(content, dict):
3046 continue
3047 badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
3048 for badge_label in badge_labels:
3049 if badge_label.lower() == 'members only':
3050 is_membersonly = True
3051 elif badge_label.lower() == 'premium':
3052 is_premium = True
3053 elif badge_label.lower() == 'unlisted':
3054 is_unlisted = True
3055
3056 info['availability'] = self._availability(
3057 is_private=is_private,
3058 needs_premium=is_premium,
3059 needs_subscription=is_membersonly,
3060 needs_auth=info['age_limit'] >= 18,
3061 is_unlisted=None if is_private is None else is_unlisted)
3062
3063 info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
3064
3065 self.mark_watched(video_id, player_responses)
3066
3067 return info
3068
3069
3070 class YoutubeTabIE(YoutubeBaseInfoExtractor):
3071 IE_DESC = 'YouTube Tabs'
3072 _VALID_URL = r'''(?x)
3073 https?://
3074 (?:\w+\.)?
3075 (?:
3076 youtube(?:kids)?\.com|
3077 invidio\.us
3078 )/
3079 (?:
3080 (?P<channel_type>channel|c|user|browse)/|
3081 (?P<not_channel>
3082 feed/|hashtag/|
3083 (?:playlist|watch)\?.*?\blist=
3084 )|
3085 (?!(?:%s)\b) # Direct URLs
3086 )
3087 (?P<id>[^/?\#&]+)
3088 ''' % YoutubeBaseInfoExtractor._RESERVED_NAMES
3089 IE_NAME = 'youtube:tab'
3090
3091 _TESTS = [{
3092 'note': 'playlists, multipage',
3093 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
3094 'playlist_mincount': 94,
3095 'info_dict': {
3096 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3097 'title': 'Игорь Клейнер - Playlists',
3098 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3099 'uploader': 'Игорь Клейнер',
3100 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3101 },
3102 }, {
3103 'note': 'playlists, multipage, different order',
3104 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
3105 'playlist_mincount': 94,
3106 'info_dict': {
3107 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3108 'title': 'Игорь Клейнер - Playlists',
3109 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3110 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3111 'uploader': 'Игорь Клейнер',
3112 },
3113 }, {
3114 'note': 'playlists, series',
3115 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
3116 'playlist_mincount': 5,
3117 'info_dict': {
3118 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3119 'title': '3Blue1Brown - Playlists',
3120 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3121 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3122 'uploader': '3Blue1Brown',
3123 },
3124 }, {
3125 'note': 'playlists, singlepage',
3126 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
3127 'playlist_mincount': 4,
3128 'info_dict': {
3129 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3130 'title': 'ThirstForScience - Playlists',
3131 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
3132 'uploader': 'ThirstForScience',
3133 'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3134 }
3135 }, {
3136 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
3137 'only_matching': True,
3138 }, {
3139 'note': 'basic, single video playlist',
3140 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3141 'info_dict': {
3142 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3143 'uploader': 'Sergey M.',
3144 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3145 'title': 'youtube-dl public playlist',
3146 },
3147 'playlist_count': 1,
3148 }, {
3149 'note': 'empty playlist',
3150 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3151 'info_dict': {
3152 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3153 'uploader': 'Sergey M.',
3154 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3155 'title': 'youtube-dl empty playlist',
3156 },
3157 'playlist_count': 0,
3158 }, {
3159 'note': 'Home tab',
3160 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
3161 'info_dict': {
3162 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3163 'title': 'lex will - Home',
3164 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3165 'uploader': 'lex will',
3166 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3167 },
3168 'playlist_mincount': 2,
3169 }, {
3170 'note': 'Videos tab',
3171 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
3172 'info_dict': {
3173 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3174 'title': 'lex will - Videos',
3175 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3176 'uploader': 'lex will',
3177 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3178 },
3179 'playlist_mincount': 975,
3180 }, {
3181 'note': 'Videos tab, sorted by popular',
3182 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
3183 'info_dict': {
3184 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3185 'title': 'lex will - Videos',
3186 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3187 'uploader': 'lex will',
3188 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3189 },
3190 'playlist_mincount': 199,
3191 }, {
3192 'note': 'Playlists tab',
3193 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
3194 'info_dict': {
3195 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3196 'title': 'lex will - Playlists',
3197 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3198 'uploader': 'lex will',
3199 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3200 },
3201 'playlist_mincount': 17,
3202 }, {
3203 'note': 'Community tab',
3204 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
3205 'info_dict': {
3206 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3207 'title': 'lex will - Community',
3208 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3209 'uploader': 'lex will',
3210 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3211 },
3212 'playlist_mincount': 18,
3213 }, {
3214 'note': 'Channels tab',
3215 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
3216 'info_dict': {
3217 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3218 'title': 'lex will - Channels',
3219 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3220 'uploader': 'lex will',
3221 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3222 },
3223 'playlist_mincount': 12,
3224 }, {
3225 'note': 'Search tab',
3226 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
3227 'playlist_mincount': 40,
3228 'info_dict': {
3229 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3230 'title': '3Blue1Brown - Search - linear algebra',
3231 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3232 'uploader': '3Blue1Brown',
3233 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3234 },
3235 }, {
3236 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3237 'only_matching': True,
3238 }, {
3239 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3240 'only_matching': True,
3241 }, {
3242 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3243 'only_matching': True,
3244 }, {
3245 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
3246 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3247 'info_dict': {
3248 'title': '29C3: Not my department',
3249 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3250 'uploader': 'Christiaan008',
3251 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
3252 'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
3253 },
3254 'playlist_count': 96,
3255 }, {
3256 'note': 'Large playlist',
3257 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
3258 'info_dict': {
3259 'title': 'Uploads from Cauchemar',
3260 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
3261 'uploader': 'Cauchemar',
3262 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
3263 },
3264 'playlist_mincount': 1123,
3265 }, {
3266 'note': 'even larger playlist, 8832 videos',
3267 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
3268 'only_matching': True,
3269 }, {
3270 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
3271 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
3272 'info_dict': {
3273 'title': 'Uploads from Interstellar Movie',
3274 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
3275 'uploader': 'Interstellar Movie',
3276 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
3277 },
3278 'playlist_mincount': 21,
3279 }, {
3280 'note': 'Playlist with "show unavailable videos" button',
3281 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
3282 'info_dict': {
3283 'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
3284 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
3285 'uploader': 'Phim Siêu Nhân Nhật Bản',
3286 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
3287 },
3288 'playlist_mincount': 200,
3289 }, {
3290 'note': 'Playlist with unavailable videos in page 7',
3291 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
3292 'info_dict': {
3293 'title': 'Uploads from BlankTV',
3294 'id': 'UU8l9frL61Yl5KFOl87nIm2w',
3295 'uploader': 'BlankTV',
3296 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
3297 },
3298 'playlist_mincount': 1000,
3299 }, {
3300 'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
3301 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
3302 'info_dict': {
3303 'title': 'Data Analysis with Dr Mike Pound',
3304 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
3305 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
3306 'uploader': 'Computerphile',
3307 'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
3308 },
3309 'playlist_mincount': 11,
3310 }, {
3311 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3312 'only_matching': True,
3313 }, {
3314 'note': 'Playlist URL that does not actually serve a playlist',
3315 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
3316 'info_dict': {
3317 'id': 'FqZTN594JQw',
3318 'ext': 'webm',
3319 'title': "Smiley's People 01 detective, Adventure Series, Action",
3320 'uploader': 'STREEM',
3321 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
3322 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
3323 'upload_date': '20150526',
3324 'license': 'Standard YouTube License',
3325 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
3326 'categories': ['People & Blogs'],
3327 'tags': list,
3328 'view_count': int,
3329 'like_count': int,
3330 'dislike_count': int,
3331 },
3332 'params': {
3333 'skip_download': True,
3334 },
3335 'skip': 'This video is not available.',
3336 'add_ie': [YoutubeIE.ie_key()],
3337 }, {
3338 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
3339 'only_matching': True,
3340 }, {
3341 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
3342 'only_matching': True,
3343 }, {
3344 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
3345 'info_dict': {
3346 'id': '3yImotZU3tw', # This will keep changing
3347 'ext': 'mp4',
3348 'title': compat_str,
3349 'uploader': 'Sky News',
3350 'uploader_id': 'skynews',
3351 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
3352 'upload_date': r're:\d{8}',
3353 'description': compat_str,
3354 'categories': ['News & Politics'],
3355 'tags': list,
3356 'like_count': int,
3357 'dislike_count': int,
3358 },
3359 'params': {
3360 'skip_download': True,
3361 },
3362 'expected_warnings': ['Downloading just video ', 'Ignoring subtitle tracks found in '],
3363 }, {
3364 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
3365 'info_dict': {
3366 'id': 'a48o2S1cPoo',
3367 'ext': 'mp4',
3368 'title': 'The Young Turks - Live Main Show',
3369 'uploader': 'The Young Turks',
3370 'uploader_id': 'TheYoungTurks',
3371 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
3372 'upload_date': '20150715',
3373 'license': 'Standard YouTube License',
3374 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
3375 'categories': ['News & Politics'],
3376 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
3377 'like_count': int,
3378 'dislike_count': int,
3379 },
3380 'params': {
3381 'skip_download': True,
3382 },
3383 'only_matching': True,
3384 }, {
3385 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
3386 'only_matching': True,
3387 }, {
3388 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
3389 'only_matching': True,
3390 }, {
3391 'note': 'A channel that is not live. Should raise error',
3392 'url': 'https://www.youtube.com/user/numberphile/live',
3393 'only_matching': True,
3394 }, {
3395 'url': 'https://www.youtube.com/feed/trending',
3396 'only_matching': True,
3397 }, {
3398 'url': 'https://www.youtube.com/feed/library',
3399 'only_matching': True,
3400 }, {
3401 'url': 'https://www.youtube.com/feed/history',
3402 'only_matching': True,
3403 }, {
3404 'url': 'https://www.youtube.com/feed/subscriptions',
3405 'only_matching': True,
3406 }, {
3407 'url': 'https://www.youtube.com/feed/watch_later',
3408 'only_matching': True,
3409 }, {
3410 'note': 'Recommended - redirects to home page.',
3411 'url': 'https://www.youtube.com/feed/recommended',
3412 'only_matching': True,
3413 }, {
3414 'note': 'inline playlist with continuations that do not always work',
3415 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
3416 'only_matching': True,
3417 }, {
3418 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
3419 'only_matching': True,
3420 }, {
3421 'url': 'https://www.youtube.com/course',
3422 'only_matching': True,
3423 }, {
3424 'url': 'https://www.youtube.com/zsecurity',
3425 'only_matching': True,
3426 }, {
3427 'url': 'http://www.youtube.com/NASAgovVideo/videos',
3428 'only_matching': True,
3429 }, {
3430 'url': 'https://www.youtube.com/TheYoungTurks/live',
3431 'only_matching': True,
3432 }, {
3433 'url': 'https://www.youtube.com/hashtag/cctv9',
3434 'info_dict': {
3435 'id': 'cctv9',
3436 'title': '#cctv9',
3437 },
3438 'playlist_mincount': 350,
3439 }, {
3440 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
3441 'only_matching': True,
3442 }, {
3443 'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
3444 'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3445 'only_matching': True
3446 }, {
3447 'note': '/browse/ should redirect to /channel/',
3448 'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
3449 'only_matching': True
3450 }, {
3451 'note': 'VLPL, should redirect to playlist?list=PL...',
3452 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3453 'info_dict': {
3454 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3455 'uploader': 'NoCopyrightSounds',
3456 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
3457 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
3458 'title': 'NCS Releases',
3459 },
3460 'playlist_mincount': 166,
3461 }, {
3462 'note': 'Topic, should redirect to playlist?list=UU...',
3463 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
3464 'info_dict': {
3465 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
3466 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
3467 'title': 'Uploads from Royalty Free Music - Topic',
3468 'uploader': 'Royalty Free Music - Topic',
3469 },
3470 'expected_warnings': [
3471 'A channel/user page was given',
3472 'The URL does not have a videos tab',
3473 ],
3474 'playlist_mincount': 101,
3475 }, {
3476 'note': 'Topic without a UU playlist',
3477 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
3478 'info_dict': {
3479 'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
3480 'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
3481 },
3482 'expected_warnings': [
3483 'A channel/user page was given',
3484 'The URL does not have a videos tab',
3485 'Falling back to channel URL',
3486 ],
3487 'playlist_mincount': 9,
3488 }, {
3489 'note': 'Youtube music Album',
3490 'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
3491 'info_dict': {
3492 'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
3493 'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
3494 },
3495 'playlist_count': 50,
3496 }, {
3497 'note': 'unlisted single video playlist',
3498 'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
3499 'info_dict': {
3500 'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
3501 'uploader': 'colethedj',
3502 'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
3503 'title': 'yt-dlp unlisted playlist test',
3504 'availability': 'unlisted'
3505 },
3506 'playlist_count': 1,
3507 }, {
3508 'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
3509 'url': 'https://www.youtube.com/feed/recommended',
3510 'info_dict': {
3511 'id': 'recommended',
3512 'title': 'recommended',
3513 },
3514 'playlist_mincount': 50,
3515 'params': {
3516 'skip_download': True,
3517 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3518 },
3519 }, {
3520 'note': 'API Fallback: /videos tab, sorted by oldest first',
3521 'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
3522 'info_dict': {
3523 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
3524 'title': 'Cody\'sLab - Videos',
3525 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
3526 'uploader': 'Cody\'sLab',
3527 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
3528 },
3529 'playlist_mincount': 650,
3530 'params': {
3531 'skip_download': True,
3532 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3533 },
3534 }, {
3535 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
3536 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
3537 'info_dict': {
3538 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
3539 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
3540 'title': 'Uploads from Royalty Free Music - Topic',
3541 'uploader': 'Royalty Free Music - Topic',
3542 },
3543 'expected_warnings': [
3544 'A channel/user page was given',
3545 'The URL does not have a videos tab',
3546 ],
3547 'playlist_mincount': 101,
3548 'params': {
3549 'skip_download': True,
3550 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3551 },
3552 }]
3553
3554 @classmethod
3555 def suitable(cls, url):
3556 return False if YoutubeIE.suitable(url) else super(
3557 YoutubeTabIE, cls).suitable(url)
3558
3559 def _extract_channel_id(self, webpage):
3560 channel_id = self._html_search_meta(
3561 'channelId', webpage, 'channel id', default=None)
3562 if channel_id:
3563 return channel_id
3564 channel_url = self._html_search_meta(
3565 ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
3566 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
3567 'twitter:app:url:googleplay'), webpage, 'channel url')
3568 return self._search_regex(
3569 r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
3570 channel_url, 'channel id')
3571
3572 @staticmethod
3573 def _extract_basic_item_renderer(item):
3574 # Modified from _extract_grid_item_renderer
3575 known_basic_renderers = (
3576 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
3577 )
3578 for key, renderer in item.items():
3579 if not isinstance(renderer, dict):
3580 continue
3581 elif key in known_basic_renderers:
3582 return renderer
3583 elif key.startswith('grid') and key.endswith('Renderer'):
3584 return renderer
3585
3586 def _grid_entries(self, grid_renderer):
3587 for item in grid_renderer['items']:
3588 if not isinstance(item, dict):
3589 continue
3590 renderer = self._extract_basic_item_renderer(item)
3591 if not isinstance(renderer, dict):
3592 continue
3593 title = self._get_text(renderer, 'title')
3594
3595 # playlist
3596 playlist_id = renderer.get('playlistId')
3597 if playlist_id:
3598 yield self.url_result(
3599 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3600 ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
3601 video_title=title)
3602 continue
3603 # video
3604 video_id = renderer.get('videoId')
3605 if video_id:
3606 yield self._extract_video(renderer)
3607 continue
3608 # channel
3609 channel_id = renderer.get('channelId')
3610 if channel_id:
3611 yield self.url_result(
3612 'https://www.youtube.com/channel/%s' % channel_id,
3613 ie=YoutubeTabIE.ie_key(), video_title=title)
3614 continue
3615 # generic endpoint URL support
3616 ep_url = urljoin('https://www.youtube.com/', try_get(
3617 renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
3618 compat_str))
3619 if ep_url:
3620 for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
3621 if ie.suitable(ep_url):
3622 yield self.url_result(
3623 ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
3624 break
3625
3626 def _shelf_entries_from_content(self, shelf_renderer):
3627 content = shelf_renderer.get('content')
3628 if not isinstance(content, dict):
3629 return
3630 renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
3631 if renderer:
3632 # TODO: add support for nested playlists so each shelf is processed
3633 # as separate playlist
3634 # TODO: this includes only first N items
3635 for entry in self._grid_entries(renderer):
3636 yield entry
3637 renderer = content.get('horizontalListRenderer')
3638 if renderer:
3639 # TODO
3640 pass
3641
3642 def _shelf_entries(self, shelf_renderer, skip_channels=False):
3643 ep = try_get(
3644 shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3645 compat_str)
3646 shelf_url = urljoin('https://www.youtube.com', ep)
3647 if shelf_url:
3648 # Skipping links to other channels; note that checking for
3649 # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
3650 # will not work
3651 if skip_channels and '/channels?' in shelf_url:
3652 return
3653 title = self._get_text(shelf_renderer, 'title')
3654 yield self.url_result(shelf_url, video_title=title)
3655 # Shelf may not contain a shelf URL; fall back to extraction from content
3656 for entry in self._shelf_entries_from_content(shelf_renderer):
3657 yield entry
3658
3659 def _playlist_entries(self, video_list_renderer):
3660 for content in video_list_renderer['contents']:
3661 if not isinstance(content, dict):
3662 continue
3663 renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
3664 if not isinstance(renderer, dict):
3665 continue
3666 video_id = renderer.get('videoId')
3667 if not video_id:
3668 continue
3669 yield self._extract_video(renderer)
3670
3671 def _rich_entries(self, rich_grid_renderer):
3672 renderer = try_get(
3673 rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
3674 video_id = renderer.get('videoId')
3675 if not video_id:
3676 return
3677 yield self._extract_video(renderer)
3678
3679 def _video_entry(self, video_renderer):
3680 video_id = video_renderer.get('videoId')
3681 if video_id:
3682 return self._extract_video(video_renderer)
3683
3684 def _post_thread_entries(self, post_thread_renderer):
3685 post_renderer = try_get(
3686 post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
3687 if not post_renderer:
3688 return
3689 # video attachment
3690 video_renderer = try_get(
3691 post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
3692 video_id = video_renderer.get('videoId')
3693 if video_id:
3694 entry = self._extract_video(video_renderer)
3695 if entry:
3696 yield entry
3697 # playlist attachment
3698 playlist_id = try_get(
3699 post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
3700 if playlist_id:
3701 yield self.url_result(
3702 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3703 ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
3704 # inline video links
3705 runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
3706 for run in runs:
3707 if not isinstance(run, dict):
3708 continue
3709 ep_url = try_get(
3710 run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
3711 if not ep_url:
3712 continue
3713 if not YoutubeIE.suitable(ep_url):
3714 continue
3715 ep_video_id = YoutubeIE._match_id(ep_url)
3716 if video_id == ep_video_id:
3717 continue
3718 yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
3719
3720 def _post_thread_continuation_entries(self, post_thread_continuation):
3721 contents = post_thread_continuation.get('contents')
3722 if not isinstance(contents, list):
3723 return
3724 for content in contents:
3725 renderer = content.get('backstagePostThreadRenderer')
3726 if not isinstance(renderer, dict):
3727 continue
3728 for entry in self._post_thread_entries(renderer):
3729 yield entry
3730
3731 r''' # unused
3732 def _rich_grid_entries(self, contents):
3733 for content in contents:
3734 video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
3735 if video_renderer:
3736 entry = self._video_entry(video_renderer)
3737 if entry:
3738 yield entry
3739 '''
3740 def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
3741
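# First yield the entries present in the tab's initial renderer, then keep following
# continuation tokens via the API until no further continuation is returned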
3742 def extract_entries(parent_renderer): # this needs to be called again for continuations to work with feeds
3743 contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
3744 for content in contents:
3745 if not isinstance(content, dict):
3746 continue
3747 is_renderer = try_get(content, lambda x: x['itemSectionRenderer'], dict)
3748 if not is_renderer:
3749 renderer = content.get('richItemRenderer')
3750 if renderer:
3751 for entry in self._rich_entries(renderer):
3752 yield entry
3753 continuation_list[0] = self._extract_continuation(parent_renderer)
3754 continue
3755 isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
3756 for isr_content in isr_contents:
3757 if not isinstance(isr_content, dict):
3758 continue
3759
3760 known_renderers = {
3761 'playlistVideoListRenderer': self._playlist_entries,
3762 'gridRenderer': self._grid_entries,
3763 'shelfRenderer': lambda x: self._shelf_entries(x, tab.get('title') != 'Channels'),
3764 'backstagePostThreadRenderer': self._post_thread_entries,
3765 'videoRenderer': lambda x: [self._video_entry(x)],
3766 }
3767 for key, renderer in isr_content.items():
3768 if key not in known_renderers:
3769 continue
3770 for entry in known_renderers[key](renderer):
3771 if entry:
3772 yield entry
3773 continuation_list[0] = self._extract_continuation(renderer)
3774 break
3775
3776 if not continuation_list[0]:
3777 continuation_list[0] = self._extract_continuation(is_renderer)
3778
3779 if not continuation_list[0]:
3780 continuation_list[0] = self._extract_continuation(parent_renderer)
3781
3782 continuation_list = [None] # Python 2 does not support nonlocal
3783 tab_content = try_get(tab, lambda x: x['content'], dict)
3784 if not tab_content:
3785 return
3786 parent_renderer = (
3787 try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
3788 or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
3789 for entry in extract_entries(parent_renderer):
3790 yield entry
3791 continuation = continuation_list[0]
3792
3793 for page_num in itertools.count(1):
3794 if not continuation:
3795 break
3796 headers = self.generate_api_headers(
3797 ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
3798 response = self._extract_response(
3799 item_id='%s page %s' % (item_id, page_num),
3800 query=continuation, headers=headers, ytcfg=ytcfg,
3801 check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3802
3803 if not response:
3804 break
3805 # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
3806 # See: https://github.com/ytdl-org/youtube-dl/issues/28702
3807 visitor_data = self._extract_visitor_data(response) or visitor_data
3808
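# Some responses return the continuation data under 'continuationContents'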
3809 known_continuation_renderers = {
3810 'playlistVideoListContinuation': self._playlist_entries,
3811 'gridContinuation': self._grid_entries,
3812 'itemSectionContinuation': self._post_thread_continuation_entries,
3813 'sectionListContinuation': extract_entries, # for feeds
3814 }
3815 continuation_contents = try_get(
3816 response, lambda x: x['continuationContents'], dict) or {}
3817 continuation_renderer = None
3818 for key, value in continuation_contents.items():
3819 if key not in known_continuation_renderers:
3820 continue
3821 continuation_renderer = value
3822 continuation_list = [None]
3823 for entry in known_continuation_renderers[key](continuation_renderer):
3824 yield entry
3825 continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
3826 break
3827 if continuation_renderer:
3828 continue
3829
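# Other responses return continuation items under onResponseReceivedActions/onResponseReceivedEndpoints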
3830 known_renderers = {
3831 'gridPlaylistRenderer': (self._grid_entries, 'items'),
3832 'gridVideoRenderer': (self._grid_entries, 'items'),
3833 'gridChannelRenderer': (self._grid_entries, 'items'),
3834 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
3835 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
3836 'richItemRenderer': (extract_entries, 'contents'), # for hashtag
3837 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
3838 }
3839 on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3840 continuation_items = try_get(
3841 on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
3842 continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
3843 video_items_renderer = None
3844 for key, value in continuation_item.items():
3845 if key not in known_renderers:
3846 continue
3847 video_items_renderer = {known_renderers[key][1]: continuation_items}
3848 continuation_list = [None]
3849 for entry in known_renderers[key][0](video_items_renderer):
3850 yield entry
3851 continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
3852 break
3853 if video_items_renderer:
3854 continue
3855 break
3856
3857 @staticmethod
3858 def _extract_selected_tab(tabs):
3859 for tab in tabs:
3860 renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
3861 if renderer.get('selected') is True:
3862 return renderer
3863 else:
3864 raise ExtractorError('Unable to find selected tab')
3865
3866 @classmethod
3867 def _extract_uploader(cls, data):
3868 uploader = {}
3869 renderer = cls._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
3870 owner = try_get(
3871 renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
3872 if owner:
3873 uploader['uploader'] = owner.get('text')
3874 uploader['uploader_id'] = try_get(
3875 owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
3876 uploader['uploader_url'] = urljoin(
3877 'https://www.youtube.com/',
3878 try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
3879 return {k: v for k, v in uploader.items() if v is not None}
3880
3881 def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
3882 playlist_id = title = description = channel_url = channel_name = channel_id = None
3883 thumbnails_list = []
3884 tags = []
3885
3886 selected_tab = self._extract_selected_tab(tabs)
3887 renderer = try_get(
3888 data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
3889 if renderer:
3890 channel_name = renderer.get('title')
3891 channel_url = renderer.get('channelUrl')
3892 channel_id = renderer.get('externalId')
3893 else:
3894 renderer = try_get(
3895 data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
3896
3897 if renderer:
3898 title = renderer.get('title')
3899 description = renderer.get('description', '')
3900 playlist_id = channel_id
3901 tags = renderer.get('keywords', '').split()
3902 thumbnails_list = (
3903 try_get(renderer, lambda x: x['avatar']['thumbnails'], list)
3904 or try_get(
3905 self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer'),
3906 lambda x: x['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'],
3907 list)
3908 or [])
3909
3910 thumbnails = []
3911 for t in thumbnails_list:
3912 if not isinstance(t, dict):
3913 continue
3914 thumbnail_url = url_or_none(t.get('url'))
3915 if not thumbnail_url:
3916 continue
3917 thumbnails.append({
3918 'url': thumbnail_url,
3919 'width': int_or_none(t.get('width')),
3920 'height': int_or_none(t.get('height')),
3921 })
3922 if playlist_id is None:
3923 playlist_id = item_id
3924 if title is None:
3925 title = (
3926 try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
3927 or playlist_id)
3928 title += format_field(selected_tab, 'title', ' - %s')
3929 title += format_field(selected_tab, 'expandedText', ' - %s')
3930 metadata = {
3931 'playlist_id': playlist_id,
3932 'playlist_title': title,
3933 'playlist_description': description,
3934 'uploader': channel_name,
3935 'uploader_id': channel_id,
3936 'uploader_url': channel_url,
3937 'thumbnails': thumbnails,
3938 'tags': tags,
3939 }
3940 availability = self._extract_availability(data)
3941 if availability:
3942 metadata['availability'] = availability
3943 if not channel_id:
3944 metadata.update(self._extract_uploader(data))
3945 metadata.update({
3946 'channel': metadata['uploader'],
3947 'channel_id': metadata['uploader_id'],
3948 'channel_url': metadata['uploader_url']})
3949 return self.playlist_result(
3950 self._entries(
3951 selected_tab, playlist_id, ytcfg,
3952 self._extract_account_syncid(ytcfg, data),
3953 self._extract_visitor_data(data, ytcfg)),
3954 **metadata)
3955
3956 def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
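# Mixes are dynamically generated and have no full listing; keep requesting the 'next'
# endpoint for the last seen video and stop once the first video is seen again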
3957 first_id = last_id = response = None
3958 for page_num in itertools.count(1):
3959 videos = list(self._playlist_entries(playlist))
3960 if not videos:
3961 return
3962 start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
3963 if start >= len(videos):
3964 return
3965 for video in videos[start:]:
3966 if video['id'] == first_id:
3967 self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
3968 return
3969 yield video
3970 first_id = first_id or videos[0]['id']
3971 last_id = videos[-1]['id']
3972 watch_endpoint = try_get(
3973 playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
3974 headers = self.generate_api_headers(
3975 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
3976 visitor_data=self._extract_visitor_data(response, data, ytcfg))
3977 query = {
3978 'playlistId': playlist_id,
3979 'videoId': watch_endpoint.get('videoId') or last_id,
3980 'index': watch_endpoint.get('index') or len(videos),
3981 'params': watch_endpoint.get('params') or 'OAE%3D'
3982 }
3983 response = self._extract_response(
3984 item_id='%s page %d' % (playlist_id, page_num),
3985 query=query, ep='next', headers=headers, ytcfg=ytcfg,
3986 check_get_keys='contents'
3987 )
3988 playlist = try_get(
3989 response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
3990
3991 def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
3992 title = playlist.get('title') or try_get(
3993 data, lambda x: x['titleText']['simpleText'], compat_str)
3994 playlist_id = playlist.get('playlistId') or item_id
3995
3996 # Delegating everything except mix playlists to regular tab-based playlist URL
3997 playlist_url = urljoin(url, try_get(
3998 playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3999 compat_str))
4000 if playlist_url and playlist_url != url:
4001 return self.url_result(
4002 playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
4003 video_title=title)
4004
4005 return self.playlist_result(
4006 self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
4007 playlist_id=playlist_id, playlist_title=title)
4008
4009 def _extract_availability(self, data):
4010 """
4011 Gets the availability of a given playlist/tab.
4012 Note: Unless YouTube tells us explicitly, we do not assume it is public
4013 @param data: response
4014 """
4015 is_private = is_unlisted = None
4016 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
4017 badge_labels = self._extract_badges(renderer)
4018
4019 # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
4020 privacy_dropdown_entries = try_get(
4021 renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
4022 for renderer_dict in privacy_dropdown_entries:
4023 is_selected = try_get(
4024 renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
4025 if not is_selected:
4026 continue
4027 label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
4028 if label:
4029 badge_labels.add(label.lower())
4030 break
4031
4032 for badge_label in badge_labels:
4033 if badge_label == 'unlisted':
4034 is_unlisted = True
4035 elif badge_label == 'private':
4036 is_private = True
4037 elif badge_label == 'public':
4038 is_unlisted = is_private = False
4039 return self._availability(is_private, False, False, False, is_unlisted)
4040
4041 @staticmethod
4042 def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
4043 sidebar_renderer = try_get(
4044 data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
4045 for item in sidebar_renderer:
4046 renderer = try_get(item, lambda x: x[info_renderer], expected_type)
4047 if renderer:
4048 return renderer
4049
4050 def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
4051 """
4052 Get playlist with unavailable videos if the 'show unavailable videos' button exists.
4053 """
4054 browse_id = params = None
4055 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
4056 if not renderer:
4057 return
4058 menu_renderer = try_get(
4059 renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
4060 for menu_item in menu_renderer:
4061 if not isinstance(menu_item, dict):
4062 continue
4063 nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
4064 text = try_get(
4065 nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
4066 if not text or text.lower() != 'show unavailable videos':
4067 continue
4068 browse_endpoint = try_get(
4069 nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
4070 browse_id = browse_endpoint.get('browseId')
4071 params = browse_endpoint.get('params')
4072 break
4073
4074 headers = self.generate_api_headers(
4075 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
4076 visitor_data=self._extract_visitor_data(data, ytcfg))
4077 query = {
4078 'params': params or 'wgYCCAA=',
4079 'browseId': browse_id or 'VL%s' % item_id
4080 }
4081 return self._extract_response(
4082 item_id=item_id, headers=headers, query=query,
4083 check_get_keys='contents', fatal=False, ytcfg=ytcfg,
4084 note='Downloading API JSON with unavailable videos')
4085
4086 def _extract_webpage(self, url, item_id, fatal=True):
4087 retries = self.get_param('extractor_retries', 3)
4088 count = -1
4089 webpage = data = last_error = None
4090 while count < retries:
4091 count += 1
4092 # Sometimes youtube returns a webpage with incomplete ytInitialData
4093 # See: https://github.com/yt-dlp/yt-dlp/issues/116
4094 if last_error:
4095 self.report_warning('%s. Retrying ...' % last_error)
4096 try:
4097 webpage = self._download_webpage(
4098 url, item_id,
4099 note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
4100 data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
4101 except ExtractorError as e:
4102 if isinstance(e.cause, network_exceptions):
4103 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
4104 last_error = error_to_compat_str(e.cause or e.msg)
4105 if count < retries:
4106 continue
4107 if fatal:
4108 raise
4109 self.report_warning(error_to_compat_str(e))
4110 break
4111 else:
4112 try:
4113 self._extract_and_report_alerts(data)
4114 except ExtractorError as e:
4115 if fatal:
4116 raise
4117 self.report_warning(error_to_compat_str(e))
4118 break
4119
4120 if dict_get(data, ('contents', 'currentVideoEndpoint')):
4121 break
4122
4123 last_error = 'Incomplete yt initial data received'
4124 if count >= retries:
4125 if fatal:
4126 raise ExtractorError(last_error)
4127 self.report_warning(last_error)
4128 break
4129
4130 return webpage, data
4131
4132 def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
4133 data = None
4134 if 'webpage' not in self._configuration_arg('skip'):
4135 webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
4136 ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
4137 if not data:
4138 if not ytcfg and self.is_authenticated:
4139 msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
4140 if 'authcheck' not in self._configuration_arg('skip') and fatal:
4141 raise ExtractorError(
4142 msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
4143 ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
4144 expected=True)
4145 self.report_warning(msg, only_once=True)
4146 data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
4147 return data, ytcfg
4148
4149 def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
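# Resolve the URL through the navigation/resolve_url endpoint, then query the matching
# browse/next endpoint so tab data can be obtained without downloading the webpage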
4150 headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
4151 resolve_response = self._extract_response(
4152 item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
4153 ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
4154 endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
4155 for ep_key, ep in endpoints.items():
4156 params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
4157 if params:
4158 return self._extract_response(
4159 item_id=item_id, query=params, ep=ep, headers=headers,
4160 ytcfg=ytcfg, fatal=fatal, default_client=default_client,
4161 check_get_keys=('contents', 'currentVideoEndpoint'))
4162 err_note = 'Failed to resolve url (does the playlist exist?)'
4163 if fatal:
4164 raise ExtractorError(err_note, expected=True)
4165 self.report_warning(err_note, item_id)
4166
4167 @staticmethod
4168 def _smuggle_data(entries, data):
4169 for entry in entries:
4170 if data:
4171 entry['url'] = smuggle_url(entry['url'], data)
4172 yield entry
4173
4174 def _real_extract(self, url):
4175 url, smuggled_data = unsmuggle_url(url, {})
4176 if self.is_music_url(url):
4177 smuggled_data['is_music_url'] = True
4178 info_dict = self.__real_extract(url, smuggled_data)
4179 if info_dict.get('entries'):
4180 info_dict['entries'] = self._smuggle_data(info_dict['entries'], smuggled_data)
4181 return info_dict
4182
4183 _url_re = re.compile(r'(?P<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _VALID_URL)
4184
4185 def __real_extract(self, url, smuggled_data):
4186 item_id = self._match_id(url)
4187 url = compat_urlparse.urlunparse(
4188 compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
4189 compat_opts = self.get_param('compat_opts', [])
4190
4191 def get_mobj(url):
4192 mobj = self._url_re.match(url).groupdict()
4193 mobj.update((k, '') for k, v in mobj.items() if v is None)
4194 return mobj
4195
4196 mobj = get_mobj(url)
4197 # Youtube returns incomplete data if the tab name is not lower case
4198 pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
4199 if is_channel:
4200 if smuggled_data.get('is_music_url'):
4201 if item_id[:2] == 'VL':
4202 # Youtube music VL channels have an equivalent playlist
4203 item_id = item_id[2:]
4204 pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
4205 elif item_id[:2] == 'MP':
4206 # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
4207 mdata = self._extract_tab_endpoint(
4208 'https://music.youtube.com/channel/%s' % item_id, item_id, default_client='web_music')
4209 murl = traverse_obj(
4210 mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'), get_all=False, expected_type=compat_str)
4211 if not murl:
4212 raise ExtractorError('Failed to resolve album to playlist.')
4213 return self.url_result(murl, ie=YoutubeTabIE.ie_key())
4214 elif mobj['channel_type'] == 'browse':
4215 # Youtube music /browse/ should be changed to /channel/
4216 pre = 'https://www.youtube.com/channel/%s' % item_id
4217 if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
4218 # Home URLs should redirect to /videos/
4219 self.report_warning(
4220 'A channel/user page was given. All the channel\'s videos will be downloaded. '
4221 'To download only the videos in the home page, add a "/featured" to the URL')
4222 tab = '/videos'
4223
4224 url = ''.join((pre, tab, post))
4225 mobj = get_mobj(url)
4226
4227 # Handle both video/playlist URLs
4228 qs = parse_qs(url)
4229 video_id = qs.get('v', [None])[0]
4230 playlist_id = qs.get('list', [None])[0]
4231
4232 if not video_id and mobj['not_channel'].startswith('watch'):
4233 if not playlist_id:
4234 # If there is neither a video nor a playlist id, youtube redirects to the home page, which is undesirable
4235 raise ExtractorError('Unable to recognize tab page')
4236 # Common mistake: https://www.youtube.com/watch?list=playlist_id
4237 self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
4238 url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
4239 mobj = get_mobj(url)
4240
4241 if video_id and playlist_id:
4242 if self.get_param('noplaylist'):
4243 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
4244 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4245 self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
4246
4247 data, ytcfg = self._extract_data(url, item_id)
4248
4249 tabs = try_get(
4250 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4251 if tabs:
4252 selected_tab = self._extract_selected_tab(tabs)
4253 tab_name = selected_tab.get('title', '')
4254 if 'no-youtube-channel-redirect' not in compat_opts:
4255 if mobj['tab'] == '/live':
4256 # Live tab should have redirected to the video
4257 raise ExtractorError('The channel is not currently live', expected=True)
4258 if mobj['tab'] == '/videos' and tab_name.lower() != mobj['tab'][1:]:
4259 if not mobj['not_channel'] and item_id[:2] == 'UC':
4260 # Topic channels don't have /videos. Use the equivalent playlist instead
4261 self.report_warning('The URL does not have a %s tab. Trying to redirect to playlist UU%s instead' % (mobj['tab'][1:], item_id[2:]))
4262 pl_id = 'UU%s' % item_id[2:]
4263 pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
4264 try:
4265 data, ytcfg, item_id, url = *self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True), pl_id, pl_url
4266 except ExtractorError:
4267 self.report_warning('The playlist gave error. Falling back to channel URL')
4268 else:
4269 self.report_warning('The URL does not have a %s tab. %s is being downloaded instead' % (mobj['tab'][1:], tab_name))
4270
4271 self.write_debug('Final URL: %s' % url)
4272
4273 # YouTube sometimes provides a button to reload playlist with unavailable videos.
4274 if 'no-youtube-unavailable-videos' not in compat_opts:
4275 data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
4276 self._extract_and_report_alerts(data, only_once=True)
4277 tabs = try_get(
4278 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4279 if tabs:
4280 return self._extract_from_tabs(item_id, ytcfg, data, tabs)
4281
4282 playlist = try_get(
4283 data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
4284 if playlist:
4285 return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
4286
4287 video_id = try_get(
4288 data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
4289 compat_str) or video_id
4290 if video_id:
4291 if mobj['tab'] != '/live': # live tab is expected to redirect to video
4292 self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
4293 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4294
4295 raise ExtractorError('Unable to recognize tab page')
4296
4297
4298 class YoutubePlaylistIE(InfoExtractor):
4299 IE_DESC = 'YouTube playlists'
4300 _VALID_URL = r'''(?x)(?:
4301 (?:https?://)?
4302 (?:\w+\.)?
4303 (?:
4304 (?:
4305 youtube(?:kids)?\.com|
4306 invidio\.us
4307 )
4308 /.*?\?.*?\blist=
4309 )?
4310 (?P<id>%(playlist_id)s)
4311 )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
4312 IE_NAME = 'youtube:playlist'
4313 _TESTS = [{
4314 'note': 'issue #673',
4315 'url': 'PLBB231211A4F62143',
4316 'info_dict': {
4317 'title': '[OLD]Team Fortress 2 (Class-based LP)',
4318 'id': 'PLBB231211A4F62143',
4319 'uploader': 'Wickydoo',
4320 'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
4321 'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
4322 },
4323 'playlist_mincount': 29,
4324 }, {
4325 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4326 'info_dict': {
4327 'title': 'YDL_safe_search',
4328 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4329 },
4330 'playlist_count': 2,
4331 'skip': 'This playlist is private',
4332 }, {
4333 'note': 'embedded',
4334 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4335 'playlist_count': 4,
4336 'info_dict': {
4337 'title': 'JODA15',
4338 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4339 'uploader': 'milan',
4340 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
4341 }
4342 }, {
4343 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4344 'playlist_mincount': 654,
4345 'info_dict': {
4346 'title': '2018 Chinese New Singles (11/6 updated)',
4347 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4348 'uploader': 'LBK',
4349 'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
4350 'description': 'md5:da521864744d60a198e3a88af4db0d9d',
4351 }
4352 }, {
4353 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
4354 'only_matching': True,
4355 }, {
4356 # music album playlist
4357 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
4358 'only_matching': True,
4359 }]
4360
4361 @classmethod
4362 def suitable(cls, url):
4363 if YoutubeTabIE.suitable(url):
4364 return False
4365 from ..utils import parse_qs
4366 qs = parse_qs(url)
4367 if qs.get('v', [None])[0]:
4368 return False
4369 return super(YoutubePlaylistIE, cls).suitable(url)
4370
4371 def _real_extract(self, url):
4372 playlist_id = self._match_id(url)
4373 is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
4374 url = update_url_query(
4375 'https://www.youtube.com/playlist',
4376 parse_qs(url) or {'list': playlist_id})
4377 if is_music_url:
4378 url = smuggle_url(url, {'is_music_url': True})
4379 return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4380
4381
4382 class YoutubeYtBeIE(InfoExtractor):
4383 IE_DESC = 'youtu.be'
4384 _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
4385 _TESTS = [{
4386 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
4387 'info_dict': {
4388 'id': 'yeWKywCrFtk',
4389 'ext': 'mp4',
4390 'title': 'Small Scale Baler and Braiding Rugs',
4391 'uploader': 'Backus-Page House Museum',
4392 'uploader_id': 'backuspagemuseum',
4393 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
4394 'upload_date': '20161008',
4395 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
4396 'categories': ['Nonprofits & Activism'],
4397 'tags': list,
4398 'like_count': int,
4399 'dislike_count': int,
4400 },
4401 'params': {
4402 'noplaylist': True,
4403 'skip_download': True,
4404 },
4405 }, {
4406 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
4407 'only_matching': True,
4408 }]
4409
4410 def _real_extract(self, url):
4411 mobj = self._match_valid_url(url)
4412 video_id = mobj.group('id')
4413 playlist_id = mobj.group('playlist_id')
4414 return self.url_result(
4415 update_url_query('https://www.youtube.com/watch', {
4416 'v': video_id,
4417 'list': playlist_id,
4418 'feature': 'youtu.be',
4419 }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4420
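# A rough sketch of the rewrite performed above (URL taken from the test case):
#
#   https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5
#   -> https://www.youtube.com/watch?v=yeWKywCrFtk&list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5&feature=youtu.be
#
# i.e. the short link is expanded to a full watch URL and handed to YoutubeTabIE,
# which then decides (subject to --no-playlist) whether to extract the whole
# playlist or just the single video.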
4421
4422 class YoutubeYtUserIE(InfoExtractor):
4423 IE_DESC = 'YouTube user videos; "ytuser:" prefix'
4424 _VALID_URL = r'ytuser:(?P<id>.+)'
4425 _TESTS = [{
4426 'url': 'ytuser:phihag',
4427 'only_matching': True,
4428 }]
4429
4430 def _real_extract(self, url):
4431 user_id = self._match_id(url)
4432 return self.url_result(
4433 'https://www.youtube.com/user/%s' % user_id,
4434 ie=YoutubeTabIE.ie_key(), video_id=user_id)
4435
4436
4437 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
4438 IE_NAME = 'youtube:favorites'
4439 IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
4440 _VALID_URL = r':ytfav(?:ou?rite)?s?'
4441 _LOGIN_REQUIRED = True
4442 _TESTS = [{
4443 'url': ':ytfav',
4444 'only_matching': True,
4445 }, {
4446 'url': ':ytfavorites',
4447 'only_matching': True,
4448 }]
4449
4450 def _real_extract(self, url):
4451 return self.url_result(
4452 'https://www.youtube.com/playlist?list=LL',
4453 ie=YoutubeTabIE.ie_key())
4454
4455
4456 class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
4457 IE_DESC = 'YouTube searches'
4458 IE_NAME = 'youtube:search'
4459 _SEARCH_KEY = 'ytsearch'
4460 _SEARCH_PARAMS = None
4461 _TESTS = []
4462
4463 def _search_results(self, query):
4464 data = {'query': query}
4465 if self._SEARCH_PARAMS:
4466 data['params'] = self._SEARCH_PARAMS
4467 continuation = {}
4468 for page_num in itertools.count(1):
4469 data.update(continuation)
4470 search = self._extract_response(
4471 item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
4472 check_get_keys=('contents', 'onResponseReceivedCommands')
4473 )
4474 if not search:
4475 break
4476 slr_contents = try_get(
4477 search,
4478 (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
4479 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
4480 list)
4481 if not slr_contents:
4482 break
4483
4484 # YouTube sometimes adds promoted content to search results,
4485 # changing the position of the videos and of the continuation token,
4486 # so we search through all entries until we find them.
4487 continuation = None
4488 for slr_content in slr_contents:
4489 if not continuation:
4490 continuation = self._extract_continuation({'contents': [slr_content]})
4491
4492 isr_contents = try_get(
4493 slr_content,
4494 lambda x: x['itemSectionRenderer']['contents'],
4495 list)
4496 if not isr_contents:
4497 continue
4498 for content in isr_contents:
4499 if not isinstance(content, dict):
4500 continue
4501 video = content.get('videoRenderer')
4502 if not isinstance(video, dict):
4503 continue
4504 video_id = video.get('videoId')
4505 if not video_id:
4506 continue
4507
4508 yield self._extract_video(video)
4509
4510 if not continuation:
4511 break
4512
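# A rough usage sketch for the "ytsearch" prefix defined above (the N-results
# syntax comes from SearchInfoExtractor; assumes the yt_dlp package is importable):
#
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       result = ydl.extract_info('ytsearch5:yt-dlp test video', download=False)
#   # result is a playlist-style dict whose 'entries' contain up to 5 search
#   # results, produced lazily by _search_results() above.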
4513
4514 class YoutubeSearchDateIE(YoutubeSearchIE):
4515 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
4516 _SEARCH_KEY = 'ytsearchdate'
4517 IE_DESC = 'YouTube searches, newest videos first'
4518 _SEARCH_PARAMS = 'CAI%3D'
4519
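# 'CAI%3D' is the URL-encoded form of 'CAI=', i.e. the base64-encoded protobuf
# message b'\x08\x02' (field 1 set to 2), which the search endpoint appears to
# interpret as "sort by upload date" - hence "newest videos first" above.
# A quick check of the decoding (standard library only):
#
#   import base64, urllib.parse
#   base64.b64decode(urllib.parse.unquote('CAI%3D'))   # -> b'\x08\x02'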
4520
4521 class YoutubeSearchURLIE(YoutubeSearchIE):
4522 IE_DESC = 'YouTube search URLs with sorting and filter support'
4523 IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
4524 _SEARCH_KEY = None
4525 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
4526 # _MAX_RESULTS = 100
4527 _TESTS = [{
4528 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
4529 'playlist_mincount': 5,
4530 'info_dict': {
4531 'id': 'youtube-dl test video',
4532 'title': 'youtube-dl test video',
4533 }
4534 }, {
4535 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
4536 'only_matching': True,
4537 }]
4538
4539 @classmethod
4540 def _make_valid_url(cls):
4541 return cls._VALID_URL
4542
4543 def _real_extract(self, url):
4544 qs = parse_qs(url)
4545 query = (qs.get('search_query') or qs.get('q'))[0]
4546 self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
4547 return self._get_n_results(query, self._MAX_RESULTS)
4548
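# A rough sketch of how a results URL is handled (URL taken from the test case
# above; assumes the yt_dlp package is importable):
#
#   from yt_dlp import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       res = ydl.extract_info('https://www.youtube.com/results?q=test&sp=EgQIBBgB', download=False)
#   # 'q' (or 'search_query') becomes the search query, 'sp' (if present) becomes
#   # _SEARCH_PARAMS, and up to _MAX_RESULTS entries are fetched via _get_n_results().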
4549
4550 class YoutubeFeedsInfoExtractor(YoutubeTabIE):
4551 """
4552 Base class for feed extractors.
4553 Subclasses must define the _FEED_NAME property (see the sketch after this class).
4554 """
4555 _LOGIN_REQUIRED = True
4556 _TESTS = []
4557
4558 @property
4559 def IE_NAME(self):
4560 return 'youtube:%s' % self._FEED_NAME
4561
4562 def _real_extract(self, url):
4563 return self.url_result(
4564 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
4565 ie=YoutubeTabIE.ie_key())
4566
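# A minimal subclass sketch (the feed name here is hypothetical; the real
# subclasses follow below):
#
#   class YoutubeExampleFeedIE(YoutubeFeedsInfoExtractor):
#       IE_DESC = 'YouTube example feed; ":ytexample" keyword (requires cookies)'
#       _VALID_URL = r':ytexample'
#       _FEED_NAME = 'example'
#
#   # IE_NAME then resolves to 'youtube:example' and _real_extract() redirects to
#   # https://www.youtube.com/feed/example via YoutubeTabIE.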
4567
4568 class YoutubeWatchLaterIE(InfoExtractor):
4569 IE_NAME = 'youtube:watchlater'
4570 IE_DESC = 'YouTube watch later list; ":ytwatchlater" keyword (requires cookies)'
4571 _VALID_URL = r':ytwatchlater'
4572 _TESTS = [{
4573 'url': ':ytwatchlater',
4574 'only_matching': True,
4575 }]
4576
4577 def _real_extract(self, url):
4578 return self.url_result(
4579 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
4580
4581
4582 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
4583 IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
4584 _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
4585 _FEED_NAME = 'recommended'
4586 _LOGIN_REQUIRED = False
4587 _TESTS = [{
4588 'url': ':ytrec',
4589 'only_matching': True,
4590 }, {
4591 'url': ':ytrecommended',
4592 'only_matching': True,
4593 }, {
4594 'url': 'https://youtube.com',
4595 'only_matching': True,
4596 }]
4597
4598
4599 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
4600 IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
4601 _VALID_URL = r':ytsub(?:scription)?s?'
4602 _FEED_NAME = 'subscriptions'
4603 _TESTS = [{
4604 'url': ':ytsubs',
4605 'only_matching': True,
4606 }, {
4607 'url': ':ytsubscriptions',
4608 'only_matching': True,
4609 }]
4610
4611
4612 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
4613 IE_DESC = 'YouTube watch history; ":ythis" keyword (requires cookies)'
4614 _VALID_URL = r':ythis(?:tory)?'
4615 _FEED_NAME = 'history'
4616 _TESTS = [{
4617 'url': ':ythistory',
4618 'only_matching': True,
4619 }]
4620
4621
4622 class YoutubeTruncatedURLIE(InfoExtractor):
4623 IE_NAME = 'youtube:truncated_url'
4624 IE_DESC = False # Do not list
4625 _VALID_URL = r'''(?x)
4626 (?:https?://)?
4627 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
4628 (?:watch\?(?:
4629 feature=[a-z_]+|
4630 annotation_id=annotation_[^&]+|
4631 x-yt-cl=[0-9]+|
4632 hl=[^&]*|
4633 t=[0-9]+
4634 )?
4635 |
4636 attribution_link\?a=[^&]+
4637 )
4638 $
4639 '''
4640
4641 _TESTS = [{
4642 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
4643 'only_matching': True,
4644 }, {
4645 'url': 'https://www.youtube.com/watch?',
4646 'only_matching': True,
4647 }, {
4648 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
4649 'only_matching': True,
4650 }, {
4651 'url': 'https://www.youtube.com/watch?feature=foo',
4652 'only_matching': True,
4653 }, {
4654 'url': 'https://www.youtube.com/watch?hl=en-GB',
4655 'only_matching': True,
4656 }, {
4657 'url': 'https://www.youtube.com/watch?t=2372',
4658 'only_matching': True,
4659 }]
4660
4661 def _real_extract(self, url):
4662 raise ExtractorError(
4663 'Did you forget to quote the URL? Remember that & is a meta '
4664 'character in most shells, so you want to put the URL in quotes, '
4665 'like yt-dlp '
4666 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
4667 'or simply yt-dlp BaW_jenozKc .',
4668 expected=True)
4669
4670
4671 class YoutubeClipIE(InfoExtractor):
4672 IE_NAME = 'youtube:clip'
4673 IE_DESC = False # Do not list
4674 _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
4675
4676 def _real_extract(self, url):
4677 self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
4678 return self.url_result(url, 'Generic')
4679
4680
4681 class YoutubeTruncatedIDIE(InfoExtractor):
4682 IE_NAME = 'youtube:truncated_id'
4683 IE_DESC = False # Do not list
4684 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
4685
4686 _TESTS = [{
4687 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
4688 'only_matching': True,
4689 }]
4690
4691 def _real_extract(self, url):
4692 video_id = self._match_id(url)
4693 raise ExtractorError(
4694 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
4695 expected=True)