1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5 import base64
6 import calendar
7 import copy
8 import datetime
9 import hashlib
10 import itertools
11 import json
12 import os.path
13 import random
14 import re
15 import time
16 import traceback
17
18 from .common import InfoExtractor, SearchInfoExtractor
19 from ..compat import (
20 compat_chr,
21 compat_HTTPError,
22 compat_parse_qs,
23 compat_str,
24 compat_urllib_parse_unquote_plus,
25 compat_urllib_parse_urlencode,
26 compat_urllib_parse_urlparse,
27 compat_urlparse,
28 )
29 from ..jsinterp import JSInterpreter
30 from ..utils import (
31 bytes_to_intlist,
32 clean_html,
33 datetime_from_str,
34 dict_get,
35 error_to_compat_str,
36 ExtractorError,
37 float_or_none,
38 format_field,
39 int_or_none,
40 intlist_to_bytes,
41 is_html,
42 mimetype2ext,
43 network_exceptions,
44 orderedSet,
45 parse_codecs,
46 parse_count,
47 parse_duration,
48 parse_iso8601,
49 parse_qs,
50 qualities,
51 remove_end,
52 remove_start,
53 smuggle_url,
54 str_or_none,
55 str_to_int,
56 traverse_obj,
57 try_get,
58 unescapeHTML,
59 unified_strdate,
60 unsmuggle_url,
61 update_url_query,
62 url_or_none,
63 urljoin,
64 variadic,
65 )
66
67
68 # any clients starting with _ cannot be explicitly requested by the user
69 INNERTUBE_CLIENTS = {
70 'web': {
71 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
72 'INNERTUBE_CONTEXT': {
73 'client': {
74 'clientName': 'WEB',
75 'clientVersion': '2.20210622.10.00',
76 }
77 },
78 'INNERTUBE_CONTEXT_CLIENT_NAME': 1
79 },
80 'web_embedded': {
81 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
82 'INNERTUBE_CONTEXT': {
83 'client': {
84 'clientName': 'WEB_EMBEDDED_PLAYER',
85 'clientVersion': '1.20210620.0.1',
86 },
87 },
88 'INNERTUBE_CONTEXT_CLIENT_NAME': 56
89 },
90 'web_music': {
91 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
92 'INNERTUBE_HOST': 'music.youtube.com',
93 'INNERTUBE_CONTEXT': {
94 'client': {
95 'clientName': 'WEB_REMIX',
96 'clientVersion': '1.20210621.00.00',
97 }
98 },
99 'INNERTUBE_CONTEXT_CLIENT_NAME': 67,
100 },
101 'web_creator': {
102 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
103 'INNERTUBE_CONTEXT': {
104 'client': {
105 'clientName': 'WEB_CREATOR',
106 'clientVersion': '1.20210621.00.00',
107 }
108 },
109 'INNERTUBE_CONTEXT_CLIENT_NAME': 62,
110 },
111 'android': {
112 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
113 'INNERTUBE_CONTEXT': {
114 'client': {
115 'clientName': 'ANDROID',
116 'clientVersion': '16.20',
117 }
118 },
119 'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
120 'REQUIRE_JS_PLAYER': False
121 },
122 'android_embedded': {
123 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
124 'INNERTUBE_CONTEXT': {
125 'client': {
126 'clientName': 'ANDROID_EMBEDDED_PLAYER',
127 'clientVersion': '16.20',
128 },
129 },
130 'INNERTUBE_CONTEXT_CLIENT_NAME': 55,
131 'REQUIRE_JS_PLAYER': False
132 },
133 'android_music': {
134 'INNERTUBE_API_KEY': 'AIzaSyC9XL3ZjWddXya6X74dJoCTL-WEYFDNX30',
135 'INNERTUBE_HOST': 'music.youtube.com',
136 'INNERTUBE_CONTEXT': {
137 'client': {
138 'clientName': 'ANDROID_MUSIC',
139 'clientVersion': '4.32',
140 }
141 },
142 'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
143 'REQUIRE_JS_PLAYER': False
144 },
145 'android_creator': {
146 'INNERTUBE_CONTEXT': {
147 'client': {
148 'clientName': 'ANDROID_CREATOR',
149 'clientVersion': '21.24.100',
150 },
151 },
152 'INNERTUBE_CONTEXT_CLIENT_NAME': 14,
153 'REQUIRE_JS_PLAYER': False
154 },
155 # ios has HLS live streams
156 # See: https://github.com/TeamNewPipe/NewPipeExtractor/issues/680
157 'ios': {
158 'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
159 'INNERTUBE_CONTEXT': {
160 'client': {
161 'clientName': 'IOS',
162 'clientVersion': '16.20',
163 }
164 },
165 'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
166 'REQUIRE_JS_PLAYER': False
167 },
168 'ios_embedded': {
169 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
170 'INNERTUBE_CONTEXT': {
171 'client': {
172 'clientName': 'IOS_MESSAGES_EXTENSION',
173 'clientVersion': '16.20',
174 },
175 },
176 'INNERTUBE_CONTEXT_CLIENT_NAME': 66,
177 'REQUIRE_JS_PLAYER': False
178 },
179 'ios_music': {
180 'INNERTUBE_API_KEY': 'AIzaSyDK3iBpDP9nHVTk2qL73FLJICfOC3c51Og',
181 'INNERTUBE_HOST': 'music.youtube.com',
182 'INNERTUBE_CONTEXT': {
183 'client': {
184 'clientName': 'IOS_MUSIC',
185 'clientVersion': '4.32',
186 },
187 },
188 'INNERTUBE_CONTEXT_CLIENT_NAME': 26,
189 'REQUIRE_JS_PLAYER': False
190 },
191 'ios_creator': {
192 'INNERTUBE_CONTEXT': {
193 'client': {
194 'clientName': 'IOS_CREATOR',
195 'clientVersion': '21.24.100',
196 },
197 },
198 'INNERTUBE_CONTEXT_CLIENT_NAME': 15,
199 'REQUIRE_JS_PLAYER': False
200 },
201 # mweb has 'ultralow' formats
202 # See: https://github.com/yt-dlp/yt-dlp/pull/557
203 'mweb': {
204 'INNERTUBE_API_KEY': 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8',
205 'INNERTUBE_CONTEXT': {
206 'client': {
207 'clientName': 'MWEB',
208 'clientVersion': '2.20210721.07.00',
209 }
210 },
211 'INNERTUBE_CONTEXT_CLIENT_NAME': 2
212 },
213 }
214
215
216 def build_innertube_clients():
217 third_party = {
218 'embedUrl': 'https://google.com', # Can be any valid URL
219 }
220 base_clients = ('android', 'web', 'ios', 'mweb')
221 priority = qualities(base_clients[::-1])
222
223 for client, ytcfg in tuple(INNERTUBE_CLIENTS.items()):
224 ytcfg.setdefault('INNERTUBE_API_KEY', 'AIzaSyDCU8hByM-4DrUqRUYnGn-3llEO78bcxq8')
225 ytcfg.setdefault('INNERTUBE_HOST', 'www.youtube.com')
226 ytcfg.setdefault('REQUIRE_JS_PLAYER', True)
227 ytcfg['INNERTUBE_CONTEXT']['client'].setdefault('hl', 'en')
228 ytcfg['priority'] = 10 * priority(client.split('_', 1)[0])
229
230 if client in base_clients:
231 INNERTUBE_CLIENTS[f'{client}_agegate'] = agegate_ytcfg = copy.deepcopy(ytcfg)
232 agegate_ytcfg['INNERTUBE_CONTEXT']['client']['clientScreen'] = 'EMBED'
233 agegate_ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
234 agegate_ytcfg['priority'] -= 1
235 elif client.endswith('_embedded'):
236 ytcfg['INNERTUBE_CONTEXT']['thirdParty'] = third_party
237 ytcfg['priority'] -= 2
238 else:
239 ytcfg['priority'] -= 3
240
241
242 build_innertube_clients()
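# As a rough worked example (derived from the loop above, not an authoritative
# table): base_clients[::-1] == ('mweb', 'ios', 'web', 'android'), so qualities()
# assigns mweb=0, ios=1, web=2, android=3 and the resulting client priorities are:
#     android: 30   android_agegate: 29   android_embedded: 28   android_music/android_creator: 27
#     web:     20   web_agegate:     19   web_embedded:     18   web_music/web_creator:         17
#     ios:     10   ios_agegate:      9   ios_embedded:      8   ios_music/ios_creator:          7
#     mweb:     0   mweb_agegate:    -1
# Higher values are preferred when formats from several clients are merged.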
243
244
245 class YoutubeBaseInfoExtractor(InfoExtractor):
246 """Provide base functions for Youtube extractors"""
247
248 _RESERVED_NAMES = (
249 r'channel|c|user|playlist|watch|w|v|embed|e|watch_popup|clip|'
250 r'shorts|movies|results|shared|hashtag|trending|feed|feeds|'
251 r'browse|oembed|get_video_info|iframe_api|s/player|'
252 r'storefront|oops|index|account|reporthistory|t/terms|about|upload|signin|logout')
253
254 _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM|WL|LL|LM)'
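# Matches full playlist/album ids (a 'PL', 'UU', 'OLAK5uy_', ... prefix followed by
# 10+ id characters, e.g. the made-up 'PL0123456789abcdef') as well as the special
# pseudo-playlists 'WL' (watch later), 'LL' (liked videos), 'LM' and 'RDMM'.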
255
256 _NETRC_MACHINE = 'youtube'
257
258 # If True it will raise an error if no login info is provided
259 _LOGIN_REQUIRED = False
260
261 def _login(self):
262 """
263 Attempt to log in to YouTube.
264 If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
265 """
266
267 if (self._LOGIN_REQUIRED
268 and self.get_param('cookiefile') is None
269 and self.get_param('cookiesfrombrowser') is None):
270 self.raise_login_required(
271 'Login details are needed to download this content', method='cookies')
272 username, password = self._get_login_info()
273 if username:
274 self.report_warning(f'Cannot log in to YouTube using username and password. {self._LOGIN_HINTS["cookies"]}')
275
276 def _initialize_consent(self):
277 cookies = self._get_cookies('https://www.youtube.com/')
278 if cookies.get('__Secure-3PSID'):
279 return
280 consent_id = None
281 consent = cookies.get('CONSENT')
282 if consent:
283 if 'YES' in consent.value:
284 return
285 consent_id = self._search_regex(
286 r'PENDING\+(\d+)', consent.value, 'consent', default=None)
287 if not consent_id:
288 consent_id = random.randint(100, 999)
289 self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
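# The cookie set above looks like 'YES+cb.20210328-17-p0.en+FX+123', where the
# trailing number is the pending consent id extracted from the existing CONSENT
# cookie or, failing that, a random three-digit value; this pre-accepts the EU
# consent prompt so pages are served without the consent interstitial.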
290
291 def _real_initialize(self):
292 self._initialize_consent()
293 self._login()
294
295 _YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
296 _YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
297 _YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'
298
299 def _get_default_ytcfg(self, client='web'):
300 return copy.deepcopy(INNERTUBE_CLIENTS[client])
301
302 def _get_innertube_host(self, client='web'):
303 return INNERTUBE_CLIENTS[client]['INNERTUBE_HOST']
304
305 def _ytcfg_get_safe(self, ytcfg, getter, expected_type=None, default_client='web'):
306 # like try_get, but falls back to the corresponding value from the default client's ytcfg
307 _func = lambda y: try_get(y, getter, expected_type)
308 return _func(ytcfg) or _func(self._get_default_ytcfg(default_client))
309
310 def _extract_client_name(self, ytcfg, default_client='web'):
311 return self._ytcfg_get_safe(
312 ytcfg, (lambda x: x['INNERTUBE_CLIENT_NAME'],
313 lambda x: x['INNERTUBE_CONTEXT']['client']['clientName']), compat_str, default_client)
314
315 def _extract_client_version(self, ytcfg, default_client='web'):
316 return self._ytcfg_get_safe(
317 ytcfg, (lambda x: x['INNERTUBE_CLIENT_VERSION'],
318 lambda x: x['INNERTUBE_CONTEXT']['client']['clientVersion']), compat_str, default_client)
319
320 def _extract_api_key(self, ytcfg=None, default_client='web'):
321 return self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_API_KEY'], compat_str, default_client)
322
323 def _extract_context(self, ytcfg=None, default_client='web'):
324 _get_context = lambda y: try_get(y, lambda x: x['INNERTUBE_CONTEXT'], dict)
325 context = _get_context(ytcfg)
326 if context:
327 return context
328
329 context = _get_context(self._get_default_ytcfg(default_client))
330 if not ytcfg:
331 return context
332
333 # Recreate the client context (required)
334 context['client'].update({
335 'clientVersion': self._extract_client_version(ytcfg, default_client),
336 'clientName': self._extract_client_name(ytcfg, default_client),
337 })
338 visitor_data = try_get(ytcfg, lambda x: x['VISITOR_DATA'], compat_str)
339 if visitor_data:
340 context['client']['visitorData'] = visitor_data
341 return context
342
343 _SAPISID = None
344
345 def _generate_sapisidhash_header(self, origin='https://www.youtube.com'):
346 time_now = round(time.time())
347 if self._SAPISID is None:
348 yt_cookies = self._get_cookies('https://www.youtube.com')
349 # Sometimes SAPISID cookie isn't present but __Secure-3PAPISID is.
350 # See: https://github.com/yt-dlp/yt-dlp/issues/393
351 sapisid_cookie = dict_get(
352 yt_cookies, ('__Secure-3PAPISID', 'SAPISID'))
353 if sapisid_cookie and sapisid_cookie.value:
354 self._SAPISID = sapisid_cookie.value
355 self.write_debug('Extracted SAPISID cookie')
356 # A SAPISID cookie is also required, so create one from __Secure-3PAPISID if it is missing
357 if not yt_cookies.get('SAPISID'):
358 self.write_debug('Copying __Secure-3PAPISID cookie to SAPISID cookie')
359 self._set_cookie(
360 '.youtube.com', 'SAPISID', self._SAPISID, secure=True, expire_time=time_now + 3600)
361 else:
362 self._SAPISID = False
363 if not self._SAPISID:
364 return None
365 # SAPISIDHASH algorithm from https://stackoverflow.com/a/32065323
366 sapisidhash = hashlib.sha1(
367 f'{time_now} {self._SAPISID} {origin}'.encode('utf-8')).hexdigest()
368 return f'SAPISIDHASH {time_now}_{sapisidhash}'
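# A minimal standalone sketch of the SAPISIDHASH scheme implemented above
# (illustrative only; the cookie value below is made up):
#
#     import hashlib, time
#
#     def sapisidhash(sapisid='fake_SAPISID_cookie_value', origin='https://www.youtube.com'):
#         now = round(time.time())
#         digest = hashlib.sha1(f'{now} {sapisid} {origin}'.encode('utf-8')).hexdigest()
#         return f'SAPISIDHASH {now}_{digest}'  # value for the Authorization header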
369
370 def _call_api(self, ep, query, video_id, fatal=True, headers=None,
371 note='Downloading API JSON', errnote='Unable to download API page',
372 context=None, api_key=None, api_hostname=None, default_client='web'):
373
374 data = {'context': context} if context else {'context': self._extract_context(default_client=default_client)}
375 data.update(query)
376 real_headers = self.generate_api_headers(default_client=default_client)
377 real_headers.update({'content-type': 'application/json'})
378 if headers:
379 real_headers.update(headers)
380 return self._download_json(
381 'https://%s/youtubei/v1/%s' % (api_hostname or self._get_innertube_host(default_client), ep),
382 video_id=video_id, fatal=fatal, note=note, errnote=errnote,
383 data=json.dumps(data).encode('utf8'), headers=real_headers,
384 query={'key': api_key or self._extract_api_key()})
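# For reference, a call like self._call_api('player', {'videoId': 'BaW_jenozKc'}, video_id)
# with the default web client produces roughly the following request
# (endpoint/body illustrative, API key abbreviated):
#
#     POST https://www.youtube.com/youtubei/v1/player?key=AIzaSyAO_...
#     content-type: application/json
#
#     {"context": {"client": {"clientName": "WEB", "clientVersion": "2.20210622.10.00", "hl": "en"}},
#      "videoId": "BaW_jenozKc"}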
385
386 def extract_yt_initial_data(self, item_id, webpage, fatal=True):
387 data = self._search_regex(
388 (r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
389 self._YT_INITIAL_DATA_RE), webpage, 'yt initial data', fatal=fatal)
390 if data:
391 return self._parse_json(data, item_id, fatal=fatal)
392
393 @staticmethod
394 def _extract_session_index(*data):
395 """
396 Index of current account in account list.
397 See: https://github.com/yt-dlp/yt-dlp/pull/519
398 """
399 for ytcfg in data:
400 session_index = int_or_none(try_get(ytcfg, lambda x: x['SESSION_INDEX']))
401 if session_index is not None:
402 return session_index
403
404 # Deprecated?
405 def _extract_identity_token(self, ytcfg=None, webpage=None):
406 if ytcfg:
407 token = try_get(ytcfg, lambda x: x['ID_TOKEN'], compat_str)
408 if token:
409 return token
410 if webpage:
411 return self._search_regex(
412 r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
413 'identity token', default=None, fatal=False)
414
415 @staticmethod
416 def _extract_account_syncid(*args):
417 """
418 Extract syncId required to download private playlists of secondary channels
419 @params response and/or ytcfg
420 """
421 for data in args:
422 # ytcfg includes channel_syncid if on secondary channel
423 delegated_sid = try_get(data, lambda x: x['DELEGATED_SESSION_ID'], compat_str)
424 if delegated_sid:
425 return delegated_sid
426 sync_ids = (try_get(
427 data, (lambda x: x['responseContext']['mainAppWebResponseContext']['datasyncId'],
428 lambda x: x['DATASYNC_ID']), compat_str) or '').split('||')
429 if len(sync_ids) >= 2 and sync_ids[1]:
430 # datasyncid is of the form "channel_syncid||user_syncid" for secondary channel
431 # and just "user_syncid||" for primary channel. We only want the channel_syncid
432 return sync_ids[0]
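# e.g. (made-up ids): a datasyncId of 'channelSyncIdABC||userSyncIdXYZ' on a
# secondary channel yields 'channelSyncIdABC', while the primary channel's
# 'userSyncIdXYZ||' yields nothing because the part after '||' is empty.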
433
434 @staticmethod
435 def _extract_visitor_data(*args):
436 """
437 Extracts visitorData from an API response or ytcfg
438 Appears to be used to track session state
439 """
440 return traverse_obj(
441 args, (..., ('VISITOR_DATA', ('INNERTUBE_CONTEXT', 'client', 'visitorData'), ('responseContext', 'visitorData'))),
442 expected_type=compat_str, get_all=False)
443
444 @property
445 def is_authenticated(self):
446 return bool(self._generate_sapisidhash_header())
447
448 def extract_ytcfg(self, video_id, webpage):
449 if not webpage:
450 return {}
451 return self._parse_json(
452 self._search_regex(
453 r'ytcfg\.set\s*\(\s*({.+?})\s*\)\s*;', webpage, 'ytcfg',
454 default='{}'), video_id, fatal=False) or {}
455
456 def generate_api_headers(
457 self, *, ytcfg=None, account_syncid=None, session_index=None,
458 visitor_data=None, identity_token=None, api_hostname=None, default_client='web'):
459
460 origin = 'https://' + (api_hostname if api_hostname else self._get_innertube_host(default_client))
461 headers = {
462 'X-YouTube-Client-Name': compat_str(
463 self._ytcfg_get_safe(ytcfg, lambda x: x['INNERTUBE_CONTEXT_CLIENT_NAME'], default_client=default_client)),
464 'X-YouTube-Client-Version': self._extract_client_version(ytcfg, default_client),
465 'Origin': origin,
466 'X-Youtube-Identity-Token': identity_token or self._extract_identity_token(ytcfg),
467 'X-Goog-PageId': account_syncid or self._extract_account_syncid(ytcfg),
468 'X-Goog-Visitor-Id': visitor_data or self._extract_visitor_data(ytcfg)
469 }
470 if session_index is None:
471 session_index = self._extract_session_index(ytcfg)
472 if account_syncid or session_index is not None:
473 headers['X-Goog-AuthUser'] = session_index if session_index is not None else 0
474
475 auth = self._generate_sapisidhash_header(origin)
476 if auth is not None:
477 headers['Authorization'] = auth
478 headers['X-Origin'] = origin
479 return {h: v for h, v in headers.items() if v is not None}
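# For an anonymous session with no ytcfg this reduces to just the client headers
# of the default web client, roughly:
#     {'X-YouTube-Client-Name': '1',
#      'X-YouTube-Client-Version': '2.20210622.10.00',
#      'Origin': 'https://www.youtube.com'}
# Authenticated sessions additionally carry 'Authorization' (SAPISIDHASH), 'X-Origin'
# and, when available, 'X-Goog-AuthUser', 'X-Goog-PageId' and 'X-Goog-Visitor-Id'.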
480
481 @staticmethod
482 def _build_api_continuation_query(continuation, ctp=None):
483 query = {
484 'continuation': continuation
485 }
486 # TODO: Inconsistency with clickTrackingParams.
487 # Currently we have a fixed ctp contained within context (from ytcfg)
488 # and a ctp in root query for continuation.
489 if ctp:
490 query['clickTracking'] = {'clickTrackingParams': ctp}
491 return query
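# Example (made-up token/ctp values):
#     _build_api_continuation_query('4qmFsgK...', 'CBQQ7zs...')
#     == {'continuation': '4qmFsgK...', 'clickTracking': {'clickTrackingParams': 'CBQQ7zs...'}}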
492
493 @classmethod
494 def _extract_next_continuation_data(cls, renderer):
495 next_continuation = try_get(
496 renderer, (lambda x: x['continuations'][0]['nextContinuationData'],
497 lambda x: x['continuation']['reloadContinuationData']), dict)
498 if not next_continuation:
499 return
500 continuation = next_continuation.get('continuation')
501 if not continuation:
502 return
503 ctp = next_continuation.get('clickTrackingParams')
504 return cls._build_api_continuation_query(continuation, ctp)
505
506 @classmethod
507 def _extract_continuation_ep_data(cls, continuation_ep: dict):
508 if isinstance(continuation_ep, dict):
509 continuation = try_get(
510 continuation_ep, lambda x: x['continuationCommand']['token'], compat_str)
511 if not continuation:
512 return
513 ctp = continuation_ep.get('clickTrackingParams')
514 return cls._build_api_continuation_query(continuation, ctp)
515
516 @classmethod
517 def _extract_continuation(cls, renderer):
518 next_continuation = cls._extract_next_continuation_data(renderer)
519 if next_continuation:
520 return next_continuation
521
522 contents = []
523 for key in ('contents', 'items'):
524 contents.extend(try_get(renderer, lambda x: x[key], list) or [])
525
526 for content in contents:
527 if not isinstance(content, dict):
528 continue
529 continuation_ep = try_get(
530 content, (lambda x: x['continuationItemRenderer']['continuationEndpoint'],
531 lambda x: x['continuationItemRenderer']['button']['buttonRenderer']['command']),
532 dict)
533 continuation = cls._extract_continuation_ep_data(continuation_ep)
534 if continuation:
535 return continuation
536
537 @classmethod
538 def _extract_alerts(cls, data):
539 for alert_dict in try_get(data, lambda x: x['alerts'], list) or []:
540 if not isinstance(alert_dict, dict):
541 continue
542 for alert in alert_dict.values():
543 alert_type = alert.get('type')
544 if not alert_type:
545 continue
546 message = cls._get_text(alert, 'text')
547 if message:
548 yield alert_type, message
549
550 def _report_alerts(self, alerts, expected=True, fatal=True, only_once=False):
551 errors = []
552 warnings = []
553 for alert_type, alert_message in alerts:
554 if alert_type.lower() == 'error' and fatal:
555 errors.append([alert_type, alert_message])
556 else:
557 warnings.append([alert_type, alert_message])
558
559 for alert_type, alert_message in (warnings + errors[:-1]):
560 self.report_warning('YouTube said: %s - %s' % (alert_type, alert_message), only_once=only_once)
561 if errors:
562 raise ExtractorError('YouTube said: %s' % errors[-1][1], expected=expected)
563
564 def _extract_and_report_alerts(self, data, *args, **kwargs):
565 return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
566
567 def _extract_badges(self, renderer: dict):
568 badges = set()
569 for badge in try_get(renderer, lambda x: x['badges'], list) or []:
570 label = try_get(badge, lambda x: x['metadataBadgeRenderer']['label'], compat_str)
571 if label:
572 badges.add(label.lower())
573 return badges
574
575 @staticmethod
576 def _get_text(data, *path_list, max_runs=None):
577 for path in path_list or [None]:
578 if path is None:
579 obj = [data]
580 else:
581 obj = traverse_obj(data, path, default=[])
582 if not any(key is ... or isinstance(key, (list, tuple)) for key in variadic(path)):
583 obj = [obj]
584 for item in obj:
585 text = try_get(item, lambda x: x['simpleText'], compat_str)
586 if text:
587 return text
588 runs = try_get(item, lambda x: x['runs'], list) or []
589 if not runs and isinstance(item, list):
590 runs = item
591
592 runs = runs[:min(len(runs), max_runs or len(runs))]
593 text = ''.join(traverse_obj(runs, (..., 'text'), expected_type=str, default=[]))
594 if text:
595 return text
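# Both common text shapes are handled, e.g. (illustrative data):
#     _get_text({'title': {'simpleText': 'foo'}}, 'title')                        == 'foo'
#     _get_text({'title': {'runs': [{'text': 'foo'}, {'text': 'bar'}]}}, 'title') == 'foobar'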
596
597 def _extract_response(self, item_id, query, note='Downloading API JSON', headers=None,
598 ytcfg=None, check_get_keys=None, ep='browse', fatal=True, api_hostname=None,
599 default_client='web'):
600 response = None
601 last_error = None
602 count = -1
603 retries = self.get_param('extractor_retries', 3)
604 if check_get_keys is None:
605 check_get_keys = []
606 while count < retries:
607 count += 1
608 if last_error:
609 self.report_warning('%s. Retrying ...' % remove_end(last_error, '.'))
610 try:
611 response = self._call_api(
612 ep=ep, fatal=True, headers=headers,
613 video_id=item_id, query=query,
614 context=self._extract_context(ytcfg, default_client),
615 api_key=self._extract_api_key(ytcfg, default_client),
616 api_hostname=api_hostname, default_client=default_client,
617 note='%s%s' % (note, ' (retry #%d)' % count if count else ''))
618 except ExtractorError as e:
619 if isinstance(e.cause, network_exceptions):
620 if isinstance(e.cause, compat_HTTPError) and not is_html(e.cause.read(512)):
621 e.cause.seek(0)
622 yt_error = try_get(
623 self._parse_json(e.cause.read().decode(), item_id, fatal=False),
624 lambda x: x['error']['message'], compat_str)
625 if yt_error:
626 self._report_alerts([('ERROR', yt_error)], fatal=False)
627 # Downloading the page may result in intermittent 5xx HTTP errors
628 # Sometimes a 404 is also received. See: https://github.com/ytdl-org/youtube-dl/issues/28289
629 # We also want to catch all other network exceptions since errors in later pages can be troublesome
630 # See https://github.com/yt-dlp/yt-dlp/issues/507#issuecomment-880188210
631 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
632 last_error = error_to_compat_str(e.cause or e.msg)
633 if count < retries:
634 continue
635 if fatal:
636 raise
637 else:
638 self.report_warning(error_to_compat_str(e))
639 return
640
641 else:
642 try:
643 self._extract_and_report_alerts(response, only_once=True)
644 except ExtractorError as e:
645 # YouTube servers may return errors we want to retry on in a 200 OK response
646 # See: https://github.com/yt-dlp/yt-dlp/issues/839
647 if 'unknown error' in e.msg.lower():
648 last_error = e.msg
649 continue
650 if fatal:
651 raise
652 self.report_warning(error_to_compat_str(e))
653 return
654 if not check_get_keys or dict_get(response, check_get_keys):
655 break
656 # YouTube sometimes sends incomplete data
657 # See: https://github.com/ytdl-org/youtube-dl/issues/28194
658 last_error = 'Incomplete data received'
659 if count >= retries:
660 if fatal:
661 raise ExtractorError(last_error)
662 else:
663 self.report_warning(last_error)
664 return
665 return response
666
667 @staticmethod
668 def is_music_url(url):
669 return re.match(r'https?://music\.youtube\.com/', url) is not None
670
671 def _extract_video(self, renderer):
672 video_id = renderer.get('videoId')
673 title = self._get_text(renderer, 'title')
674 description = self._get_text(renderer, 'descriptionSnippet')
675 duration = parse_duration(self._get_text(
676 renderer, 'lengthText', ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'text')))
677 view_count_text = self._get_text(renderer, 'viewCountText') or ''
678 view_count = str_to_int(self._search_regex(
679 r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
680 'view count', default=None))
681
682 uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
683
684 return {
685 '_type': 'url',
686 'ie_key': YoutubeIE.ie_key(),
687 'id': video_id,
688 'url': f'https://www.youtube.com/watch?v={video_id}',
689 'title': title,
690 'description': description,
691 'duration': duration,
692 'view_count': view_count,
693 'uploader': uploader,
694 }
695
696
697 class YoutubeIE(YoutubeBaseInfoExtractor):
698 IE_DESC = 'YouTube'
699 _INVIDIOUS_SITES = (
700 # invidious-redirect websites
701 r'(?:www\.)?redirect\.invidious\.io',
702 r'(?:(?:www|dev)\.)?invidio\.us',
703 # Invidious instances taken from https://github.com/iv-org/documentation/blob/master/Invidious-Instances.md
704 r'(?:www\.)?invidious\.pussthecat\.org',
705 r'(?:www\.)?invidious\.zee\.li',
706 r'(?:www\.)?invidious\.ethibox\.fr',
707 r'(?:www\.)?invidious\.3o7z6yfxhbw7n3za4rss6l434kmv55cgw2vuziwuigpwegswvwzqipyd\.onion',
708 # youtube-dl invidious instances list
709 r'(?:(?:www|no)\.)?invidiou\.sh',
710 r'(?:(?:www|fi)\.)?invidious\.snopyta\.org',
711 r'(?:www\.)?invidious\.kabi\.tk',
712 r'(?:www\.)?invidious\.mastodon\.host',
713 r'(?:www\.)?invidious\.zapashcanon\.fr',
714 r'(?:www\.)?(?:invidious(?:-us)?|piped)\.kavin\.rocks',
715 r'(?:www\.)?invidious\.tinfoil-hat\.net',
716 r'(?:www\.)?invidious\.himiko\.cloud',
717 r'(?:www\.)?invidious\.reallyancient\.tech',
718 r'(?:www\.)?invidious\.tube',
719 r'(?:www\.)?invidiou\.site',
720 r'(?:www\.)?invidious\.site',
721 r'(?:www\.)?invidious\.xyz',
722 r'(?:www\.)?invidious\.nixnet\.xyz',
723 r'(?:www\.)?invidious\.048596\.xyz',
724 r'(?:www\.)?invidious\.drycat\.fr',
725 r'(?:www\.)?inv\.skyn3t\.in',
726 r'(?:www\.)?tube\.poal\.co',
727 r'(?:www\.)?tube\.connect\.cafe',
728 r'(?:www\.)?vid\.wxzm\.sx',
729 r'(?:www\.)?vid\.mint\.lgbt',
730 r'(?:www\.)?vid\.puffyan\.us',
731 r'(?:www\.)?yewtu\.be',
732 r'(?:www\.)?yt\.elukerio\.org',
733 r'(?:www\.)?yt\.lelux\.fi',
734 r'(?:www\.)?invidious\.ggc-project\.de',
735 r'(?:www\.)?yt\.maisputain\.ovh',
736 r'(?:www\.)?ytprivate\.com',
737 r'(?:www\.)?invidious\.13ad\.de',
738 r'(?:www\.)?invidious\.toot\.koeln',
739 r'(?:www\.)?invidious\.fdn\.fr',
740 r'(?:www\.)?watch\.nettohikari\.com',
741 r'(?:www\.)?invidious\.namazso\.eu',
742 r'(?:www\.)?invidious\.silkky\.cloud',
743 r'(?:www\.)?invidious\.exonip\.de',
744 r'(?:www\.)?invidious\.riverside\.rocks',
745 r'(?:www\.)?invidious\.blamefran\.net',
746 r'(?:www\.)?invidious\.moomoo\.de',
747 r'(?:www\.)?ytb\.trom\.tf',
748 r'(?:www\.)?yt\.cyberhost\.uk',
749 r'(?:www\.)?kgg2m7yk5aybusll\.onion',
750 r'(?:www\.)?qklhadlycap4cnod\.onion',
751 r'(?:www\.)?axqzx4s6s54s32yentfqojs3x5i7faxza6xo3ehd4bzzsg2ii4fv2iid\.onion',
752 r'(?:www\.)?c7hqkpkpemu6e7emz5b4vyz7idjgdvgaaa3dyimmeojqbgpea3xqjoid\.onion',
753 r'(?:www\.)?fz253lmuao3strwbfbmx46yu7acac2jz27iwtorgmbqlkurlclmancad\.onion',
754 r'(?:www\.)?invidious\.l4qlywnpwqsluw65ts7md3khrivpirse744un3x7mlskqauz5pyuzgqd\.onion',
755 r'(?:www\.)?owxfohz4kjyv25fvlqilyxast7inivgiktls3th44jhk3ej3i7ya\.b32\.i2p',
756 r'(?:www\.)?4l2dgddgsrkf2ous66i6seeyi6etzfgrue332grh2n7madpwopotugyd\.onion',
757 r'(?:www\.)?w6ijuptxiku4xpnnaetxvnkc5vqcdu7mgns2u77qefoixi63vbvnpnqd\.onion',
758 r'(?:www\.)?kbjggqkzv65ivcqj6bumvp337z6264huv5kpkwuv6gu5yjiskvan7fad\.onion',
759 r'(?:www\.)?grwp24hodrefzvjjuccrkw3mjq4tzhaaq32amf33dzpmuxe7ilepcmad\.onion',
760 r'(?:www\.)?hpniueoejy4opn7bc4ftgazyqjoeqwlvh2uiku2xqku6zpoa4bf5ruid\.onion',
761 )
762 _VALID_URL = r"""(?x)^
763 (
764 (?:https?://|//) # http(s):// or protocol-independent URL
765 (?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie|kids)?\.com|
766 (?:www\.)?deturl\.com/www\.youtube\.com|
767 (?:www\.)?pwnyoutube\.com|
768 (?:www\.)?hooktube\.com|
769 (?:www\.)?yourepeat\.com|
770 tube\.majestyc\.net|
771 %(invidious)s|
772 youtube\.googleapis\.com)/ # the various hostnames, with wildcard subdomains
773 (?:.*?\#/)? # handle anchor (#/) redirect urls
774 (?: # the various things that can precede the ID:
775 (?:(?:v|embed|e|shorts)/(?!videoseries)) # v/ or embed/ or e/ or shorts/
776 |(?: # or the v= param in all its forms
777 (?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
778 (?:\?|\#!?) # the params delimiter ? or # or #!
779 (?:.*?[&;])?? # any other preceding param (like /?s=tuff&v=xxxx or ?s=tuff&amp;v=V36LpHqtcDY)
780 v=
781 )
782 ))
783 |(?:
784 youtu\.be| # just youtu.be/xxxx
785 vid\.plus| # or vid.plus/xxxx
786 zwearz\.com/watch| # or zwearz.com/watch/xxxx
787 %(invidious)s
788 )/
789 |(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
790 )
791 )? # all until now is optional -> you can pass the naked ID
792 (?P<id>[0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
793 (?(1).+)? # if we found the ID, everything can follow
794 (?:\#|$)""" % {
795 'invidious': '|'.join(_INVIDIOUS_SITES),
796 }
797 _PLAYER_INFO_RE = (
798 r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
799 r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
800 r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
801 )
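# e.g. a player url such as '/s/player/5dd3f3b2/player_ias.vflset/en_US/base.js'
# (id made up) matches the first pattern and yields the player id '5dd3f3b2'.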
802 _formats = {
803 '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
804 '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'abr': 64, 'vcodec': 'h263'},
805 '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'},
806 '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'abr': 24, 'vcodec': 'mp4v'},
807 '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 96, 'vcodec': 'h264'},
808 '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
809 '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
810 '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
811 # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well
812 '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'},
813 '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
814 '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264'},
815 '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
816 '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8'},
817 '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
818 '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8'},
819 '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
820 '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264'},
821
822
823 # 3D videos
824 '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
825 '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -20},
826 '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
827 '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'abr': 192, 'vcodec': 'h264', 'preference': -20},
828 '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 128, 'vcodec': 'vp8', 'preference': -20},
829 '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
830 '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'abr': 192, 'vcodec': 'vp8', 'preference': -20},
831
832 # Apple HTTP Live Streaming
833 '91': {'ext': 'mp4', 'height': 144, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
834 '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
835 '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
836 '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 128, 'vcodec': 'h264', 'preference': -10},
837 '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
838 '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 256, 'vcodec': 'h264', 'preference': -10},
839 '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 48, 'vcodec': 'h264', 'preference': -10},
840 '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'abr': 24, 'vcodec': 'h264', 'preference': -10},
841
842 # DASH mp4 video
843 '133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'h264'},
844 '134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'h264'},
845 '135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
846 '136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264'},
847 '137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264'},
848 '138': {'ext': 'mp4', 'format_note': 'DASH video', 'vcodec': 'h264'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559)
849 '160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'h264'},
850 '212': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'h264'},
851 '264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'h264'},
852 '298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
853 '299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'h264', 'fps': 60},
854 '266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'h264'},
855
856 # Dash mp4 audio
857 '139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 48, 'container': 'm4a_dash'},
858 '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 128, 'container': 'm4a_dash'},
859 '141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'abr': 256, 'container': 'm4a_dash'},
860 '256': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
861 '258': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'container': 'm4a_dash'},
862 '325': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'dtse', 'container': 'm4a_dash'},
863 '328': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'ec-3', 'container': 'm4a_dash'},
864
865 # Dash webm
866 '167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
867 '168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
868 '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
869 '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
870 '218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
871 '219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp8'},
872 '278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'container': 'webm', 'vcodec': 'vp9'},
873 '242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'vp9'},
874 '243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'vp9'},
875 '244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
876 '245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
877 '246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'vp9'},
878 '247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9'},
879 '248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9'},
880 '271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9'},
881 # itag 272 videos are either 3840x2160 (e.g. RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug)
882 '272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
883 '302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
884 '303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
885 '308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
886 '313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9'},
887 '315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'vp9', 'fps': 60},
888
889 # Dash webm audio
890 '171': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 128},
891 '172': {'ext': 'webm', 'acodec': 'vorbis', 'format_note': 'DASH audio', 'abr': 256},
892
893 # Dash webm audio with opus inside
894 '249': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50},
895 '250': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70},
896 '251': {'ext': 'webm', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160},
897
898 # RTMP (unnamed)
899 '_rtmp': {'protocol': 'rtmp'},
900
901 # av01 video-only formats are sometimes served with "unknown" codecs
902 '394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
903 '395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
904 '396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
905 '397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
906 '398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
907 '399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
908 '400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
909 '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
910 }
911 _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
912
913 _GEO_BYPASS = False
914
915 IE_NAME = 'youtube'
916 _TESTS = [
917 {
918 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&t=1s&end=9',
919 'info_dict': {
920 'id': 'BaW_jenozKc',
921 'ext': 'mp4',
922 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
923 'uploader': 'Philipp Hagemeister',
924 'uploader_id': 'phihag',
925 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
926 'channel_id': 'UCLqxVugv74EIW3VWh2NOa3Q',
927 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCLqxVugv74EIW3VWh2NOa3Q',
928 'upload_date': '20121002',
929 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
930 'categories': ['Science & Technology'],
931 'tags': ['youtube-dl'],
932 'duration': 10,
933 'view_count': int,
934 'like_count': int,
935 'dislike_count': int,
936 'start_time': 1,
937 'end_time': 9,
938 }
939 },
940 {
941 'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
942 'note': 'Embed-only video (#1746)',
943 'info_dict': {
944 'id': 'yZIXLfi8CZQ',
945 'ext': 'mp4',
946 'upload_date': '20120608',
947 'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
948 'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
949 'uploader': 'SET India',
950 'uploader_id': 'setindia',
951 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/setindia',
952 'age_limit': 18,
953 },
954 'skip': 'Private video',
955 },
956 {
957 'url': 'https://www.youtube.com/watch?v=BaW_jenozKc&v=yZIXLfi8CZQ',
958 'note': 'Use the first video ID in the URL',
959 'info_dict': {
960 'id': 'BaW_jenozKc',
961 'ext': 'mp4',
962 'title': 'youtube-dl test video "\'/\\ä↭𝕐',
963 'uploader': 'Philipp Hagemeister',
964 'uploader_id': 'phihag',
965 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/phihag',
966 'upload_date': '20121002',
967 'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de .',
968 'categories': ['Science & Technology'],
969 'tags': ['youtube-dl'],
970 'duration': 10,
971 'view_count': int,
972 'like_count': int,
973 'dislike_count': int,
974 },
975 'params': {
976 'skip_download': True,
977 },
978 },
979 {
980 'url': 'https://www.youtube.com/watch?v=a9LDPn-MO4I',
981 'note': '256k DASH audio (format 141) via DASH manifest',
982 'info_dict': {
983 'id': 'a9LDPn-MO4I',
984 'ext': 'm4a',
985 'upload_date': '20121002',
986 'uploader_id': '8KVIDEO',
987 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
988 'description': '',
989 'uploader': '8KVIDEO',
990 'title': 'UHDTV TEST 8K VIDEO.mp4'
991 },
992 'params': {
993 'youtube_include_dash_manifest': True,
994 'format': '141',
995 },
996 'skip': 'format 141 not served anymore',
997 },
998 # DASH manifest with encrypted signature
999 {
1000 'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
1001 'info_dict': {
1002 'id': 'IB3lcPjvWLA',
1003 'ext': 'm4a',
1004 'title': 'Afrojack, Spree Wilson - The Spark (Official Music Video) ft. Spree Wilson',
1005 'description': 'md5:8f5e2b82460520b619ccac1f509d43bf',
1006 'duration': 244,
1007 'uploader': 'AfrojackVEVO',
1008 'uploader_id': 'AfrojackVEVO',
1009 'upload_date': '20131011',
1010 'abr': 129.495,
1011 },
1012 'params': {
1013 'youtube_include_dash_manifest': True,
1014 'format': '141/bestaudio[ext=m4a]',
1015 },
1016 },
1017 # Age-gate videos. See https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-888837000
1018 {
1019 'note': 'Embed allowed age-gate video',
1020 'url': 'https://youtube.com/watch?v=HtVdAasjOgU',
1021 'info_dict': {
1022 'id': 'HtVdAasjOgU',
1023 'ext': 'mp4',
1024 'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
1025 'description': r're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
1026 'duration': 142,
1027 'uploader': 'The Witcher',
1028 'uploader_id': 'WitcherGame',
1029 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/WitcherGame',
1030 'upload_date': '20140605',
1031 'age_limit': 18,
1032 },
1033 },
1034 {
1035 'note': 'Age-gate video with embed allowed in public site',
1036 'url': 'https://youtube.com/watch?v=HsUATh_Nc2U',
1037 'info_dict': {
1038 'id': 'HsUATh_Nc2U',
1039 'ext': 'mp4',
1040 'title': 'Godzilla 2 (Official Video)',
1041 'description': 'md5:bf77e03fcae5529475e500129b05668a',
1042 'upload_date': '20200408',
1043 'uploader_id': 'FlyingKitty900',
1044 'uploader': 'FlyingKitty',
1045 'age_limit': 18,
1046 },
1047 },
1048 {
1049 'note': 'Age-gate video embeddable only with clientScreen=EMBED',
1050 'url': 'https://youtube.com/watch?v=Tq92D6wQ1mg',
1051 'info_dict': {
1052 'id': 'Tq92D6wQ1mg',
1053 'title': '[MMD] Adios - EVERGLOW [+Motion DL]',
1054 'ext': 'mp4',
1055 'upload_date': '20191227',
1056 'uploader_id': 'UC1yoRdFoFJaCY-AGfD9W0wQ',
1057 'uploader': 'Projekt Melody',
1058 'description': 'md5:17eccca93a786d51bc67646756894066',
1059 'age_limit': 18,
1060 },
1061 },
1062 {
1063 'note': 'Non-age-gated, non-embeddable video',
1064 'url': 'https://youtube.com/watch?v=MeJVWBSsPAY',
1065 'info_dict': {
1066 'id': 'MeJVWBSsPAY',
1067 'ext': 'mp4',
1068 'title': 'OOMPH! - Such Mich Find Mich (Lyrics)',
1069 'uploader': 'Herr Lurik',
1070 'uploader_id': 'st3in234',
1071 'description': 'Fan Video. Music & Lyrics by OOMPH!.',
1072 'upload_date': '20130730',
1073 },
1074 },
1075 {
1076 'note': 'Non-bypassable age-gated video',
1077 'url': 'https://youtube.com/watch?v=Cr381pDsSsA',
1078 'only_matching': True,
1079 },
1080 # video_info is None (https://github.com/ytdl-org/youtube-dl/issues/4421)
1081 # YouTube Red ad is not captured for creator
1082 {
1083 'url': '__2ABJjxzNo',
1084 'info_dict': {
1085 'id': '__2ABJjxzNo',
1086 'ext': 'mp4',
1087 'duration': 266,
1088 'upload_date': '20100430',
1089 'uploader_id': 'deadmau5',
1090 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/deadmau5',
1091 'creator': 'deadmau5',
1092 'description': 'md5:6cbcd3a92ce1bc676fc4d6ab4ace2336',
1093 'uploader': 'deadmau5',
1094 'title': 'Deadmau5 - Some Chords (HD)',
1095 'alt_title': 'Some Chords',
1096 },
1097 'expected_warnings': [
1098 'DASH manifest missing',
1099 ]
1100 },
1101 # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
1102 {
1103 'url': 'lqQg6PlCWgI',
1104 'info_dict': {
1105 'id': 'lqQg6PlCWgI',
1106 'ext': 'mp4',
1107 'duration': 6085,
1108 'upload_date': '20150827',
1109 'uploader_id': 'olympic',
1110 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/olympic',
1111 'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
1112 'uploader': 'Olympics',
1113 'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
1114 },
1115 'params': {
1116 'skip_download': 'requires avconv',
1117 }
1118 },
1119 # Non-square pixels
1120 {
1121 'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
1122 'info_dict': {
1123 'id': '_b-2C3KPAM0',
1124 'ext': 'mp4',
1125 'stretched_ratio': 16 / 9.,
1126 'duration': 85,
1127 'upload_date': '20110310',
1128 'uploader_id': 'AllenMeow',
1129 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/AllenMeow',
1130 'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
1131 'uploader': '孫ᄋᄅ',
1132 'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
1133 },
1134 },
1135 # url_encoded_fmt_stream_map is empty string
1136 {
1137 'url': 'qEJwOuvDf7I',
1138 'info_dict': {
1139 'id': 'qEJwOuvDf7I',
1140 'ext': 'webm',
1141 'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
1142 'description': '',
1143 'upload_date': '20150404',
1144 'uploader_id': 'spbelect',
1145 'uploader': 'Наблюдатели Петербурга',
1146 },
1147 'params': {
1148 'skip_download': 'requires avconv',
1149 },
1150 'skip': 'This live event has ended.',
1151 },
1152 # Extraction from multiple DASH manifests (https://github.com/ytdl-org/youtube-dl/pull/6097)
1153 {
1154 'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
1155 'info_dict': {
1156 'id': 'FIl7x6_3R5Y',
1157 'ext': 'webm',
1158 'title': 'md5:7b81415841e02ecd4313668cde88737a',
1159 'description': 'md5:116377fd2963b81ec4ce64b542173306',
1160 'duration': 220,
1161 'upload_date': '20150625',
1162 'uploader_id': 'dorappi2000',
1163 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/dorappi2000',
1164 'uploader': 'dorappi2000',
1165 'formats': 'mincount:31',
1166 },
1167 'skip': 'not actual anymore',
1168 },
1169 # DASH manifest with segment_list
1170 {
1171 'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
1172 'md5': '8ce563a1d667b599d21064e982ab9e31',
1173 'info_dict': {
1174 'id': 'CsmdDsKjzN8',
1175 'ext': 'mp4',
1176 'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
1177 'uploader': 'Airtek',
1178 'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
1179 'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
1180 'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
1181 },
1182 'params': {
1183 'youtube_include_dash_manifest': True,
1184 'format': '135', # bestvideo
1185 },
1186 'skip': 'This live event has ended.',
1187 },
1188 {
1189 # Multifeed videos (multiple cameras), URL is for Main Camera
1190 'url': 'https://www.youtube.com/watch?v=jvGDaLqkpTg',
1191 'info_dict': {
1192 'id': 'jvGDaLqkpTg',
1193 'title': 'Tom Clancy Free Weekend Rainbow Whatever',
1194 'description': 'md5:e03b909557865076822aa169218d6a5d',
1195 },
1196 'playlist': [{
1197 'info_dict': {
1198 'id': 'jvGDaLqkpTg',
1199 'ext': 'mp4',
1200 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Main Camera)',
1201 'description': 'md5:e03b909557865076822aa169218d6a5d',
1202 'duration': 10643,
1203 'upload_date': '20161111',
1204 'uploader': 'Team PGP',
1205 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1206 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1207 },
1208 }, {
1209 'info_dict': {
1210 'id': '3AKt1R1aDnw',
1211 'ext': 'mp4',
1212 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 2)',
1213 'description': 'md5:e03b909557865076822aa169218d6a5d',
1214 'duration': 10991,
1215 'upload_date': '20161111',
1216 'uploader': 'Team PGP',
1217 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1218 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1219 },
1220 }, {
1221 'info_dict': {
1222 'id': 'RtAMM00gpVc',
1223 'ext': 'mp4',
1224 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 3)',
1225 'description': 'md5:e03b909557865076822aa169218d6a5d',
1226 'duration': 10995,
1227 'upload_date': '20161111',
1228 'uploader': 'Team PGP',
1229 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1230 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1231 },
1232 }, {
1233 'info_dict': {
1234 'id': '6N2fdlP3C5U',
1235 'ext': 'mp4',
1236 'title': 'Tom Clancy Free Weekend Rainbow Whatever (Camera 4)',
1237 'description': 'md5:e03b909557865076822aa169218d6a5d',
1238 'duration': 10990,
1239 'upload_date': '20161111',
1240 'uploader': 'Team PGP',
1241 'uploader_id': 'UChORY56LMMETTuGjXaJXvLg',
1242 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UChORY56LMMETTuGjXaJXvLg',
1243 },
1244 }],
1245 'params': {
1246 'skip_download': True,
1247 },
1248 'skip': 'Not multifeed anymore',
1249 },
1250 {
1251 # Multifeed video with comma in title (see https://github.com/ytdl-org/youtube-dl/issues/8536)
1252 'url': 'https://www.youtube.com/watch?v=gVfLd0zydlo',
1253 'info_dict': {
1254 'id': 'gVfLd0zydlo',
1255 'title': 'DevConf.cz 2016 Day 2 Workshops 1 14:00 - 15:30',
1256 },
1257 'playlist_count': 2,
1258 'skip': 'Not multifeed anymore',
1259 },
1260 {
1261 'url': 'https://vid.plus/FlRa-iH7PGw',
1262 'only_matching': True,
1263 },
1264 {
1265 'url': 'https://zwearz.com/watch/9lWxNJF-ufM/electra-woman-dyna-girl-official-trailer-grace-helbig.html',
1266 'only_matching': True,
1267 },
1268 {
1269 # Title with JS-like syntax "};" (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1270 # Also tests cut-off URL expansion in video description (see
1271 # https://github.com/ytdl-org/youtube-dl/issues/1892,
1272 # https://github.com/ytdl-org/youtube-dl/issues/8164)
1273 'url': 'https://www.youtube.com/watch?v=lsguqyKfVQg',
1274 'info_dict': {
1275 'id': 'lsguqyKfVQg',
1276 'ext': 'mp4',
1277 'title': '{dark walk}; Loki/AC/Dishonored; collab w/Elflover21',
1278 'alt_title': 'Dark Walk',
1279 'description': 'md5:8085699c11dc3f597ce0410b0dcbb34a',
1280 'duration': 133,
1281 'upload_date': '20151119',
1282 'uploader_id': 'IronSoulElf',
1283 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/IronSoulElf',
1284 'uploader': 'IronSoulElf',
1285 'creator': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1286 'track': 'Dark Walk',
1287 'artist': 'Todd Haberman;\nDaniel Law Heath and Aaron Kaplan',
1288 'album': 'Position Music - Production Music Vol. 143 - Dark Walk',
1289 },
1290 'params': {
1291 'skip_download': True,
1292 },
1293 },
1294 {
1295 # Tags with '};' (see https://github.com/ytdl-org/youtube-dl/issues/7468)
1296 'url': 'https://www.youtube.com/watch?v=Ms7iBXnlUO8',
1297 'only_matching': True,
1298 },
1299 {
1300 # Video with yt:stretch=17:0
1301 'url': 'https://www.youtube.com/watch?v=Q39EVAstoRM',
1302 'info_dict': {
1303 'id': 'Q39EVAstoRM',
1304 'ext': 'mp4',
1305 'title': 'Clash Of Clans#14 Dicas De Ataque Para CV 4',
1306 'description': 'md5:ee18a25c350637c8faff806845bddee9',
1307 'upload_date': '20151107',
1308 'uploader_id': 'UCCr7TALkRbo3EtFzETQF1LA',
1309 'uploader': 'CH GAMER DROID',
1310 },
1311 'params': {
1312 'skip_download': True,
1313 },
1314 'skip': 'This video does not exist.',
1315 },
1316 {
1317 # Video with incomplete 'yt:stretch=16:'
1318 'url': 'https://www.youtube.com/watch?v=FRhJzUSJbGI',
1319 'only_matching': True,
1320 },
1321 {
1322 # Video licensed under Creative Commons
1323 'url': 'https://www.youtube.com/watch?v=M4gD1WSo5mA',
1324 'info_dict': {
1325 'id': 'M4gD1WSo5mA',
1326 'ext': 'mp4',
1327 'title': 'md5:e41008789470fc2533a3252216f1c1d1',
1328 'description': 'md5:a677553cf0840649b731a3024aeff4cc',
1329 'duration': 721,
1330 'upload_date': '20150127',
1331 'uploader_id': 'BerkmanCenter',
1332 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/BerkmanCenter',
1333 'uploader': 'The Berkman Klein Center for Internet & Society',
1334 'license': 'Creative Commons Attribution license (reuse allowed)',
1335 },
1336 'params': {
1337 'skip_download': True,
1338 },
1339 },
1340 {
1341 # Channel-like uploader_url
1342 'url': 'https://www.youtube.com/watch?v=eQcmzGIKrzg',
1343 'info_dict': {
1344 'id': 'eQcmzGIKrzg',
1345 'ext': 'mp4',
1346 'title': 'Democratic Socialism and Foreign Policy | Bernie Sanders',
1347 'description': 'md5:13a2503d7b5904ef4b223aa101628f39',
1348 'duration': 4060,
1349 'upload_date': '20151119',
1350 'uploader': 'Bernie Sanders',
1351 'uploader_id': 'UCH1dpzjCEiGAt8CXkryhkZg',
1352 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCH1dpzjCEiGAt8CXkryhkZg',
1353 'license': 'Creative Commons Attribution license (reuse allowed)',
1354 },
1355 'params': {
1356 'skip_download': True,
1357 },
1358 },
1359 {
1360 'url': 'https://www.youtube.com/watch?feature=player_embedded&amp;amp;v=V36LpHqtcDY',
1361 'only_matching': True,
1362 },
1363 {
1364 # YouTube Red paid video (https://github.com/ytdl-org/youtube-dl/issues/10059)
1365 'url': 'https://www.youtube.com/watch?v=i1Ko8UG-Tdo',
1366 'only_matching': True,
1367 },
1368 {
1369 # Rental video preview
1370 'url': 'https://www.youtube.com/watch?v=yYr8q0y5Jfg',
1371 'info_dict': {
1372 'id': 'uGpuVWrhIzE',
1373 'ext': 'mp4',
1374 'title': 'Piku - Trailer',
1375 'description': 'md5:c36bd60c3fd6f1954086c083c72092eb',
1376 'upload_date': '20150811',
1377 'uploader': 'FlixMatrix',
1378 'uploader_id': 'FlixMatrixKaravan',
1379 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/FlixMatrixKaravan',
1380 'license': 'Standard YouTube License',
1381 },
1382 'params': {
1383 'skip_download': True,
1384 },
1385 'skip': 'This video is not available.',
1386 },
1387 {
1388 # YouTube Red video with episode data
1389 'url': 'https://www.youtube.com/watch?v=iqKdEhx-dD4',
1390 'info_dict': {
1391 'id': 'iqKdEhx-dD4',
1392 'ext': 'mp4',
1393 'title': 'Isolation - Mind Field (Ep 1)',
1394 'description': 'md5:f540112edec5d09fc8cc752d3d4ba3cd',
1395 'duration': 2085,
1396 'upload_date': '20170118',
1397 'uploader': 'Vsauce',
1398 'uploader_id': 'Vsauce',
1399 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Vsauce',
1400 'series': 'Mind Field',
1401 'season_number': 1,
1402 'episode_number': 1,
1403 },
1404 'params': {
1405 'skip_download': True,
1406 },
1407 'expected_warnings': [
1408 'Skipping DASH manifest',
1409 ],
1410 },
1411 {
1412 # The following content has been identified by the YouTube community
1413 # as inappropriate or offensive to some audiences.
1414 'url': 'https://www.youtube.com/watch?v=6SJNVb0GnPI',
1415 'info_dict': {
1416 'id': '6SJNVb0GnPI',
1417 'ext': 'mp4',
1418 'title': 'Race Differences in Intelligence',
1419 'description': 'md5:5d161533167390427a1f8ee89a1fc6f1',
1420 'duration': 965,
1421 'upload_date': '20140124',
1422 'uploader': 'New Century Foundation',
1423 'uploader_id': 'UCEJYpZGqgUob0zVVEaLhvVg',
1424 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCEJYpZGqgUob0zVVEaLhvVg',
1425 },
1426 'params': {
1427 'skip_download': True,
1428 },
1429 'skip': 'This video has been removed for violating YouTube\'s policy on hate speech.',
1430 },
1431 {
1432 # itag 212
1433 'url': '1t24XAntNCY',
1434 'only_matching': True,
1435 },
1436 {
1437 # geo restricted to JP
1438 'url': 'sJL6WA-aGkQ',
1439 'only_matching': True,
1440 },
1441 {
1442 'url': 'https://invidio.us/watch?v=BaW_jenozKc',
1443 'only_matching': True,
1444 },
1445 {
1446 'url': 'https://redirect.invidious.io/watch?v=BaW_jenozKc',
1447 'only_matching': True,
1448 },
1449 {
1450 # from https://nitter.pussthecat.org/YouTube/status/1360363141947944964#m
1451 'url': 'https://redirect.invidious.io/Yh0AhrY9GjA',
1452 'only_matching': True,
1453 },
1454 {
1455 # DRM protected
1456 'url': 'https://www.youtube.com/watch?v=s7_qI6_mIXc',
1457 'only_matching': True,
1458 },
1459 {
1460 # Video with formats using an unsupported adaptive stream type
1461 'url': 'https://www.youtube.com/watch?v=Z4Vy8R84T1U',
1462 'info_dict': {
1463 'id': 'Z4Vy8R84T1U',
1464 'ext': 'mp4',
1465 'title': 'saman SMAN 53 Jakarta(Sancety) opening COFFEE4th at SMAN 53 Jakarta',
1466 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
1467 'duration': 433,
1468 'upload_date': '20130923',
1469 'uploader': 'Amelia Putri Harwita',
1470 'uploader_id': 'UCpOxM49HJxmC1qCalXyB3_Q',
1471 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCpOxM49HJxmC1qCalXyB3_Q',
1472 'formats': 'maxcount:10',
1473 },
1474 'params': {
1475 'skip_download': True,
1476 'youtube_include_dash_manifest': False,
1477 },
1478 'skip': 'No longer relevant',
1479 },
1480 {
1481 # Youtube Music Auto-generated description
1482 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1483 'info_dict': {
1484 'id': 'MgNrAu2pzNs',
1485 'ext': 'mp4',
1486 'title': 'Voyeur Girl',
1487 'description': 'md5:7ae382a65843d6df2685993e90a8628f',
1488 'upload_date': '20190312',
1489 'uploader': 'Stephen - Topic',
1490 'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
1491 'artist': 'Stephen',
1492 'track': 'Voyeur Girl',
1493 'album': 'it\'s too much love to know my dear',
1494 'release_date': '20190313',
1495 'release_year': 2019,
1496 },
1497 'params': {
1498 'skip_download': True,
1499 },
1500 },
1501 {
1502 'url': 'https://www.youtubekids.com/watch?v=3b8nCWDgZ6Q',
1503 'only_matching': True,
1504 },
1505 {
1506 # invalid -> valid video id redirection
1507 'url': 'DJztXj2GPfl',
1508 'info_dict': {
1509 'id': 'DJztXj2GPfk',
1510 'ext': 'mp4',
1511 'title': 'Panjabi MC - Mundian To Bach Ke (The Dictator Soundtrack)',
1512 'description': 'md5:bf577a41da97918e94fa9798d9228825',
1513 'upload_date': '20090125',
1514 'uploader': 'Prochorowka',
1515 'uploader_id': 'Prochorowka',
1516 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/Prochorowka',
1517 'artist': 'Panjabi MC',
1518 'track': 'Beware of the Boys (Mundian to Bach Ke) - Motivo Hi-Lectro Remix',
1519 'album': 'Beware of the Boys (Mundian To Bach Ke)',
1520 },
1521 'params': {
1522 'skip_download': True,
1523 },
1524 'skip': 'Video unavailable',
1525 },
1526 {
1527 # empty description results in an empty string
1528 'url': 'https://www.youtube.com/watch?v=x41yOUIvK2k',
1529 'info_dict': {
1530 'id': 'x41yOUIvK2k',
1531 'ext': 'mp4',
1532 'title': 'IMG 3456',
1533 'description': '',
1534 'upload_date': '20170613',
1535 'uploader_id': 'ElevageOrVert',
1536 'uploader': 'ElevageOrVert',
1537 },
1538 'params': {
1539 'skip_download': True,
1540 },
1541 },
1542 {
1543 # with '};' inside yt initial data (see [1])
1544 # see [2] for an example with '};' inside ytInitialPlayerResponse
1545 # 1. https://github.com/ytdl-org/youtube-dl/issues/27093
1546 # 2. https://github.com/ytdl-org/youtube-dl/issues/27216
1547 'url': 'https://www.youtube.com/watch?v=CHqg6qOn4no',
1548 'info_dict': {
1549 'id': 'CHqg6qOn4no',
1550 'ext': 'mp4',
1551 'title': 'Part 77 Sort a list of simple types in c#',
1552 'description': 'md5:b8746fa52e10cdbf47997903f13b20dc',
1553 'upload_date': '20130831',
1554 'uploader_id': 'kudvenkat',
1555 'uploader': 'kudvenkat',
1556 },
1557 'params': {
1558 'skip_download': True,
1559 },
1560 },
1561 {
1562 # another example of '};' in ytInitialData
1563 'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
1564 'only_matching': True,
1565 },
1566 {
1567 'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
1568 'only_matching': True,
1569 },
1570 {
1571 # https://github.com/ytdl-org/youtube-dl/pull/28094
1572 'url': 'OtqTfy26tG0',
1573 'info_dict': {
1574 'id': 'OtqTfy26tG0',
1575 'ext': 'mp4',
1576 'title': 'Burn Out',
1577 'description': 'md5:8d07b84dcbcbfb34bc12a56d968b6131',
1578 'upload_date': '20141120',
1579 'uploader': 'The Cinematic Orchestra - Topic',
1580 'uploader_id': 'UCIzsJBIyo8hhpFm1NK0uLgw',
1581 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCIzsJBIyo8hhpFm1NK0uLgw',
1582 'artist': 'The Cinematic Orchestra',
1583 'track': 'Burn Out',
1584 'album': 'Every Day',
1585 'release_date': None,
1586 'release_year': None,
1587 },
1588 'params': {
1589 'skip_download': True,
1590 },
1591 },
1592 {
1593 # controversial video, only works with bpctr when authenticated with cookies
1594 'url': 'https://www.youtube.com/watch?v=nGC3D_FkCmg',
1595 'only_matching': True,
1596 },
1597 {
1598 # controversial video, requires bpctr/contentCheckOk
1599 'url': 'https://www.youtube.com/watch?v=SZJvDhaSDnc',
1600 'info_dict': {
1601 'id': 'SZJvDhaSDnc',
1602 'ext': 'mp4',
1603 'title': 'San Diego teen commits suicide after bullying over embarrassing video',
1604 'channel_id': 'UC-SJ6nODDmufqBzPBwCvYvQ',
1605 'uploader': 'CBS This Morning',
1606 'uploader_id': 'CBSThisMorning',
1607 'upload_date': '20140716',
1608 'description': 'md5:acde3a73d3f133fc97e837a9f76b53b7'
1609 }
1610 },
1611 {
1612 # restricted location, https://github.com/ytdl-org/youtube-dl/issues/28685
1613 'url': 'cBvYw8_A0vQ',
1614 'info_dict': {
1615 'id': 'cBvYw8_A0vQ',
1616 'ext': 'mp4',
1617 'title': '4K Ueno Okachimachi Street Scenes 上野御徒町歩き',
1618 'description': 'md5:ea770e474b7cd6722b4c95b833c03630',
1619 'upload_date': '20201120',
1620 'uploader': 'Walk around Japan',
1621 'uploader_id': 'UC3o_t8PzBmXf5S9b7GLx1Mw',
1622 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UC3o_t8PzBmXf5S9b7GLx1Mw',
1623 },
1624 'params': {
1625 'skip_download': True,
1626 },
1627 }, {
1628 # Has multiple audio streams
1629 'url': 'WaOKSUlf4TM',
1630 'only_matching': True
1631 }, {
1632 # Requires Premium: has format 141 when requested using YTM url
1633 'url': 'https://music.youtube.com/watch?v=XclachpHxis',
1634 'only_matching': True
1635 }, {
1636 # multiple subtitles with same lang_code
1637 'url': 'https://www.youtube.com/watch?v=wsQiKKfKxug',
1638 'only_matching': True,
1639 }, {
1640 # Force use android client fallback
1641 'url': 'https://www.youtube.com/watch?v=YOelRv7fMxY',
1642 'info_dict': {
1643 'id': 'YOelRv7fMxY',
1644 'title': 'DIGGING A SECRET TUNNEL Part 1',
1645 'ext': '3gp',
1646 'upload_date': '20210624',
1647 'channel_id': 'UCp68_FLety0O-n9QU6phsgw',
1648 'uploader': 'colinfurze',
1649 'uploader_id': 'colinfurze',
1650 'channel_url': r're:https?://(?:www\.)?youtube\.com/channel/UCp68_FLety0O-n9QU6phsgw',
1651 'description': 'md5:b5096f56af7ccd7a555c84db81738b22'
1652 },
1653 'params': {
1654 'format': '17', # 3gp format available on android
1655 'extractor_args': {'youtube': {'player_client': ['android']}},
1656 },
1657 },
1658 {
1659 # Skip download of additional client configs (remix client config in this case)
1660 'url': 'https://music.youtube.com/watch?v=MgNrAu2pzNs',
1661 'only_matching': True,
1662 'params': {
1663 'extractor_args': {'youtube': {'player_skip': ['configs']}},
1664 },
1665 }, {
1666 # shorts
1667 'url': 'https://www.youtube.com/shorts/BGQWPY4IigY',
1668 'only_matching': True,
1669 },
1670 ]
1671
1672 @classmethod
1673 def suitable(cls, url):
1674 from ..utils import parse_qs
1675
1676 qs = parse_qs(url)
1677 if qs.get('list', [None])[0]:
1678 return False
1679 return super(YoutubeIE, cls).suitable(url)
1680
1681 def __init__(self, *args, **kwargs):
1682 super(YoutubeIE, self).__init__(*args, **kwargs)
1683 self._code_cache = {}
1684 self._player_cache = {}
1685
1686 def _extract_player_url(self, *ytcfgs, webpage=None):
1687 player_url = traverse_obj(
1688 ytcfgs, (..., 'PLAYER_JS_URL'), (..., 'WEB_PLAYER_CONTEXT_CONFIGS', ..., 'jsUrl'),
1689 get_all=False, expected_type=compat_str)
1690 if not player_url:
1691 return
1692 if player_url.startswith('//'):
1693 player_url = 'https:' + player_url
1694 elif not re.match(r'https?://', player_url):
1695 player_url = compat_urlparse.urljoin(
1696 'https://www.youtube.com', player_url)
1697 return player_url
1698
1699 def _download_player_url(self, video_id, fatal=False):
1700 res = self._download_webpage(
1701 'https://www.youtube.com/iframe_api',
1702 note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
1703 if res:
1704 player_version = self._search_regex(
1705 r'player\\?/([0-9a-fA-F]{8})\\?/', res, 'player version', fatal=fatal)
1706 if player_version:
1707 return f'https://www.youtube.com/s/player/{player_version}/player_ias.vflset/en_US/base.js'
1708
1709 def _signature_cache_id(self, example_sig):
1710 """ Return a string representation of a signature """
1711 return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
1712
1713 @classmethod
1714 def _extract_player_info(cls, player_url):
1715 for player_re in cls._PLAYER_INFO_RE:
1716 id_m = re.search(player_re, player_url)
1717 if id_m:
1718 break
1719 else:
1720 raise ExtractorError('Cannot identify player %r' % player_url)
1721 return id_m.group('id')
1722
1723 def _load_player(self, video_id, player_url, fatal=True) -> bool:
1724 player_id = self._extract_player_info(player_url)
1725 if player_id not in self._code_cache:
1726 code = self._download_webpage(
1727 player_url, video_id, fatal=fatal,
1728 note='Downloading player ' + player_id,
1729 errnote='Download of %s failed' % player_url)
1730 if code:
1731 self._code_cache[player_id] = code
1732 return player_id in self._code_cache
1733
1734 def _extract_signature_function(self, video_id, player_url, example_sig):
1735 player_id = self._extract_player_info(player_url)
1736
1737 # Read from filesystem cache
1738 func_id = 'js_%s_%s' % (
1739 player_id, self._signature_cache_id(example_sig))
1740 assert os.path.basename(func_id) == func_id
1741
1742 cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
1743 if cache_spec is not None:
1744 return lambda s: ''.join(s[i] for i in cache_spec)
1745
1746 if self._load_player(video_id, player_url):
1747 code = self._code_cache[player_id]
1748 res = self._parse_sig_js(code)
1749
1750 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1751 cache_res = res(test_string)
1752 cache_spec = [ord(c) for c in cache_res]
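# The test string is simply chr(0)..chr(N-1), so cache_res records, for each
# output position, the input index it was taken from. Storing these indices
# lets the scramble be replayed later without re-running the JS interpreter.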
1753
1754 self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
1755 return res
1756
1757 def _print_sig_code(self, func, example_sig):
1758 def gen_sig_code(idxs):
1759 def _genslice(start, end, step):
1760 starts = '' if start == 0 else str(start)
1761 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
1762 steps = '' if step == 1 else (':%d' % step)
1763 return 's[%s%s%s]' % (starts, ends, steps)
1764
1765 step = None
1766 # Squelch pyflakes warnings - start will be set when step is set
1767 start = '(Never used)'
1768 for i, prev in zip(idxs[1:], idxs[:-1]):
1769 if step is not None:
1770 if i - prev == step:
1771 continue
1772 yield _genslice(start, prev, step)
1773 step = None
1774 continue
1775 if i - prev in [-1, 1]:
1776 step = i - prev
1777 start = prev
1778 continue
1779 else:
1780 yield 's[%d]' % prev
1781 if step is None:
1782 yield 's[%d]' % i
1783 else:
1784 yield _genslice(start, i, step)
1785
1786 test_string = ''.join(map(compat_chr, range(len(example_sig))))
1787 cache_res = func(test_string)
1788 cache_spec = [ord(c) for c in cache_res]
1789 expr_code = ' + '.join(gen_sig_code(cache_spec))
1790 signature_id_tuple = '(%s)' % (
1791 ', '.join(compat_str(len(p)) for p in example_sig.split('.')))
1792 code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
1793 ' return %s\n') % (signature_id_tuple, expr_code)
1794 self.to_screen('Extracted signature function:\n' + code)
1795
1796 def _parse_sig_js(self, jscode):
1797 funcname = self._search_regex(
1798 (r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1799 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1800 r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
1801 r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
1802 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
1803 r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1804 r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
1805 # Obsolete patterns
1806 r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1807 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
1808 r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1809 r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1810 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1811 r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1812 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
1813 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
1814 jscode, 'Initial JS player signature function name', group='sig')
1815
1816 jsi = JSInterpreter(jscode)
1817 initial_function = jsi.extract_function(funcname)
1818 return lambda s: initial_function([s])
1819
1820 def _decrypt_signature(self, s, video_id, player_url):
1821 """Turn the encrypted s field into a working signature"""
1822
1823 if player_url is None:
1824 raise ExtractorError('Cannot decrypt signature without player_url')
1825
1826 try:
1827 player_id = (player_url, self._signature_cache_id(s))
1828 if player_id not in self._player_cache:
1829 func = self._extract_signature_function(
1830 video_id, player_url, s
1831 )
1832 self._player_cache[player_id] = func
1833 func = self._player_cache[player_id]
1834 if self.get_param('youtube_print_sig_code'):
1835 self._print_sig_code(func, s)
1836 return func(s)
1837 except Exception as e:
1838 tb = traceback.format_exc()
1839 raise ExtractorError(
1840 'Signature extraction failed: ' + tb, cause=e)
1841
1842 def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):
1843 """
1844 Extract signatureTimestamp (sts)
1845 Required to tell API what sig/player version is in use.
1846 """
1847 sts = None
1848 if isinstance(ytcfg, dict):
1849 sts = int_or_none(ytcfg.get('STS'))
1850
1851 if not sts:
1852 # Attempt to extract from player
1853 if player_url is None:
1854 error_msg = 'Cannot extract signature timestamp without player_url.'
1855 if fatal:
1856 raise ExtractorError(error_msg)
1857 self.report_warning(error_msg)
1858 return
1859 if self._load_player(video_id, player_url, fatal=fatal):
1860 player_id = self._extract_player_info(player_url)
1861 code = self._code_cache[player_id]
1862 sts = int_or_none(self._search_regex(
1863 r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code,
1864 'JS player signature timestamp', group='sts', fatal=fatal))
1865 return sts
1866
1867 def _mark_watched(self, video_id, player_responses):
1868 playback_url = traverse_obj(
1869 player_responses, (..., 'playbackTracking', 'videostatsPlaybackUrl', 'baseUrl'),
1870 expected_type=url_or_none, get_all=False)
1871 if not playback_url:
1872 self.report_warning('Unable to mark watched')
1873 return
1874 parsed_playback_url = compat_urlparse.urlparse(playback_url)
1875 qs = compat_urlparse.parse_qs(parsed_playback_url.query)
1876
1877 # cpn generation algorithm is reverse engineered from base.js.
1878 # In fact it works even with dummy cpn.
1879 CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
1880 cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
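# Example: randint may return 200; 200 & 63 == 8, i.e. every draw is masked
# down to an index into the 64-character URL-safe alphabet above.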
1881
1882 qs.update({
1883 'ver': ['2'],
1884 'cpn': [cpn],
1885 })
1886 playback_url = compat_urlparse.urlunparse(
1887 parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
1888
1889 self._download_webpage(
1890 playback_url, video_id, 'Marking watched',
1891 'Unable to mark watched', fatal=False)
1892
1893 @staticmethod
1894 def _extract_urls(webpage):
1895 # Embedded YouTube player
1896 entries = [
1897 unescapeHTML(mobj.group('url'))
1898 for mobj in re.finditer(r'''(?x)
1899 (?:
1900 <iframe[^>]+?src=|
1901 data-video-url=|
1902 <embed[^>]+?src=|
1903 embedSWF\(?:\s*|
1904 <object[^>]+data=|
1905 new\s+SWFObject\(
1906 )
1907 (["\'])
1908 (?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
1909 (?:embed|v|p)/[0-9A-Za-z_-]{11}.*?)
1910 \1''', webpage)]
1911
1912 # lazyYT YouTube embed
1913 entries.extend(list(map(
1914 unescapeHTML,
1915 re.findall(r'class="lazyYT" data-youtube-id="([^"]+)"', webpage))))
1916
1917 # Wordpress "YouTube Video Importer" plugin
1918 matches = re.findall(r'''(?x)<div[^>]+
1919 class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
1920 data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
1921 entries.extend(m[-1] for m in matches)
1922
1923 return entries
1924
1925 @staticmethod
1926 def _extract_url(webpage):
1927 urls = YoutubeIE._extract_urls(webpage)
1928 return urls[0] if urls else None
1929
1930 @classmethod
1931 def extract_id(cls, url):
1932 mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
1933 if mobj is None:
1934 raise ExtractorError('Invalid URL: %s' % url)
1935 return mobj.group('id')
1936
1937 def _extract_chapters_from_json(self, data, duration):
1938 chapter_list = traverse_obj(
1939 data, (
1940 'playerOverlays', 'playerOverlayRenderer', 'decoratedPlayerBarRenderer',
1941 'decoratedPlayerBarRenderer', 'playerBar', 'chapteredPlayerBarRenderer', 'chapters'
1942 ), expected_type=list)
1943
1944 return self._extract_chapters(
1945 chapter_list,
1946 chapter_time=lambda chapter: float_or_none(
1947 traverse_obj(chapter, ('chapterRenderer', 'timeRangeStartMillis')), scale=1000),
1948 chapter_title=lambda chapter: traverse_obj(
1949 chapter, ('chapterRenderer', 'title', 'simpleText'), expected_type=str),
1950 duration=duration)
1951
1952 def _extract_chapters_from_engagement_panel(self, data, duration):
1953 content_list = traverse_obj(
1954 data,
1955 ('engagementPanels', ..., 'engagementPanelSectionListRenderer', 'content', 'macroMarkersListRenderer', 'contents'),
1956 expected_type=list, default=[])
1957 chapter_time = lambda chapter: parse_duration(self._get_text(chapter, 'timeDescription'))
1958 chapter_title = lambda chapter: self._get_text(chapter, 'title')
1959
1960 return next((
1961 filter(None, (
1962 self._extract_chapters(
1963 traverse_obj(contents, (..., 'macroMarkersListItemRenderer')),
1964 chapter_time, chapter_title, duration)
1965 for contents in content_list
1966 ))), [])
1967
1968 def _extract_chapters(self, chapter_list, chapter_time, chapter_title, duration):
1969 chapters = []
1970 last_chapter = {'start_time': 0}
1971 for idx, chapter in enumerate(chapter_list or []):
1972 title = chapter_title(chapter)
1973 start_time = chapter_time(chapter)
1974 if start_time is None:
1975 continue
1976 last_chapter['end_time'] = start_time
1977 if start_time < last_chapter['start_time']:
1978 if idx == 1:
1979 chapters.pop()
1980 self.report_warning('Invalid start time for chapter "%s"' % last_chapter['title'])
1981 else:
1982 self.report_warning(f'Invalid start time for chapter "{title}"')
1983 continue
1984 last_chapter = {'start_time': start_time, 'title': title}
1985 chapters.append(last_chapter)
1986 last_chapter['end_time'] = duration
1987 return chapters
1988
1989 def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
1990 return self._parse_json(self._search_regex(
1991 (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
1992 regex), webpage, name, default='{}'), video_id, fatal=False)
1993
1994 @staticmethod
1995 def parse_time_text(time_text):
1996 """
1997 Parse the comment time text
1998 time_text is in the format 'X units ago (edited)'
1999 """
2000 time_text_split = time_text.split(' ')
2001 if len(time_text_split) >= 3:
2002 try:
2003 return datetime_from_str('now-%s%s' % (time_text_split[0], time_text_split[1]), precision='auto')
2004 except ValueError:
2005 return None
2006
2007 def _extract_comment(self, comment_renderer, parent=None):
2008 comment_id = comment_renderer.get('commentId')
2009 if not comment_id:
2010 return
2011
2012 text = self._get_text(comment_renderer, 'contentText')
2013
2014 # note: timestamp is an estimate calculated from the current time and time_text
2015 time_text = self._get_text(comment_renderer, 'publishedTimeText') or ''
2016 time_text_dt = self.parse_time_text(time_text)
2017 # parse_time_text may return None; keep `timestamp` bound in that case as well
2018 timestamp = calendar.timegm(time_text_dt.timetuple()) if isinstance(time_text_dt, datetime.datetime) else None
2019 author = self._get_text(comment_renderer, 'authorText')
2020 author_id = try_get(comment_renderer,
2021 lambda x: x['authorEndpoint']['browseEndpoint']['browseId'], compat_str)
2022
2023 votes = parse_count(try_get(comment_renderer, (lambda x: x['voteCount']['simpleText'],
2024 lambda x: x['likeCount']), compat_str)) or 0
2025 author_thumbnail = try_get(comment_renderer,
2026 lambda x: x['authorThumbnail']['thumbnails'][-1]['url'], compat_str)
2027
2028 author_is_uploader = try_get(comment_renderer, lambda x: x['authorIsChannelOwner'], bool)
2029 is_favorited = 'creatorHeart' in (try_get(
2030 comment_renderer, lambda x: x['actionButtons']['commentActionButtonsRenderer'], dict) or {})
2031 return {
2032 'id': comment_id,
2033 'text': text,
2034 'timestamp': timestamp,
2035 'time_text': time_text,
2036 'like_count': votes,
2037 'is_favorited': is_favorited,
2038 'author': author,
2039 'author_id': author_id,
2040 'author_thumbnail': author_thumbnail,
2041 'author_is_uploader': author_is_uploader,
2042 'parent': parent or 'root'
2043 }
2044
2045 def _comment_entries(self, root_continuation_data, ytcfg, video_id, parent=None, comment_counts=None):
2046
2047 def extract_header(contents):
2048 _continuation = None
2049 for content in contents:
2050 comments_header_renderer = try_get(content, lambda x: x['commentsHeaderRenderer'])
2051 expected_comment_count = parse_count(self._get_text(
2052 comments_header_renderer, 'countText', 'commentsCount', max_runs=1))
2053
2054 if expected_comment_count:
2055 comment_counts[1] = expected_comment_count
2056 self.to_screen('Downloading ~%d comments' % expected_comment_count)
2057 sort_mode_str = self._configuration_arg('comment_sort', [''])[0]
2058 comment_sort_index = int(sort_mode_str != 'top') # 1 = new, 0 = top
2059
2060 sort_menu_item = try_get(
2061 comments_header_renderer,
2062 lambda x: x['sortMenu']['sortFilterSubMenuRenderer']['subMenuItems'][comment_sort_index], dict) or {}
2063 sort_continuation_ep = sort_menu_item.get('serviceEndpoint') or {}
2064
2065 _continuation = self._extract_continuation_ep_data(sort_continuation_ep) or self._extract_continuation(sort_menu_item)
2066 if not _continuation:
2067 continue
2068
2069 sort_text = sort_menu_item.get('title')
2070 if isinstance(sort_text, compat_str):
2071 sort_text = sort_text.lower()
2072 else:
2073 sort_text = 'top comments' if comment_sort_index == 0 else 'newest first'
2074 self.to_screen('Sorting comments by %s' % sort_text)
2075 break
2076 return _continuation
2077
2078 def extract_thread(contents):
2079 if not parent:
2080 comment_counts[2] = 0
2081 for content in contents:
2082 comment_thread_renderer = try_get(content, lambda x: x['commentThreadRenderer'])
2083 comment_renderer = try_get(
2084 comment_thread_renderer, (lambda x: x['comment']['commentRenderer'], dict)) or try_get(
2085 content, (lambda x: x['commentRenderer'], dict))
2086
2087 if not comment_renderer:
2088 continue
2089 comment = self._extract_comment(comment_renderer, parent)
2090 if not comment:
2091 continue
2092 comment_counts[0] += 1
2093 yield comment
2094 # Attempt to get the replies
2095 comment_replies_renderer = try_get(
2096 comment_thread_renderer, lambda x: x['replies']['commentRepliesRenderer'], dict)
2097
2098 if comment_replies_renderer:
2099 comment_counts[2] += 1
2100 comment_entries_iter = self._comment_entries(
2101 comment_replies_renderer, ytcfg, video_id,
2102 parent=comment.get('id'), comment_counts=comment_counts)
2103
2104 for reply_comment in comment_entries_iter:
2105 yield reply_comment
2106
2107 # YouTube comments have a max depth of 2
2108 max_depth = int_or_none(self._configuration_arg('max_comment_depth', [''])[0]) or float('inf')
2109 if max_depth == 1 and parent:
2110 return
2111 if not comment_counts:
2112 # comments so far, estimated total comments, current comment thread #
2113 comment_counts = [0, 0, 0]
2114
2115 continuation = self._extract_continuation(root_continuation_data)
2116 if continuation and len(continuation['continuation']) < 27:
2117 self.write_debug('Detected old API continuation token. Generating new API compatible token.')
2118 continuation_token = self._generate_comment_continuation(video_id)
2119 continuation = self._build_api_continuation_query(continuation_token, None)
2120
2121 message = self._get_text(root_continuation_data, ('contents', ..., 'messageRenderer', 'text'), max_runs=1)
2122 if message and not parent:
2123 self.report_warning(message, video_id=video_id)
2124
2125 visitor_data = None
2126 is_first_continuation = parent is None
2127
2128 for page_num in itertools.count(0):
2129 if not continuation:
2130 break
2131 headers = self.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data)
2132 comment_prog_str = '(%d/%d)' % (comment_counts[0], comment_counts[1])
2133 if page_num == 0:
2134 if is_first_continuation:
2135 note_prefix = 'Downloading comment section API JSON'
2136 else:
2137 note_prefix = ' Downloading comment API JSON reply thread %d %s' % (
2138 comment_counts[2], comment_prog_str)
2139 else:
2140 note_prefix = '%sDownloading comment%s API JSON page %d %s' % (
2141 ' ' if parent else '', ' replies' if parent else '',
2142 page_num, comment_prog_str)
2143
2144 response = self._extract_response(
2145 item_id=None, query=continuation,
2146 ep='next', ytcfg=ytcfg, headers=headers, note=note_prefix,
2147 check_get_keys=('onResponseReceivedEndpoints', 'continuationContents'))
2148 if not response:
2149 break
2150 visitor_data = try_get(
2151 response,
2152 lambda x: x['responseContext']['webResponseContextExtensionData']['ytConfigData']['visitorData'],
2153 compat_str) or visitor_data
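# Presumably carrying visitorData forward keeps the paginated /next requests
# tied to the same logical session; the previously seen value is reused when
# a response does not include one.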
2154
2155 continuation_contents = dict_get(response, ('onResponseReceivedEndpoints', 'continuationContents'))
2156
2157 continuation = None
2158 if isinstance(continuation_contents, list):
2159 for continuation_section in continuation_contents:
2160 if not isinstance(continuation_section, dict):
2161 continue
2162 continuation_items = try_get(
2163 continuation_section,
2164 (lambda x: x['reloadContinuationItemsCommand']['continuationItems'],
2165 lambda x: x['appendContinuationItemsAction']['continuationItems']),
2166 list) or []
2167 if is_first_continuation:
2168 continuation = extract_header(continuation_items)
2169 is_first_continuation = False
2170 if continuation:
2171 break
2172 continue
2173 count = 0
2174 for count, entry in enumerate(extract_thread(continuation_items), 1):
2175 yield entry
2176 continuation = self._extract_continuation({'contents': continuation_items})
2177 if continuation:
2178 # Sometimes YouTube provides a continuation without any comments
2179 # In most cases we end up just downloading these with very few comments to come.
2180 if count == 0:
2181 if not parent:
2182 self.report_warning('No comments received - assuming end of comments')
2183 continuation = None
2184 break
2185
2186 # Deprecated response structure
2187 elif isinstance(continuation_contents, dict):
2188 known_continuation_renderers = ('itemSectionContinuation', 'commentRepliesContinuation')
2189 for key, continuation_renderer in continuation_contents.items():
2190 if key not in known_continuation_renderers:
2191 continue
2192 if not isinstance(continuation_renderer, dict):
2193 continue
2194 if is_first_continuation:
2195 header_continuation_items = [continuation_renderer.get('header') or {}]
2196 continuation = extract_header(header_continuation_items)
2197 is_first_continuation = False
2198 if continuation:
2199 break
2200
2201 # Sometimes YouTube provides a continuation without any comments
2202 # In most cases we end up just downloading these with very few comments to come.
2203 count = 0
2204 for count, entry in enumerate(extract_thread(continuation_renderer.get('contents') or {}), 1):
2205 yield entry
2206 continuation = self._extract_continuation(continuation_renderer)
2207 if count == 0:
2208 if not parent:
2209 self.report_warning('No comments received - assuming end of comments')
2210 continuation = None
2211 break
2212
2213 @staticmethod
2214 def _generate_comment_continuation(video_id):
2215 """
2216 Generates initial comment section continuation token from given video id
2217 """
2218 b64_vid_id = base64.b64encode(bytes(video_id.encode('utf-8')))
2219 parts = ('Eg0SCw==', b64_vid_id, 'GAYyJyIRIgs=', b64_vid_id, 'MAB4AjAAQhBjb21tZW50cy1zZWN0aW9u')
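# The hard-coded strings appear to be base64 fragments of the protobuf
# envelope Innertube expects; the video id is spliced in twice, everything is
# decoded to raw bytes, concatenated and re-encoded into a fresh token.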
2220 new_continuation_intlist = list(itertools.chain.from_iterable(
2221 [bytes_to_intlist(base64.b64decode(part)) for part in parts]))
2222 return base64.b64encode(intlist_to_bytes(new_continuation_intlist)).decode('utf-8')
2223
2224 def _get_comments(self, ytcfg, video_id, contents, webpage):
2225 """Entry for comment extraction"""
2226 def _real_comment_extract(contents):
2227 renderer = next((
2228 item for item in traverse_obj(contents, (..., 'itemSectionRenderer'), default={})
2229 if item.get('sectionIdentifier') == 'comment-item-section'), None)
2230 yield from self._comment_entries(renderer, ytcfg, video_id)
2231
2232 max_comments = int_or_none(self._configuration_arg('max_comments', [''])[0])
2233 # Force English regardless of account setting to prevent parsing issues
2234 # See: https://github.com/yt-dlp/yt-dlp/issues/532
2235 ytcfg = copy.deepcopy(ytcfg)
2236 traverse_obj(
2237 ytcfg, ('INNERTUBE_CONTEXT', 'client'), expected_type=dict, default={})['hl'] = 'en'
2238 return itertools.islice(_real_comment_extract(contents), 0, max_comments)
2239
2240 @staticmethod
2241 def _get_checkok_params():
2242 return {'contentCheckOk': True, 'racyCheckOk': True}
2243
2244 @classmethod
2245 def _generate_player_context(cls, sts=None):
2246 context = {
2247 'html5Preference': 'HTML5_PREF_WANTS',
2248 }
2249 if sts is not None:
2250 context['signatureTimestamp'] = sts
2251 return {
2252 'playbackContext': {
2253 'contentPlaybackContext': context
2254 },
2255 **cls._get_checkok_params()
2256 }
2257
2258 @staticmethod
2259 def _is_agegated(player_response):
2260 if traverse_obj(player_response, ('playabilityStatus', 'desktopLegacyAgeGateReason')):
2261 return True
2262
2263 reasons = traverse_obj(player_response, ('playabilityStatus', ('status', 'reason')), default=[])
2264 AGE_GATE_REASONS = (
2265 'confirm your age', 'age-restricted', 'inappropriate', # reason
2266 'age_verification_required', 'age_check_required', # status
2267 )
2268 return any(expected in reason for expected in AGE_GATE_REASONS for reason in reasons)
2269
2270 @staticmethod
2271 def _is_unplayable(player_response):
2272 return traverse_obj(player_response, ('playabilityStatus', 'status')) == 'UNPLAYABLE'
2273
2274 def _extract_player_response(self, client, video_id, master_ytcfg, player_ytcfg, player_url, initial_pr):
2275
2276 session_index = self._extract_session_index(player_ytcfg, master_ytcfg)
2277 syncid = self._extract_account_syncid(player_ytcfg, master_ytcfg, initial_pr)
2278 sts = self._extract_signature_timestamp(video_id, player_url, master_ytcfg, fatal=False) if player_url else None
2279 headers = self.generate_api_headers(
2280 ytcfg=player_ytcfg, account_syncid=syncid, session_index=session_index, default_client=client)
2281
2282 yt_query = {'videoId': video_id}
2283 yt_query.update(self._generate_player_context(sts))
2284 return self._extract_response(
2285 item_id=video_id, ep='player', query=yt_query,
2286 ytcfg=player_ytcfg, headers=headers, fatal=True,
2287 default_client=client,
2288 note='Downloading %s player API JSON' % client.replace('_', ' ').strip()
2289 ) or None
2290
2291 def _get_requested_clients(self, url, smuggled_data):
2292 requested_clients = []
2293 allowed_clients = sorted(
2294 [client for client in INNERTUBE_CLIENTS.keys() if client[:1] != '_'],
2295 key=lambda client: INNERTUBE_CLIENTS[client]['priority'], reverse=True)
2296 for client in self._configuration_arg('player_client'):
2297 if client in allowed_clients:
2298 requested_clients.append(client)
2299 elif client == 'all':
2300 requested_clients.extend(allowed_clients)
2301 else:
2302 self.report_warning(f'Skipping unsupported client {client}')
2303 if not requested_clients:
2304 requested_clients = ['android', 'web']
2305
2306 if smuggled_data.get('is_music_url') or self.is_music_url(url):
2307 requested_clients.extend(
2308 f'{client}_music' for client in requested_clients if f'{client}_music' in INNERTUBE_CLIENTS)
2309
2310 return orderedSet(requested_clients)
2311
2312 def _extract_player_ytcfg(self, client, video_id):
2313 url = {
2314 'web_music': 'https://music.youtube.com',
2315 'web_embedded': f'https://www.youtube.com/embed/{video_id}?html5=1'
2316 }.get(client)
2317 if not url:
2318 return {}
2319 webpage = self._download_webpage(url, video_id, fatal=False, note=f'Downloading {client} config')
2320 return self.extract_ytcfg(video_id, webpage) or {}
2321
2322 def _extract_player_responses(self, clients, video_id, webpage, master_ytcfg):
2323 initial_pr = None
2324 if webpage:
2325 initial_pr = self._extract_yt_initial_variable(
2326 webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE,
2327 video_id, 'initial player response')
2328
2329 original_clients = clients
2330 clients = clients[::-1]
2331 prs = []
2332
2333 def append_client(client_name):
2334 if client_name in INNERTUBE_CLIENTS and client_name not in original_clients:
2335 clients.append(client_name)
2336
2337 # Android player_response does not have microFormats which are needed for
2338 # extraction of some data. So we return the initial_pr with formats
2339 # stripped out even if not requested by the user
2340 # See: https://github.com/yt-dlp/yt-dlp/issues/501
2341 if initial_pr:
2342 pr = dict(initial_pr)
2343 pr['streamingData'] = None
2344 prs.append(pr)
2345
2346 last_error = None
2347 tried_iframe_fallback = False
2348 player_url = None
2349 while clients:
2350 client = clients.pop()
2351 player_ytcfg = master_ytcfg if client == 'web' else {}
2352 if 'configs' not in self._configuration_arg('player_skip'):
2353 player_ytcfg = self._extract_player_ytcfg(client, video_id) or player_ytcfg
2354
2355 player_url = player_url or self._extract_player_url(master_ytcfg, player_ytcfg, webpage=webpage)
2356 require_js_player = self._get_default_ytcfg(client).get('REQUIRE_JS_PLAYER')
2357 if 'js' in self._configuration_arg('player_skip'):
2358 require_js_player = False
2359 player_url = None
2360
2361 if not player_url and not tried_iframe_fallback and require_js_player:
2362 player_url = self._download_player_url(video_id)
2363 tried_iframe_fallback = True
2364
2365 try:
2366 pr = initial_pr if client == 'web' and initial_pr else self._extract_player_response(
2367 client, video_id, player_ytcfg or master_ytcfg, player_ytcfg, player_url if require_js_player else None, initial_pr)
2368 except ExtractorError as e:
2369 if last_error:
2370 self.report_warning(last_error)
2371 last_error = e
2372 continue
2373
2374 if pr:
2375 prs.append(pr)
2376
2377 # creator clients can bypass AGE_VERIFICATION_REQUIRED if logged in
2378 if client.endswith('_agegate') and self._is_unplayable(pr) and self.is_authenticated:
2379 append_client(client.replace('_agegate', '_creator'))
2380 elif self._is_agegated(pr):
2381 append_client(f'{client}_agegate')
2382
2383 if last_error:
2384 if not len(prs):
2385 raise last_error
2386 self.report_warning(last_error)
2387 return prs, player_url
2388
2389 def _extract_formats(self, streaming_data, video_id, player_url, is_live):
2390 itags, stream_ids = [], []
2391 itag_qualities, res_qualities = {}, {}
2392 q = qualities([
2393 # Normally, 'tiny' is the smallest video-only format. But
2394 # audio-only formats with unknown quality may get tagged as tiny
2395 'tiny',
2396 'audio_quality_ultralow', 'audio_quality_low', 'audio_quality_medium', 'audio_quality_high', # Audio only formats
2397 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'
2398 ])
2399 streaming_formats = traverse_obj(streaming_data, (..., ('formats', 'adaptiveFormats'), ...), default=[])
2400
2401 for fmt in streaming_formats:
2402 if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
2403 continue
2404
2405 itag = str_or_none(fmt.get('itag'))
2406 audio_track = fmt.get('audioTrack') or {}
2407 stream_id = '%s.%s' % (itag or '', audio_track.get('id', ''))
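# streaming_data aggregates every queried client, so the same itag/audio-track
# pair can appear more than once; stream_id is used to skip such duplicates.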
2408 if stream_id in stream_ids:
2409 continue
2410
2411 quality = fmt.get('quality')
2412 height = int_or_none(fmt.get('height'))
2413 if quality == 'tiny' or not quality:
2414 quality = fmt.get('audioQuality', '').lower() or quality
2415 # The 3gp format (17) in android client has a quality of "small",
2416 # but is actually worse than other formats
2417 if itag == '17':
2418 quality = 'tiny'
2419 if quality:
2420 if itag:
2421 itag_qualities[itag] = quality
2422 if height:
2423 res_qualities[height] = quality
2424 # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
2425 # (adding `&sq=0` to the URL) and parsing emsg box to determine the
2426 # number of fragments that would subsequently be requested with (`&sq=N`)
2427 if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
2428 continue
2429
2430 fmt_url = fmt.get('url')
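# When there is no plain URL, signatureCipher is a query string packing the
# base url ('url'), the scrambled signature ('s') and the query parameter
# name to send the descrambled signature back under ('sp', defaulting to 'signature').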
2431 if not fmt_url:
2432 sc = compat_parse_qs(fmt.get('signatureCipher'))
2433 fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
2434 encrypted_sig = try_get(sc, lambda x: x['s'][0])
2435 if not (sc and fmt_url and encrypted_sig):
2436 continue
2437 if not player_url:
2438 continue
2439 signature = self._decrypt_signature(sc['s'][0], video_id, player_url)
2440 sp = try_get(sc, lambda x: x['sp'][0]) or 'signature'
2441 fmt_url += '&' + sp + '=' + signature
2442
2443 if itag:
2444 itags.append(itag)
2445 stream_ids.append(stream_id)
2446
2447 tbr = float_or_none(
2448 fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
2449 dct = {
2450 'asr': int_or_none(fmt.get('audioSampleRate')),
2451 'filesize': int_or_none(fmt.get('contentLength')),
2452 'format_id': itag,
2453 'format_note': ', '.join(filter(None, (
2454 '%s%s' % (audio_track.get('displayName') or '',
2455 ' (default)' if audio_track.get('audioIsDefault') else ''),
2456 fmt.get('qualityLabel') or quality.replace('audio_quality_', '')))),
2457 'fps': int_or_none(fmt.get('fps')),
2458 'height': height,
2459 'quality': q(quality),
2460 'tbr': tbr,
2461 'url': fmt_url,
2462 'width': int_or_none(fmt.get('width')),
2463 'language': audio_track.get('id', '').split('.')[0],
2464 'language_preference': 1 if audio_track.get('audioIsDefault') else -1,
2465 }
2466 mime_mobj = re.match(
2467 r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
2468 if mime_mobj:
2469 dct['ext'] = mimetype2ext(mime_mobj.group(1))
2470 dct.update(parse_codecs(mime_mobj.group(2)))
2471 no_audio = dct.get('acodec') == 'none'
2472 no_video = dct.get('vcodec') == 'none'
2473 if no_audio:
2474 dct['vbr'] = tbr
2475 if no_video:
2476 dct['abr'] = tbr
2477 if no_audio or no_video:
2478 dct['downloader_options'] = {
2479 # Youtube throttles chunks >~10M
2480 'http_chunk_size': 10485760,
2481 }
2482 if dct.get('ext'):
2483 dct['container'] = dct['ext'] + '_dash'
2484 yield dct
2485
2486 skip_manifests = self._configuration_arg('skip')
2487 get_dash = (
2488 (not is_live or self._configuration_arg('include_live_dash'))
2489 and 'dash' not in skip_manifests and self.get_param('youtube_include_dash_manifest', True))
2490 get_hls = 'hls' not in skip_manifests and self.get_param('youtube_include_hls_manifest', True)
2491
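# Place HLS/DASH formats on the same quality ladder as the formats above,
# matching either by itag or by video height.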
2492 def guess_quality(f):
2493 for val, qdict in ((f.get('format_id'), itag_qualities), (f.get('height'), res_qualities)):
2494 if val in qdict:
2495 return q(qdict[val])
2496 return -1
2497
2498 for sd in streaming_data:
2499 hls_manifest_url = get_hls and sd.get('hlsManifestUrl')
2500 if hls_manifest_url:
2501 for f in self._extract_m3u8_formats(hls_manifest_url, video_id, 'mp4', fatal=False):
2502 itag = self._search_regex(
2503 r'/itag/(\d+)', f['url'], 'itag', default=None)
2504 if itag in itags:
2505 itag += '-hls'
2506 if itag in itags:
2507 continue
2508 if itag:
2509 f['format_id'] = itag
2510 itags.append(itag)
2511 f['quality'] = guess_quality(f)
2512 yield f
2513
2514 dash_manifest_url = get_dash and sd.get('dashManifestUrl')
2515 if dash_manifest_url:
2516 for f in self._extract_mpd_formats(dash_manifest_url, video_id, fatal=False):
2517 itag = f['format_id']
2518 if itag in itags:
2519 itag += '-dash'
2520 if itag in itags:
2521 continue
2522 if itag:
2523 f['format_id'] = itag
2524 itags.append(itag)
2525 f['quality'] = guess_quality(f)
2526 filesize = int_or_none(self._search_regex(
2527 r'/clen/(\d+)', f.get('fragment_base_url')
2528 or f['url'], 'file size', default=None))
2529 if filesize:
2530 f['filesize'] = filesize
2531 yield f
2532
2533 def _real_extract(self, url):
2534 url, smuggled_data = unsmuggle_url(url, {})
2535 video_id = self._match_id(url)
2536
2537 base_url = self.http_scheme() + '//www.youtube.com/'
2538 webpage_url = base_url + 'watch?v=' + video_id
2539 webpage = None
2540 if 'webpage' not in self._configuration_arg('player_skip'):
2541 webpage = self._download_webpage(
2542 webpage_url + '&bpctr=9999999999&has_verified=1', video_id, fatal=False)
2543
2544 master_ytcfg = self.extract_ytcfg(video_id, webpage) or self._get_default_ytcfg()
2545
2546 player_responses, player_url = self._extract_player_responses(
2547 self._get_requested_clients(url, smuggled_data),
2548 video_id, webpage, master_ytcfg)
2549
2550 get_first = lambda obj, keys, **kwargs: traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
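# get_first(objs, keys): look up the key path in each player response and
# return the first match.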
2551
2552 playability_statuses = traverse_obj(
2553 player_responses, (..., 'playabilityStatus'), expected_type=dict, default=[])
2554
2555 trailer_video_id = get_first(
2556 playability_statuses,
2557 ('errorScreen', 'playerLegacyDesktopYpcTrailerRenderer', 'trailerVideoId'),
2558 expected_type=str)
2559 if trailer_video_id:
2560 return self.url_result(
2561 trailer_video_id, self.ie_key(), trailer_video_id)
2562
2563 search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None))
2564 if webpage else (lambda x: None))
2565
2566 video_details = traverse_obj(
2567 player_responses, (..., 'videoDetails'), expected_type=dict, default=[])
2568 microformats = traverse_obj(
2569 player_responses, (..., 'microformat', 'playerMicroformatRenderer'),
2570 expected_type=dict, default=[])
2571 video_title = (
2572 get_first(video_details, 'title')
2573 or self._get_text(microformats, (..., 'title'))
2574 or search_meta(['og:title', 'twitter:title', 'title']))
2575 video_description = get_first(video_details, 'shortDescription')
2576
2577 if not smuggled_data.get('force_singlefeed', False):
2578 if not self.get_param('noplaylist'):
2579 multifeed_metadata_list = get_first(
2580 player_responses,
2581 ('multicamera', 'playerLegacyMulticameraRenderer', 'metadataList'),
2582 expected_type=str)
2583 if multifeed_metadata_list:
2584 entries = []
2585 feed_ids = []
2586 for feed in multifeed_metadata_list.split(','):
2587 # Unquote should take place before split on comma (,) since textual
2588 # fields may contain commas as well (see
2589 # https://github.com/ytdl-org/youtube-dl/issues/8536)
2590 feed_data = compat_parse_qs(
2591 compat_urllib_parse_unquote_plus(feed))
2592
2593 def feed_entry(name):
2594 return try_get(
2595 feed_data, lambda x: x[name][0], compat_str)
2596
2597 feed_id = feed_entry('id')
2598 if not feed_id:
2599 continue
2600 feed_title = feed_entry('title')
2601 title = video_title
2602 if feed_title:
2603 title += ' (%s)' % feed_title
2604 entries.append({
2605 '_type': 'url_transparent',
2606 'ie_key': 'Youtube',
2607 'url': smuggle_url(
2608 '%swatch?v=%s' % (base_url, feed_data['id'][0]),
2609 {'force_singlefeed': True}),
2610 'title': title,
2611 })
2612 feed_ids.append(feed_id)
2613 self.to_screen(
2614 'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
2615 % (', '.join(feed_ids), video_id))
2616 return self.playlist_result(
2617 entries, video_id, video_title, video_description)
2618 else:
2619 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
2620
2621 live_broadcast_details = traverse_obj(microformats, (..., 'liveBroadcastDetails'))
2622 is_live = get_first(video_details, 'isLive')
2623 if is_live is None:
2624 is_live = get_first(live_broadcast_details, 'isLiveNow')
2625
2626 streaming_data = traverse_obj(player_responses, (..., 'streamingData'), default=[])
2627 formats = list(self._extract_formats(streaming_data, video_id, player_url, is_live))
2628
2629 if not formats:
2630 if not self.get_param('allow_unplayable_formats') and traverse_obj(streaming_data, (..., 'licenseInfos')):
2631 self.report_drm(video_id)
2632 pemr = get_first(
2633 playability_statuses,
2634 ('errorScreen', 'playerErrorMessageRenderer'), expected_type=dict) or {}
2635 reason = self._get_text(pemr, 'reason') or get_first(playability_statuses, 'reason')
2636 subreason = clean_html(self._get_text(pemr, 'subreason') or '')
2637 if subreason:
2638 if subreason == 'The uploader has not made this video available in your country.':
2639 countries = get_first(microformats, 'availableCountries')
2640 if not countries:
2641 regions_allowed = search_meta('regionsAllowed')
2642 countries = regions_allowed.split(',') if regions_allowed else None
2643 self.raise_geo_restricted(subreason, countries, metadata_available=True)
2644 reason += f'. {subreason}'
2645 if reason:
2646 self.raise_no_formats(reason, expected=True)
2647
2648 for f in formats:
2649 if '&c=WEB&' in f['url'] and '&ratebypass=yes&' not in f['url']: # throttled
2650 f['source_preference'] = -10
2651 # TODO: this method is not reliable
2652 f['format_note'] = format_field(f, 'format_note', '%s ') + '(maybe throttled)'
2653
2654 # Source is given priority since formats that throttle are given lower source_preference
2655 # When throttling issue is fully fixed, remove this
2656 self._sort_formats(formats, ('quality', 'res', 'fps', 'hdr:12', 'source', 'codec:vp9.2', 'lang'))
2657
2658 keywords = get_first(video_details, 'keywords', expected_type=list) or []
2659 if not keywords and webpage:
2660 keywords = [
2661 unescapeHTML(m.group('content'))
2662 for m in re.finditer(self._meta_regex('og:video:tag'), webpage)]
2663 for keyword in keywords:
2664 if keyword.startswith('yt:stretch='):
2665 mobj = re.search(r'(\d+)\s*:\s*(\d+)', keyword)
2666 if mobj:
2667 # NB: float is intentional for forcing float division
2668 w, h = (float(v) for v in mobj.groups())
2669 if w > 0 and h > 0:
2670 ratio = w / h
2671 for f in formats:
2672 if f.get('vcodec') != 'none':
2673 f['stretched_ratio'] = ratio
2674 break
2675
2676 thumbnails = []
2677 thumbnail_dicts = traverse_obj(
2678 (video_details, microformats), (..., ..., 'thumbnail', 'thumbnails', ...),
2679 expected_type=dict, default=[])
2680 for thumbnail in thumbnail_dicts:
2681 thumbnail_url = thumbnail.get('url')
2682 if not thumbnail_url:
2683 continue
2684 # Sometimes youtube gives a wrong thumbnail URL. See:
2685 # https://github.com/yt-dlp/yt-dlp/issues/233
2686 # https://github.com/ytdl-org/youtube-dl/issues/28023
2687 if 'maxresdefault' in thumbnail_url:
2688 thumbnail_url = thumbnail_url.split('?')[0]
2689 thumbnails.append({
2690 'url': thumbnail_url,
2691 'height': int_or_none(thumbnail.get('height')),
2692 'width': int_or_none(thumbnail.get('width')),
2693 })
2694 thumbnail_url = search_meta(['og:image', 'twitter:image'])
2695 if thumbnail_url:
2696 thumbnails.append({
2697 'url': thumbnail_url,
2698 })
2699 original_thumbnails = thumbnails.copy()
2700
2701 # The best resolution thumbnail sometimes does not appear in the webpage
2702 # See: https://github.com/ytdl-org/youtube-dl/issues/29049, https://github.com/yt-dlp/yt-dlp/issues/340
2703 # List of possible thumbnails - Ref: <https://stackoverflow.com/a/20542029>
2704 thumbnail_names = [
2705 'maxresdefault', 'hq720', 'sddefault', 'sd1', 'sd2', 'sd3',
2706 'hqdefault', 'hq1', 'hq2', 'hq3', '0',
2707 'mqdefault', 'mq1', 'mq2', 'mq3',
2708 'default', '1', '2', '3'
2709 ]
2710 n_thumbnail_names = len(thumbnail_names)
2711 thumbnails.extend({
2712 'url': 'https://i.ytimg.com/vi{webp}/{video_id}/{name}{live}.{ext}'.format(
2713 video_id=video_id, name=name, ext=ext,
2714 webp='_webp' if ext == 'webp' else '', live='_live' if is_live else ''),
2715 } for name in thumbnail_names for ext in ('webp', 'jpg'))
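# Rank every thumbnail: URLs matching an earlier name in thumbnail_names score
# higher (preference drops by 2 per position), webp is preferred over jpg for
# the same name, and URLs matching no known name sort last.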
2716 for thumb in thumbnails:
2717 i = next((i for i, t in enumerate(thumbnail_names) if f'/{video_id}/{t}' in thumb['url']), n_thumbnail_names)
2718 thumb['preference'] = (0 if '.webp' in thumb['url'] else -1) - (2 * i)
2719 self._remove_duplicate_formats(thumbnails)
2720 self._downloader._sort_thumbnails(original_thumbnails)
2721
2722 category = get_first(microformats, 'category') or search_meta('genre')
2723 channel_id = str_or_none(
2724 get_first(video_details, 'channelId')
2725 or get_first(microformats, 'externalChannelId')
2726 or search_meta('channelId'))
2727 duration = int_or_none(
2728 get_first(video_details, 'lengthSeconds')
2729 or get_first(microformats, 'lengthSeconds')
2730 or parse_duration(search_meta('duration'))) or None
2731 owner_profile_url = get_first(microformats, 'ownerProfileUrl')
2732
2733 live_content = get_first(video_details, 'isLiveContent')
2734 is_upcoming = get_first(video_details, 'isUpcoming')
2735 if is_live is None:
2736 if is_upcoming or live_content is False:
2737 is_live = False
2738 if is_upcoming is None and (live_content or is_live):
2739 is_upcoming = False
2740 live_starttime = parse_iso8601(get_first(live_broadcast_details, 'startTimestamp'))
2741 live_endtime = parse_iso8601(get_first(live_broadcast_details, 'endTimestamp'))
2742 if not duration and live_endtime and live_starttime:
2743 duration = live_endtime - live_starttime
2744
2745 info = {
2746 'id': video_id,
2747 'title': self._live_title(video_title) if is_live else video_title,
2748 'formats': formats,
2749 'thumbnails': thumbnails,
2750 # The best thumbnail that we are sure exists. Prevents unnecessary
2751 # URL checking if the user doesn't care about getting the best possible thumbnail
2752 'thumbnail': traverse_obj(original_thumbnails, (-1, 'url')),
2753 'description': video_description,
2754 'upload_date': unified_strdate(
2755 get_first(microformats, 'uploadDate')
2756 or search_meta('uploadDate')),
2757 'uploader': get_first(video_details, 'author'),
2758 'uploader_id': self._search_regex(r'/(?:channel|user)/([^/?&#]+)', owner_profile_url, 'uploader id') if owner_profile_url else None,
2759 'uploader_url': owner_profile_url,
2760 'channel_id': channel_id,
2761 'channel_url': f'https://www.youtube.com/channel/{channel_id}' if channel_id else None,
2762 'duration': duration,
2763 'view_count': int_or_none(
2764 get_first((video_details, microformats), (..., 'viewCount'))
2765 or search_meta('interactionCount')),
2766 'average_rating': float_or_none(get_first(video_details, 'averageRating')),
2767 'age_limit': 18 if (
2768 get_first(microformats, 'isFamilySafe') is False
2769 or search_meta('isFamilyFriendly') == 'false'
2770 or search_meta('og:restrictions:age') == '18+') else 0,
2771 'webpage_url': webpage_url,
2772 'categories': [category] if category else None,
2773 'tags': keywords,
2774 'playable_in_embed': get_first(playability_statuses, 'playableInEmbed'),
2775 'is_live': is_live,
2776 'was_live': (False if is_live or is_upcoming or live_content is False
2777 else None if is_live is None or is_upcoming is None
2778 else live_content),
2779 'live_status': 'is_upcoming' if is_upcoming else None, # rest will be set by YoutubeDL
2780 'release_timestamp': live_starttime,
2781 }
2782
2783 pctr = traverse_obj(player_responses, (..., 'captions', 'playerCaptionsTracklistRenderer'), expected_type=dict)
2784 if pctr:
2785 def get_lang_code(track):
2786 return (remove_start(track.get('vssId') or '', '.').replace('.', '-')
2787 or track.get('languageCode'))
2788
2789 # Converted into dicts to remove duplicates
2790 captions = {
2791 get_lang_code(sub): sub
2792 for sub in traverse_obj(pctr, (..., 'captionTracks', ...), default=[])}
2793 translation_languages = {
2794 lang.get('languageCode'): self._get_text(lang.get('languageName'), max_runs=1)
2795 for lang in traverse_obj(pctr, (..., 'translationLanguages', ...), default=[])}
2796
2797 def process_language(container, base_url, lang_code, sub_name, query):
2798 lang_subs = container.setdefault(lang_code, [])
2799 for fmt in self._SUBTITLE_FORMATS:
2800 query.update({
2801 'fmt': fmt,
2802 })
2803 lang_subs.append({
2804 'ext': fmt,
2805 'url': update_url_query(base_url, query),
2806 'name': sub_name,
2807 })
2808
2809 subtitles, automatic_captions = {}, {}
2810 for lang_code, caption_track in captions.items():
2811 base_url = caption_track.get('baseUrl')
2812 if not base_url:
2813 continue
2814 lang_name = self._get_text(caption_track, 'name', max_runs=1)
2815 if caption_track.get('kind') != 'asr':
2816 if not lang_code:
2817 continue
2818 process_language(
2819 subtitles, base_url, lang_code, lang_name, {})
2820 if not caption_track.get('isTranslatable'):
2821 continue
2822 for trans_code, trans_name in translation_languages.items():
2823 if not trans_code:
2824 continue
2825 if caption_track.get('kind') != 'asr':
2826 trans_code += f'-{lang_code}'
2827 trans_name += format_field(lang_name, template=' from %s')
2828 process_language(
2829 automatic_captions, base_url, trans_code, trans_name, {'tlang': trans_code})
2830 info['automatic_captions'] = automatic_captions
2831 info['subtitles'] = subtitles
2832
2833 parsed_url = compat_urllib_parse_urlparse(url)
2834 for component in [parsed_url.fragment, parsed_url.query]:
2835 query = compat_parse_qs(component)
2836 for k, v in query.items():
2837 for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
2838 d_k += '_time'
2839 if d_k not in info and k in s_ks:
2840 info[d_k] = parse_duration(query[k][0])
2841
2842 # Youtube Music Auto-generated description
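# Such descriptions typically follow the pattern below (illustrative only):
#   <track> · <artist>
#   <album>
#   ℗ <year> <label>
#   Released on: YYYY-MM-DD
#   ...
#   Auto-generated by YouTube.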
2843 if video_description:
2844 mobj = re.search(r'(?s)(?P<track>[^·\n]+)·(?P<artist>[^\n]+)\n+(?P<album>[^\n]+)(?:.+?℗\s*(?P<release_year>\d{4})(?!\d))?(?:.+?Released on\s*:\s*(?P<release_date>\d{4}-\d{2}-\d{2}))?(.+?\nArtist\s*:\s*(?P<clean_artist>[^\n]+))?.+\nAuto-generated by YouTube\.\s*$', video_description)
2845 if mobj:
2846 release_year = mobj.group('release_year')
2847 release_date = mobj.group('release_date')
2848 if release_date:
2849 release_date = release_date.replace('-', '')
2850 if not release_year:
2851 release_year = release_date[:4]
2852 info.update({
2853 'album': mobj.group('album').strip(),
2854 'artist': mobj.group('clean_artist') or ', '.join(a.strip() for a in mobj.group('artist').split('·')),
2855 'track': mobj.group('track').strip(),
2856 'release_date': release_date,
2857 'release_year': int_or_none(release_year),
2858 })
2859
2860 initial_data = None
2861 if webpage:
2862 initial_data = self._extract_yt_initial_variable(
2863 webpage, self._YT_INITIAL_DATA_RE, video_id,
2864 'yt initial data')
2865 if not initial_data:
2866 query = {'videoId': video_id}
2867 query.update(self._get_checkok_params())
2868 initial_data = self._extract_response(
2869 item_id=video_id, ep='next', fatal=False,
2870 ytcfg=master_ytcfg, query=query,
2871 headers=self.generate_api_headers(ytcfg=master_ytcfg),
2872 note='Downloading initial data API JSON')
2873
2874 try:
2875 # This will error if there is no livechat
2876 initial_data['contents']['twoColumnWatchNextResults']['conversationBar']['liveChatRenderer']['continuations'][0]['reloadContinuationData']['continuation']
2877 info.setdefault('subtitles', {})['live_chat'] = [{
2878 'url': 'https://www.youtube.com/watch?v=%s' % video_id, # url is needed to set cookies
2879 'video_id': video_id,
2880 'ext': 'json',
2881 'protocol': 'youtube_live_chat' if is_live or is_upcoming else 'youtube_live_chat_replay',
2882 }]
2883 except (KeyError, IndexError, TypeError):
2884 pass
2885
2886 if initial_data:
2887 info['chapters'] = (
2888 self._extract_chapters_from_json(initial_data, duration)
2889 or self._extract_chapters_from_engagement_panel(initial_data, duration)
2890 or None)
2891
2892 contents = try_get(
2893 initial_data,
2894 lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'],
2895 list) or []
2896 for content in contents:
2897 vpir = content.get('videoPrimaryInfoRenderer')
2898 if vpir:
2899 stl = vpir.get('superTitleLink')
2900 if stl:
2901 stl = self._get_text(stl)
2902 if try_get(
2903 vpir,
2904 lambda x: x['superTitleIcon']['iconType']) == 'LOCATION_PIN':
2905 info['location'] = stl
2906 else:
2907 mobj = re.search(r'(.+?)\s*S(\d+)\s*•\s*E(\d+)', stl)
2908 if mobj:
2909 info.update({
2910 'series': mobj.group(1),
2911 'season_number': int(mobj.group(2)),
2912 'episode_number': int(mobj.group(3)),
2913 })
2914 for tlb in (try_get(
2915 vpir,
2916 lambda x: x['videoActions']['menuRenderer']['topLevelButtons'],
2917 list) or []):
2918 tbr = tlb.get('toggleButtonRenderer') or {}
2919 for getter, regex in [(
2920 lambda x: x['defaultText']['accessibility']['accessibilityData'],
2921 r'(?P<count>[\d,]+)\s*(?P<type>(?:dis)?like)'), ([
2922 lambda x: x['accessibility'],
2923 lambda x: x['accessibilityData']['accessibilityData'],
2924 ], r'(?P<type>(?:dis)?like) this video along with (?P<count>[\d,]+) other people')]:
2925 label = (try_get(tbr, getter, dict) or {}).get('label')
2926 if label:
2927 mobj = re.match(regex, label)
2928 if mobj:
2929 info[mobj.group('type') + '_count'] = str_to_int(mobj.group('count'))
2930 break
2931 sbr_tooltip = try_get(
2932 vpir, lambda x: x['sentimentBar']['sentimentBarRenderer']['tooltip'])
2933 if sbr_tooltip:
2934 like_count, dislike_count = sbr_tooltip.split(' / ')
2935 info.update({
2936 'like_count': str_to_int(like_count),
2937 'dislike_count': str_to_int(dislike_count),
2938 })
2939 vsir = content.get('videoSecondaryInfoRenderer')
2940 if vsir:
2941 info['channel'] = self._get_text(vsir, ('owner', 'videoOwnerRenderer', 'title'))
2942 rows = try_get(
2943 vsir,
2944 lambda x: x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
2945 list) or []
2946 multiple_songs = False
2947 for row in rows:
2948 if try_get(row, lambda x: x['metadataRowRenderer']['hasDividerLine']) is True:
2949 multiple_songs = True
2950 break
2951 for row in rows:
2952 mrr = row.get('metadataRowRenderer') or {}
2953 mrr_title = mrr.get('title')
2954 if not mrr_title:
2955 continue
2956 mrr_title = self._get_text(mrr, 'title')
2957 mrr_contents_text = self._get_text(mrr, ('contents', 0))
2958 if mrr_title == 'License':
2959 info['license'] = mrr_contents_text
2960 elif not multiple_songs:
2961 if mrr_title == 'Album':
2962 info['album'] = mrr_contents_text
2963 elif mrr_title == 'Artist':
2964 info['artist'] = mrr_contents_text
2965 elif mrr_title == 'Song':
2966 info['track'] = mrr_contents_text
2967
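# If the channel fields could not be extracted above, fall back to the corresponding uploader fields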
2968 fallbacks = {
2969 'channel': 'uploader',
2970 'channel_id': 'uploader_id',
2971 'channel_url': 'uploader_url',
2972 }
2973 for to, frm in fallbacks.items():
2974 if not info.get(to):
2975 info[to] = info.get(frm)
2976
2977 for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
2978 v = info.get(s_k)
2979 if v:
2980 info[d_k] = v
2981
2982 is_private = get_first(video_details, 'isPrivate', expected_type=bool)
2983 is_unlisted = get_first(microformats, 'isUnlisted', expected_type=bool)
2984 is_membersonly = None
2985 is_premium = None
2986 if initial_data and is_private is not None:
2987 is_membersonly = False
2988 is_premium = False
2989 contents = try_get(initial_data, lambda x: x['contents']['twoColumnWatchNextResults']['results']['results']['contents'], list) or []
2990 badge_labels = set()
2991 for content in contents:
2992 if not isinstance(content, dict):
2993 continue
2994 badge_labels.update(self._extract_badges(content.get('videoPrimaryInfoRenderer')))
2995 for badge_label in badge_labels:
2996 if badge_label.lower() == 'members only':
2997 is_membersonly = True
2998 elif badge_label.lower() == 'premium':
2999 is_premium = True
3000 elif badge_label.lower() == 'unlisted':
3001 is_unlisted = True
3002
3003 info['availability'] = self._availability(
3004 is_private=is_private,
3005 needs_premium=is_premium,
3006 needs_subscription=is_membersonly,
3007 needs_auth=info['age_limit'] >= 18,
3008 is_unlisted=None if is_private is None else is_unlisted)
3009
3010 info['__post_extractor'] = self.extract_comments(master_ytcfg, video_id, contents, webpage)
3011
3012 self.mark_watched(video_id, player_responses)
3013
3014 return info
3015
3016
3017 class YoutubeTabIE(YoutubeBaseInfoExtractor):
3018 IE_DESC = 'YouTube Tabs'
3019 _VALID_URL = r'''(?x)
3020 https?://
3021 (?:\w+\.)?
3022 (?:
3023 youtube(?:kids)?\.com|
3024 invidio\.us
3025 )/
3026 (?:
3027 (?P<channel_type>channel|c|user|browse)/|
3028 (?P<not_channel>
3029 feed/|hashtag/|
3030 (?:playlist|watch)\?.*?\blist=
3031 )|
3032 (?!(?:%s)\b) # Direct URLs
3033 )
3034 (?P<id>[^/?\#&]+)
3035 ''' % YoutubeBaseInfoExtractor._RESERVED_NAMES
3036 IE_NAME = 'youtube:tab'
3037
3038 _TESTS = [{
3039 'note': 'playlists, multipage',
3040 'url': 'https://www.youtube.com/c/ИгорьКлейнер/playlists?view=1&flow=grid',
3041 'playlist_mincount': 94,
3042 'info_dict': {
3043 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3044 'title': 'Игорь Клейнер - Playlists',
3045 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3046 'uploader': 'Игорь Клейнер',
3047 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3048 },
3049 }, {
3050 'note': 'playlists, multipage, different order',
3051 'url': 'https://www.youtube.com/user/igorkle1/playlists?view=1&sort=dd',
3052 'playlist_mincount': 94,
3053 'info_dict': {
3054 'id': 'UCqj7Cz7revf5maW9g5pgNcg',
3055 'title': 'Игорь Клейнер - Playlists',
3056 'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
3057 'uploader_id': 'UCqj7Cz7revf5maW9g5pgNcg',
3058 'uploader': 'Игорь Клейнер',
3059 },
3060 }, {
3061 'note': 'playlists, series',
3062 'url': 'https://www.youtube.com/c/3blue1brown/playlists?view=50&sort=dd&shelf_id=3',
3063 'playlist_mincount': 5,
3064 'info_dict': {
3065 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3066 'title': '3Blue1Brown - Playlists',
3067 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3068 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3069 'uploader': '3Blue1Brown',
3070 },
3071 }, {
3072 'note': 'playlists, singlepage',
3073 'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
3074 'playlist_mincount': 4,
3075 'info_dict': {
3076 'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3077 'title': 'ThirstForScience - Playlists',
3078 'description': 'md5:609399d937ea957b0f53cbffb747a14c',
3079 'uploader': 'ThirstForScience',
3080 'uploader_id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
3081 }
3082 }, {
3083 'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
3084 'only_matching': True,
3085 }, {
3086 'note': 'basic, single video playlist',
3087 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3088 'info_dict': {
3089 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3090 'uploader': 'Sergey M.',
3091 'id': 'PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3092 'title': 'youtube-dl public playlist',
3093 },
3094 'playlist_count': 1,
3095 }, {
3096 'note': 'empty playlist',
3097 'url': 'https://www.youtube.com/playlist?list=PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3098 'info_dict': {
3099 'uploader_id': 'UCmlqkdCBesrv2Lak1mF_MxA',
3100 'uploader': 'Sergey M.',
3101 'id': 'PL4lCao7KL_QFodcLWhDpGCYnngnHtQ-Xf',
3102 'title': 'youtube-dl empty playlist',
3103 },
3104 'playlist_count': 0,
3105 }, {
3106 'note': 'Home tab',
3107 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/featured',
3108 'info_dict': {
3109 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3110 'title': 'lex will - Home',
3111 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3112 'uploader': 'lex will',
3113 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3114 },
3115 'playlist_mincount': 2,
3116 }, {
3117 'note': 'Videos tab',
3118 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos',
3119 'info_dict': {
3120 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3121 'title': 'lex will - Videos',
3122 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3123 'uploader': 'lex will',
3124 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3125 },
3126 'playlist_mincount': 975,
3127 }, {
3128 'note': 'Videos tab, sorted by popular',
3129 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/videos?view=0&sort=p&flow=grid',
3130 'info_dict': {
3131 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3132 'title': 'lex will - Videos',
3133 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3134 'uploader': 'lex will',
3135 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3136 },
3137 'playlist_mincount': 199,
3138 }, {
3139 'note': 'Playlists tab',
3140 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/playlists',
3141 'info_dict': {
3142 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3143 'title': 'lex will - Playlists',
3144 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3145 'uploader': 'lex will',
3146 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3147 },
3148 'playlist_mincount': 17,
3149 }, {
3150 'note': 'Community tab',
3151 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/community',
3152 'info_dict': {
3153 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3154 'title': 'lex will - Community',
3155 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3156 'uploader': 'lex will',
3157 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3158 },
3159 'playlist_mincount': 18,
3160 }, {
3161 'note': 'Channels tab',
3162 'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
3163 'info_dict': {
3164 'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3165 'title': 'lex will - Channels',
3166 'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
3167 'uploader': 'lex will',
3168 'uploader_id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
3169 },
3170 'playlist_mincount': 12,
3171 }, {
3172 'note': 'Search tab',
3173 'url': 'https://www.youtube.com/c/3blue1brown/search?query=linear%20algebra',
3174 'playlist_mincount': 40,
3175 'info_dict': {
3176 'id': 'UCYO_jab_esuFRV4b17AJtAw',
3177 'title': '3Blue1Brown - Search - linear algebra',
3178 'description': 'md5:e1384e8a133307dd10edee76e875d62f',
3179 'uploader': '3Blue1Brown',
3180 'uploader_id': 'UCYO_jab_esuFRV4b17AJtAw',
3181 },
3182 }, {
3183 'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3184 'only_matching': True,
3185 }, {
3186 'url': 'https://www.youtubekids.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3187 'only_matching': True,
3188 }, {
3189 'url': 'https://music.youtube.com/channel/UCmlqkdCBesrv2Lak1mF_MxA',
3190 'only_matching': True,
3191 }, {
3192 'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
3193 'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3194 'info_dict': {
3195 'title': '29C3: Not my department',
3196 'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
3197 'uploader': 'Christiaan008',
3198 'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
3199 'description': 'md5:a14dc1a8ef8307a9807fe136a0660268',
3200 },
3201 'playlist_count': 96,
3202 }, {
3203 'note': 'Large playlist',
3204 'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
3205 'info_dict': {
3206 'title': 'Uploads from Cauchemar',
3207 'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
3208 'uploader': 'Cauchemar',
3209 'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
3210 },
3211 'playlist_mincount': 1123,
3212 }, {
3213 'note': 'even larger playlist, 8832 videos',
3214 'url': 'http://www.youtube.com/user/NASAgovVideo/videos',
3215 'only_matching': True,
3216 }, {
3217 'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
3218 'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
3219 'info_dict': {
3220 'title': 'Uploads from Interstellar Movie',
3221 'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
3222 'uploader': 'Interstellar Movie',
3223 'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
3224 },
3225 'playlist_mincount': 21,
3226 }, {
3227 'note': 'Playlist with "show unavailable videos" button',
3228 'url': 'https://www.youtube.com/playlist?list=UUTYLiWFZy8xtPwxFwX9rV7Q',
3229 'info_dict': {
3230 'title': 'Uploads from Phim Siêu Nhân Nhật Bản',
3231 'id': 'UUTYLiWFZy8xtPwxFwX9rV7Q',
3232 'uploader': 'Phim Siêu Nhân Nhật Bản',
3233 'uploader_id': 'UCTYLiWFZy8xtPwxFwX9rV7Q',
3234 },
3235 'playlist_mincount': 200,
3236 }, {
3237 'note': 'Playlist with unavailable videos in page 7',
3238 'url': 'https://www.youtube.com/playlist?list=UU8l9frL61Yl5KFOl87nIm2w',
3239 'info_dict': {
3240 'title': 'Uploads from BlankTV',
3241 'id': 'UU8l9frL61Yl5KFOl87nIm2w',
3242 'uploader': 'BlankTV',
3243 'uploader_id': 'UC8l9frL61Yl5KFOl87nIm2w',
3244 },
3245 'playlist_mincount': 1000,
3246 }, {
3247 'note': 'https://github.com/ytdl-org/youtube-dl/issues/21844',
3248 'url': 'https://www.youtube.com/playlist?list=PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
3249 'info_dict': {
3250 'title': 'Data Analysis with Dr Mike Pound',
3251 'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
3252 'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
3253 'uploader': 'Computerphile',
3254 'description': 'md5:7f567c574d13d3f8c0954d9ffee4e487',
3255 },
3256 'playlist_mincount': 11,
3257 }, {
3258 'url': 'https://invidio.us/playlist?list=PL4lCao7KL_QFVb7Iudeipvc2BCavECqzc',
3259 'only_matching': True,
3260 }, {
3261 'note': 'Playlist URL that does not actually serve a playlist',
3262 'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
3263 'info_dict': {
3264 'id': 'FqZTN594JQw',
3265 'ext': 'webm',
3266 'title': "Smiley's People 01 detective, Adventure Series, Action",
3267 'uploader': 'STREEM',
3268 'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
3269 'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
3270 'upload_date': '20150526',
3271 'license': 'Standard YouTube License',
3272 'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
3273 'categories': ['People & Blogs'],
3274 'tags': list,
3275 'view_count': int,
3276 'like_count': int,
3277 'dislike_count': int,
3278 },
3279 'params': {
3280 'skip_download': True,
3281 },
3282 'skip': 'This video is not available.',
3283 'add_ie': [YoutubeIE.ie_key()],
3284 }, {
3285 'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
3286 'only_matching': True,
3287 }, {
3288 'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
3289 'only_matching': True,
3290 }, {
3291 'url': 'https://www.youtube.com/channel/UCoMdktPbSTixAyNGwb-UYkQ/live',
3292 'info_dict': {
3293 'id': '3yImotZU3tw', # This will keep changing
3294 'ext': 'mp4',
3295 'title': compat_str,
3296 'uploader': 'Sky News',
3297 'uploader_id': 'skynews',
3298 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/skynews',
3299 'upload_date': r're:\d{8}',
3300 'description': compat_str,
3301 'categories': ['News & Politics'],
3302 'tags': list,
3303 'like_count': int,
3304 'dislike_count': int,
3305 },
3306 'params': {
3307 'skip_download': True,
3308 },
3309 'expected_warnings': ['Downloading just video ', 'Ignoring subtitle tracks found in '],
3310 }, {
3311 'url': 'https://www.youtube.com/user/TheYoungTurks/live',
3312 'info_dict': {
3313 'id': 'a48o2S1cPoo',
3314 'ext': 'mp4',
3315 'title': 'The Young Turks - Live Main Show',
3316 'uploader': 'The Young Turks',
3317 'uploader_id': 'TheYoungTurks',
3318 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/TheYoungTurks',
3319 'upload_date': '20150715',
3320 'license': 'Standard YouTube License',
3321 'description': 'md5:438179573adcdff3c97ebb1ee632b891',
3322 'categories': ['News & Politics'],
3323 'tags': ['Cenk Uygur (TV Program Creator)', 'The Young Turks (Award-Winning Work)', 'Talk Show (TV Genre)'],
3324 'like_count': int,
3325 'dislike_count': int,
3326 },
3327 'params': {
3328 'skip_download': True,
3329 },
3330 'only_matching': True,
3331 }, {
3332 'url': 'https://www.youtube.com/channel/UC1yBKRuGpC1tSM73A0ZjYjQ/live',
3333 'only_matching': True,
3334 }, {
3335 'url': 'https://www.youtube.com/c/CommanderVideoHq/live',
3336 'only_matching': True,
3337 }, {
3338 'note': 'A channel that is not live. Should raise error',
3339 'url': 'https://www.youtube.com/user/numberphile/live',
3340 'only_matching': True,
3341 }, {
3342 'url': 'https://www.youtube.com/feed/trending',
3343 'only_matching': True,
3344 }, {
3345 'url': 'https://www.youtube.com/feed/library',
3346 'only_matching': True,
3347 }, {
3348 'url': 'https://www.youtube.com/feed/history',
3349 'only_matching': True,
3350 }, {
3351 'url': 'https://www.youtube.com/feed/subscriptions',
3352 'only_matching': True,
3353 }, {
3354 'url': 'https://www.youtube.com/feed/watch_later',
3355 'only_matching': True,
3356 }, {
3357 'note': 'Recommended - redirects to home page.',
3358 'url': 'https://www.youtube.com/feed/recommended',
3359 'only_matching': True,
3360 }, {
3361 'note': 'inline playlist with continuations that do not always work',
3362 'url': 'https://www.youtube.com/watch?v=UC6u0Tct-Fo&list=PL36D642111D65BE7C',
3363 'only_matching': True,
3364 }, {
3365 'url': 'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8',
3366 'only_matching': True,
3367 }, {
3368 'url': 'https://www.youtube.com/course',
3369 'only_matching': True,
3370 }, {
3371 'url': 'https://www.youtube.com/zsecurity',
3372 'only_matching': True,
3373 }, {
3374 'url': 'http://www.youtube.com/NASAgovVideo/videos',
3375 'only_matching': True,
3376 }, {
3377 'url': 'https://www.youtube.com/TheYoungTurks/live',
3378 'only_matching': True,
3379 }, {
3380 'url': 'https://www.youtube.com/hashtag/cctv9',
3381 'info_dict': {
3382 'id': 'cctv9',
3383 'title': '#cctv9',
3384 },
3385 'playlist_mincount': 350,
3386 }, {
3387 'url': 'https://www.youtube.com/watch?list=PLW4dVinRY435CBE_JD3t-0SRXKfnZHS1P&feature=youtu.be&v=M9cJMXmQ_ZU',
3388 'only_matching': True,
3389 }, {
3390 'note': 'Requires Premium: should request additional YTM-info webpage (and have format 141) for videos in playlist',
3391 'url': 'https://music.youtube.com/playlist?list=PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3392 'only_matching': True
3393 }, {
3394 'note': '/browse/ should redirect to /channel/',
3395 'url': 'https://music.youtube.com/browse/UC1a8OFewdjuLq6KlF8M_8Ng',
3396 'only_matching': True
3397 }, {
3398 'note': 'VLPL, should redirect to playlist?list=PL...',
3399 'url': 'https://music.youtube.com/browse/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3400 'info_dict': {
3401 'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
3402 'uploader': 'NoCopyrightSounds',
3403 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
3404 'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
3405 'title': 'NCS Releases',
3406 },
3407 'playlist_mincount': 166,
3408 }, {
3409 'note': 'Topic, should redirect to playlist?list=UU...',
3410 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
3411 'info_dict': {
3412 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
3413 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
3414 'title': 'Uploads from Royalty Free Music - Topic',
3415 'uploader': 'Royalty Free Music - Topic',
3416 },
3417 'expected_warnings': [
3418 'A channel/user page was given',
3419 'The URL does not have a videos tab',
3420 ],
3421 'playlist_mincount': 101,
3422 }, {
3423 'note': 'Topic without a UU playlist',
3424 'url': 'https://www.youtube.com/channel/UCtFRv9O2AHqOZjjynzrv-xg',
3425 'info_dict': {
3426 'id': 'UCtFRv9O2AHqOZjjynzrv-xg',
3427 'title': 'UCtFRv9O2AHqOZjjynzrv-xg',
3428 },
3429 'expected_warnings': [
3430 'A channel/user page was given',
3431 'The URL does not have a videos tab',
3432 'Falling back to channel URL',
3433 ],
3434 'playlist_mincount': 9,
3435 }, {
3436 'note': 'Youtube music Album',
3437 'url': 'https://music.youtube.com/browse/MPREb_gTAcphH99wE',
3438 'info_dict': {
3439 'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
3440 'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
3441 },
3442 'playlist_count': 50,
3443 }, {
3444 'note': 'unlisted single video playlist',
3445 'url': 'https://www.youtube.com/playlist?list=PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
3446 'info_dict': {
3447 'uploader_id': 'UC9zHu_mHU96r19o-wV5Qs1Q',
3448 'uploader': 'colethedj',
3449 'id': 'PLwL24UFy54GrB3s2KMMfjZscDi1x5Dajf',
3450 'title': 'yt-dlp unlisted playlist test',
3451 'availability': 'unlisted'
3452 },
3453 'playlist_count': 1,
3454 }, {
3455 'note': 'API Fallback: Recommended - redirects to home page. Requires visitorData',
3456 'url': 'https://www.youtube.com/feed/recommended',
3457 'info_dict': {
3458 'id': 'recommended',
3459 'title': 'recommended',
3460 },
3461 'playlist_mincount': 50,
3462 'params': {
3463 'skip_download': True,
3464 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3465 },
3466 }, {
3467 'note': 'API Fallback: /videos tab, sorted by oldest first',
3468 'url': 'https://www.youtube.com/user/theCodyReeder/videos?view=0&sort=da&flow=grid',
3469 'info_dict': {
3470 'id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
3471 'title': 'Cody\'sLab - Videos',
3472 'description': 'md5:d083b7c2f0c67ee7a6c74c3e9b4243fa',
3473 'uploader': 'Cody\'sLab',
3474 'uploader_id': 'UCu6mSoMNzHQiBIOCkHUa2Aw',
3475 },
3476 'playlist_mincount': 650,
3477 'params': {
3478 'skip_download': True,
3479 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3480 },
3481 }, {
3482 'note': 'API Fallback: Topic, should redirect to playlist?list=UU...',
3483 'url': 'https://music.youtube.com/browse/UC9ALqqC4aIeG5iDs7i90Bfw',
3484 'info_dict': {
3485 'id': 'UU9ALqqC4aIeG5iDs7i90Bfw',
3486 'uploader_id': 'UC9ALqqC4aIeG5iDs7i90Bfw',
3487 'title': 'Uploads from Royalty Free Music - Topic',
3488 'uploader': 'Royalty Free Music - Topic',
3489 },
3490 'expected_warnings': [
3491 'A channel/user page was given',
3492 'The URL does not have a videos tab',
3493 ],
3494 'playlist_mincount': 101,
3495 'params': {
3496 'skip_download': True,
3497 'extractor_args': {'youtubetab': {'skip': ['webpage']}}
3498 },
3499 }]
3500
3501 @classmethod
3502 def suitable(cls, url):
3503 return False if YoutubeIE.suitable(url) else super(
3504 YoutubeTabIE, cls).suitable(url)
3505
3506 def _extract_channel_id(self, webpage):
3507 channel_id = self._html_search_meta(
3508 'channelId', webpage, 'channel id', default=None)
3509 if channel_id:
3510 return channel_id
3511 channel_url = self._html_search_meta(
3512 ('og:url', 'al:ios:url', 'al:android:url', 'al:web:url',
3513 'twitter:url', 'twitter:app:url:iphone', 'twitter:app:url:ipad',
3514 'twitter:app:url:googleplay'), webpage, 'channel url')
3515 return self._search_regex(
3516 r'https?://(?:www\.)?youtube\.com/channel/([^/?#&]+)',
3517 channel_url, 'channel id')
3518
3519 @staticmethod
3520 def _extract_basic_item_renderer(item):
3521 # Modified from _extract_grid_item_renderer
3522 known_basic_renderers = (
3523 'playlistRenderer', 'videoRenderer', 'channelRenderer', 'showRenderer'
3524 )
3525 for key, renderer in item.items():
3526 if not isinstance(renderer, dict):
3527 continue
3528 elif key in known_basic_renderers:
3529 return renderer
3530 elif key.startswith('grid') and key.endswith('Renderer'):
3531 return renderer
3532
3533 def _grid_entries(self, grid_renderer):
3534 for item in grid_renderer['items']:
3535 if not isinstance(item, dict):
3536 continue
3537 renderer = self._extract_basic_item_renderer(item)
3538 if not isinstance(renderer, dict):
3539 continue
3540 title = self._get_text(renderer, 'title')
3541
3542 # playlist
3543 playlist_id = renderer.get('playlistId')
3544 if playlist_id:
3545 yield self.url_result(
3546 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3547 ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
3548 video_title=title)
3549 continue
3550 # video
3551 video_id = renderer.get('videoId')
3552 if video_id:
3553 yield self._extract_video(renderer)
3554 continue
3555 # channel
3556 channel_id = renderer.get('channelId')
3557 if channel_id:
3558 yield self.url_result(
3559 'https://www.youtube.com/channel/%s' % channel_id,
3560 ie=YoutubeTabIE.ie_key(), video_title=title)
3561 continue
3562 # generic endpoint URL support
3563 ep_url = urljoin('https://www.youtube.com/', try_get(
3564 renderer, lambda x: x['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
3565 compat_str))
3566 if ep_url:
3567 for ie in (YoutubeTabIE, YoutubePlaylistIE, YoutubeIE):
3568 if ie.suitable(ep_url):
3569 yield self.url_result(
3570 ep_url, ie=ie.ie_key(), video_id=ie._match_id(ep_url), video_title=title)
3571 break
3572
3573 def _shelf_entries_from_content(self, shelf_renderer):
3574 content = shelf_renderer.get('content')
3575 if not isinstance(content, dict):
3576 return
3577 renderer = content.get('gridRenderer') or content.get('expandedShelfContentsRenderer')
3578 if renderer:
3579 # TODO: add support for nested playlists so that each shelf is processed
3580 # as a separate playlist
3581 # TODO: this includes only the first N items
3582 for entry in self._grid_entries(renderer):
3583 yield entry
3584 renderer = content.get('horizontalListRenderer')
3585 if renderer:
3586 # TODO
3587 pass
3588
3589 def _shelf_entries(self, shelf_renderer, skip_channels=False):
3590 ep = try_get(
3591 shelf_renderer, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3592 compat_str)
3593 shelf_url = urljoin('https://www.youtube.com', ep)
3594 if shelf_url:
3595 # Skipping links to other channels; note that checking for
3596 # endpoint.commandMetadata.webCommandMetadata.webPageType == WEB_PAGE_TYPE_CHANNEL
3597 # will not work
3598 if skip_channels and '/channels?' in shelf_url:
3599 return
3600 title = self._get_text(shelf_renderer, 'title')
3601 yield self.url_result(shelf_url, video_title=title)
3602 # Shelf may not contain a shelf URL; fall back to extraction from content
3603 for entry in self._shelf_entries_from_content(shelf_renderer):
3604 yield entry
3605
3606 def _playlist_entries(self, video_list_renderer):
3607 for content in video_list_renderer['contents']:
3608 if not isinstance(content, dict):
3609 continue
3610 renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
3611 if not isinstance(renderer, dict):
3612 continue
3613 video_id = renderer.get('videoId')
3614 if not video_id:
3615 continue
3616 yield self._extract_video(renderer)
3617
3618 def _rich_entries(self, rich_grid_renderer):
3619 renderer = try_get(
3620 rich_grid_renderer, lambda x: x['content']['videoRenderer'], dict) or {}
3621 video_id = renderer.get('videoId')
3622 if not video_id:
3623 return
3624 yield self._extract_video(renderer)
3625
3626 def _video_entry(self, video_renderer):
3627 video_id = video_renderer.get('videoId')
3628 if video_id:
3629 return self._extract_video(video_renderer)
3630
3631 def _post_thread_entries(self, post_thread_renderer):
3632 post_renderer = try_get(
3633 post_thread_renderer, lambda x: x['post']['backstagePostRenderer'], dict)
3634 if not post_renderer:
3635 return
3636 # video attachment
3637 video_renderer = try_get(
3638 post_renderer, lambda x: x['backstageAttachment']['videoRenderer'], dict) or {}
3639 video_id = video_renderer.get('videoId')
3640 if video_id:
3641 entry = self._extract_video(video_renderer)
3642 if entry:
3643 yield entry
3644 # playlist attachment
3645 playlist_id = try_get(
3646 post_renderer, lambda x: x['backstageAttachment']['playlistRenderer']['playlistId'], compat_str)
3647 if playlist_id:
3648 yield self.url_result(
3649 'https://www.youtube.com/playlist?list=%s' % playlist_id,
3650 ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
3651 # inline video links
3652 runs = try_get(post_renderer, lambda x: x['contentText']['runs'], list) or []
3653 for run in runs:
3654 if not isinstance(run, dict):
3655 continue
3656 ep_url = try_get(
3657 run, lambda x: x['navigationEndpoint']['urlEndpoint']['url'], compat_str)
3658 if not ep_url:
3659 continue
3660 if not YoutubeIE.suitable(ep_url):
3661 continue
3662 ep_video_id = YoutubeIE._match_id(ep_url)
3663 if video_id == ep_video_id:
3664 continue
3665 yield self.url_result(ep_url, ie=YoutubeIE.ie_key(), video_id=ep_video_id)
3666
3667 def _post_thread_continuation_entries(self, post_thread_continuation):
3668 contents = post_thread_continuation.get('contents')
3669 if not isinstance(contents, list):
3670 return
3671 for content in contents:
3672 renderer = content.get('backstagePostThreadRenderer')
3673 if not isinstance(renderer, dict):
3674 continue
3675 for entry in self._post_thread_entries(renderer):
3676 yield entry
3677
3678 r''' # unused
3679 def _rich_grid_entries(self, contents):
3680 for content in contents:
3681 video_renderer = try_get(content, lambda x: x['richItemRenderer']['content']['videoRenderer'], dict)
3682 if video_renderer:
3683 entry = self._video_entry(video_renderer)
3684 if entry:
3685 yield entry
3686 '''
3687 def _entries(self, tab, item_id, ytcfg, account_syncid, visitor_data):
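# First yield the renderers already present in the tab, then keep requesting continuation
# pages through the API until no further continuation token is returned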
3688
3689 def extract_entries(parent_renderer): # this needs to be called again for continuations to work with feeds
3690 contents = try_get(parent_renderer, lambda x: x['contents'], list) or []
3691 for content in contents:
3692 if not isinstance(content, dict):
3693 continue
3694 is_renderer = try_get(content, lambda x: x['itemSectionRenderer'], dict)
3695 if not is_renderer:
3696 renderer = content.get('richItemRenderer')
3697 if renderer:
3698 for entry in self._rich_entries(renderer):
3699 yield entry
3700 continuation_list[0] = self._extract_continuation(parent_renderer)
3701 continue
3702 isr_contents = try_get(is_renderer, lambda x: x['contents'], list) or []
3703 for isr_content in isr_contents:
3704 if not isinstance(isr_content, dict):
3705 continue
3706
3707 known_renderers = {
3708 'playlistVideoListRenderer': self._playlist_entries,
3709 'gridRenderer': self._grid_entries,
3710 'shelfRenderer': lambda x: self._shelf_entries(x, tab.get('title') != 'Channels'),
3711 'backstagePostThreadRenderer': self._post_thread_entries,
3712 'videoRenderer': lambda x: [self._video_entry(x)],
3713 }
3714 for key, renderer in isr_content.items():
3715 if key not in known_renderers:
3716 continue
3717 for entry in known_renderers[key](renderer):
3718 if entry:
3719 yield entry
3720 continuation_list[0] = self._extract_continuation(renderer)
3721 break
3722
3723 if not continuation_list[0]:
3724 continuation_list[0] = self._extract_continuation(is_renderer)
3725
3726 if not continuation_list[0]:
3727 continuation_list[0] = self._extract_continuation(parent_renderer)
3728
3729 continuation_list = [None] # Python 2 does not support nonlocal
3730 tab_content = try_get(tab, lambda x: x['content'], dict)
3731 if not tab_content:
3732 return
3733 parent_renderer = (
3734 try_get(tab_content, lambda x: x['sectionListRenderer'], dict)
3735 or try_get(tab_content, lambda x: x['richGridRenderer'], dict) or {})
3736 for entry in extract_entries(parent_renderer):
3737 yield entry
3738 continuation = continuation_list[0]
3739
3740 for page_num in itertools.count(1):
3741 if not continuation:
3742 break
3743 headers = self.generate_api_headers(
3744 ytcfg=ytcfg, account_syncid=account_syncid, visitor_data=visitor_data)
3745 response = self._extract_response(
3746 item_id='%s page %s' % (item_id, page_num),
3747 query=continuation, headers=headers, ytcfg=ytcfg,
3748 check_get_keys=('continuationContents', 'onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3749
3750 if not response:
3751 break
3752 # Extracting updated visitor data is required to prevent an infinite extraction loop in some cases
3753 # See: https://github.com/ytdl-org/youtube-dl/issues/28702
3754 visitor_data = self._extract_visitor_data(response) or visitor_data
3755
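# Continuation payloads may arrive under 'continuationContents' (handled here) or under
# 'onResponseReceivedActions'/'onResponseReceivedEndpoints' (handled further below);
# each known key is dispatched to the matching entry extractor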
3756 known_continuation_renderers = {
3757 'playlistVideoListContinuation': self._playlist_entries,
3758 'gridContinuation': self._grid_entries,
3759 'itemSectionContinuation': self._post_thread_continuation_entries,
3760 'sectionListContinuation': extract_entries, # for feeds
3761 }
3762 continuation_contents = try_get(
3763 response, lambda x: x['continuationContents'], dict) or {}
3764 continuation_renderer = None
3765 for key, value in continuation_contents.items():
3766 if key not in known_continuation_renderers:
3767 continue
3768 continuation_renderer = value
3769 continuation_list = [None]
3770 for entry in known_continuation_renderers[key](continuation_renderer):
3771 yield entry
3772 continuation = continuation_list[0] or self._extract_continuation(continuation_renderer)
3773 break
3774 if continuation_renderer:
3775 continue
3776
3777 known_renderers = {
3778 'gridPlaylistRenderer': (self._grid_entries, 'items'),
3779 'gridVideoRenderer': (self._grid_entries, 'items'),
3780 'gridChannelRenderer': (self._grid_entries, 'items'),
3781 'playlistVideoRenderer': (self._playlist_entries, 'contents'),
3782 'itemSectionRenderer': (extract_entries, 'contents'), # for feeds
3783 'richItemRenderer': (extract_entries, 'contents'), # for hashtag
3784 'backstagePostThreadRenderer': (self._post_thread_continuation_entries, 'contents')
3785 }
3786 on_response_received = dict_get(response, ('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
3787 continuation_items = try_get(
3788 on_response_received, lambda x: x[0]['appendContinuationItemsAction']['continuationItems'], list)
3789 continuation_item = try_get(continuation_items, lambda x: x[0], dict) or {}
3790 video_items_renderer = None
3791 for key, value in continuation_item.items():
3792 if key not in known_renderers:
3793 continue
3794 video_items_renderer = {known_renderers[key][1]: continuation_items}
3795 continuation_list = [None]
3796 for entry in known_renderers[key][0](video_items_renderer):
3797 yield entry
3798 continuation = continuation_list[0] or self._extract_continuation(video_items_renderer)
3799 break
3800 if video_items_renderer:
3801 continue
3802 break
3803
3804 @staticmethod
3805 def _extract_selected_tab(tabs):
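# Note: the 'else' below belongs to the for loop and only runs if no tab is marked as selected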
3806 for tab in tabs:
3807 renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
3808 if renderer.get('selected') is True:
3809 return renderer
3810 else:
3811 raise ExtractorError('Unable to find selected tab')
3812
3813 @classmethod
3814 def _extract_uploader(cls, data):
3815 uploader = {}
3816 renderer = cls._extract_sidebar_info_renderer(data, 'playlistSidebarSecondaryInfoRenderer') or {}
3817 owner = try_get(
3818 renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
3819 if owner:
3820 uploader['uploader'] = owner.get('text')
3821 uploader['uploader_id'] = try_get(
3822 owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
3823 uploader['uploader_url'] = urljoin(
3824 'https://www.youtube.com/',
3825 try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
3826 return {k: v for k, v in uploader.items() if v is not None}
3827
3828 def _extract_from_tabs(self, item_id, ytcfg, data, tabs):
3829 playlist_id = title = description = channel_url = channel_name = channel_id = None
3830 thumbnails_list = []
3831 tags = []
3832
3833 selected_tab = self._extract_selected_tab(tabs)
3834 renderer = try_get(
3835 data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
3836 if renderer:
3837 channel_name = renderer.get('title')
3838 channel_url = renderer.get('channelUrl')
3839 channel_id = renderer.get('externalId')
3840 else:
3841 renderer = try_get(
3842 data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
3843
3844 if renderer:
3845 title = renderer.get('title')
3846 description = renderer.get('description', '')
3847 playlist_id = channel_id
3848 tags = renderer.get('keywords', '').split()
3849 thumbnails_list = (
3850 try_get(renderer, lambda x: x['avatar']['thumbnails'], list)
3851 or try_get(
3852 self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer'),
3853 lambda x: x['thumbnailRenderer']['playlistVideoThumbnailRenderer']['thumbnail']['thumbnails'],
3854 list)
3855 or [])
3856
3857 thumbnails = []
3858 for t in thumbnails_list:
3859 if not isinstance(t, dict):
3860 continue
3861 thumbnail_url = url_or_none(t.get('url'))
3862 if not thumbnail_url:
3863 continue
3864 thumbnails.append({
3865 'url': thumbnail_url,
3866 'width': int_or_none(t.get('width')),
3867 'height': int_or_none(t.get('height')),
3868 })
3869 if playlist_id is None:
3870 playlist_id = item_id
3871 if title is None:
3872 title = (
3873 try_get(data, lambda x: x['header']['hashtagHeaderRenderer']['hashtag']['simpleText'])
3874 or playlist_id)
3875 title += format_field(selected_tab, 'title', ' - %s')
3876 title += format_field(selected_tab, 'expandedText', ' - %s')
3877 metadata = {
3878 'playlist_id': playlist_id,
3879 'playlist_title': title,
3880 'playlist_description': description,
3881 'uploader': channel_name,
3882 'uploader_id': channel_id,
3883 'uploader_url': channel_url,
3884 'thumbnails': thumbnails,
3885 'tags': tags,
3886 }
3887 availability = self._extract_availability(data)
3888 if availability:
3889 metadata['availability'] = availability
3890 if not channel_id:
3891 metadata.update(self._extract_uploader(data))
3892 metadata.update({
3893 'channel': metadata['uploader'],
3894 'channel_id': metadata['uploader_id'],
3895 'channel_url': metadata['uploader_url']})
3896 return self.playlist_result(
3897 self._entries(
3898 selected_tab, playlist_id, ytcfg,
3899 self._extract_account_syncid(ytcfg, data),
3900 self._extract_visitor_data(data, ytcfg)),
3901 **metadata)
3902
3903 def _extract_mix_playlist(self, playlist, playlist_id, data, ytcfg):
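# Mix playlists are effectively endless: each page is fetched via the watch 'next' endpoint
# and extraction stops once the first video is encountered again (assumed to be the end of the mix)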
3904 first_id = last_id = response = None
3905 for page_num in itertools.count(1):
3906 videos = list(self._playlist_entries(playlist))
3907 if not videos:
3908 return
3909 start = next((i for i, v in enumerate(videos) if v['id'] == last_id), -1) + 1
3910 if start >= len(videos):
3911 return
3912 for video in videos[start:]:
3913 if video['id'] == first_id:
3914 self.to_screen('First video %s found again; Assuming end of Mix' % first_id)
3915 return
3916 yield video
3917 first_id = first_id or videos[0]['id']
3918 last_id = videos[-1]['id']
3919 watch_endpoint = try_get(
3920 playlist, lambda x: x['contents'][-1]['playlistPanelVideoRenderer']['navigationEndpoint']['watchEndpoint'])
3921 headers = self.generate_api_headers(
3922 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
3923 visitor_data=self._extract_visitor_data(response, data, ytcfg))
3924 query = {
3925 'playlistId': playlist_id,
3926 'videoId': watch_endpoint.get('videoId') or last_id,
3927 'index': watch_endpoint.get('index') or len(videos),
3928 'params': watch_endpoint.get('params') or 'OAE%3D'
3929 }
3930 response = self._extract_response(
3931 item_id='%s page %d' % (playlist_id, page_num),
3932 query=query, ep='next', headers=headers, ytcfg=ytcfg,
3933 check_get_keys='contents'
3934 )
3935 playlist = try_get(
3936 response, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
3937
3938 def _extract_from_playlist(self, item_id, url, data, playlist, ytcfg):
3939 title = playlist.get('title') or try_get(
3940 data, lambda x: x['titleText']['simpleText'], compat_str)
3941 playlist_id = playlist.get('playlistId') or item_id
3942
3943 # Delegating everything except mix playlists to regular tab-based playlist URL
3944 playlist_url = urljoin(url, try_get(
3945 playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
3946 compat_str))
3947 if playlist_url and playlist_url != url:
3948 return self.url_result(
3949 playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
3950 video_title=title)
3951
3952 return self.playlist_result(
3953 self._extract_mix_playlist(playlist, playlist_id, data, ytcfg),
3954 playlist_id=playlist_id, playlist_title=title)
3955
3956 def _extract_availability(self, data):
3957 """
3958 Gets the availability of a given playlist/tab.
3959 Note: Unless YouTube tells us explicitly, we do not assume it is public
3960 @param data: response
3961 """
3962 is_private = is_unlisted = None
3963 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer') or {}
3964 badge_labels = self._extract_badges(renderer)
3965
3966 # Personal playlists, when authenticated, have a dropdown visibility selector instead of a badge
3967 privacy_dropdown_entries = try_get(
3968 renderer, lambda x: x['privacyForm']['dropdownFormFieldRenderer']['dropdown']['dropdownRenderer']['entries'], list) or []
3969 for renderer_dict in privacy_dropdown_entries:
3970 is_selected = try_get(
3971 renderer_dict, lambda x: x['privacyDropdownItemRenderer']['isSelected'], bool) or False
3972 if not is_selected:
3973 continue
3974 label = self._get_text(renderer_dict, ('privacyDropdownItemRenderer', 'label'))
3975 if label:
3976 badge_labels.add(label.lower())
3977 break
3978
3979 for badge_label in badge_labels:
3980 if badge_label == 'unlisted':
3981 is_unlisted = True
3982 elif badge_label == 'private':
3983 is_private = True
3984 elif badge_label == 'public':
3985 is_unlisted = is_private = False
3986 return self._availability(is_private, False, False, False, is_unlisted)
3987
3988 @staticmethod
3989 def _extract_sidebar_info_renderer(data, info_renderer, expected_type=dict):
3990 sidebar_renderer = try_get(
3991 data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list) or []
3992 for item in sidebar_renderer:
3993 renderer = try_get(item, lambda x: x[info_renderer], expected_type)
3994 if renderer:
3995 return renderer
3996
3997 def _reload_with_unavailable_videos(self, item_id, data, ytcfg):
3998 """
3999 Get playlist with unavailable videos if the 'show unavailable videos' button exists.
4000 """
4001 browse_id = params = None
4002 renderer = self._extract_sidebar_info_renderer(data, 'playlistSidebarPrimaryInfoRenderer')
4003 if not renderer:
4004 return
4005 menu_renderer = try_get(
4006 renderer, lambda x: x['menu']['menuRenderer']['items'], list) or []
4007 for menu_item in menu_renderer:
4008 if not isinstance(menu_item, dict):
4009 continue
4010 nav_item_renderer = menu_item.get('menuNavigationItemRenderer')
4011 text = try_get(
4012 nav_item_renderer, lambda x: x['text']['simpleText'], compat_str)
4013 if not text or text.lower() != 'show unavailable videos':
4014 continue
4015 browse_endpoint = try_get(
4016 nav_item_renderer, lambda x: x['navigationEndpoint']['browseEndpoint'], dict) or {}
4017 browse_id = browse_endpoint.get('browseId')
4018 params = browse_endpoint.get('params')
4019 break
4020
4021 headers = self.generate_api_headers(
4022 ytcfg=ytcfg, account_syncid=self._extract_account_syncid(ytcfg, data),
4023 visitor_data=self._extract_visitor_data(data, ytcfg))
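# 'VL' + playlist id is the conventional browse ID for a playlist; 'wgYCCAA=' appears to be
# the default params used by the 'show unavailable videos' action (assumption based on observed behaviour)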
4024 query = {
4025 'params': params or 'wgYCCAA=',
4026 'browseId': browse_id or 'VL%s' % item_id
4027 }
4028 return self._extract_response(
4029 item_id=item_id, headers=headers, query=query,
4030 check_get_keys='contents', fatal=False, ytcfg=ytcfg,
4031 note='Downloading API JSON with unavailable videos')
4032
4033 def _extract_webpage(self, url, item_id, fatal=True):
4034 retries = self.get_param('extractor_retries', 3)
4035 count = -1
4036 webpage = data = last_error = None
4037 while count < retries:
4038 count += 1
4039 # Sometimes youtube returns a webpage with incomplete ytInitialData
4040 # See: https://github.com/yt-dlp/yt-dlp/issues/116
4041 if last_error:
4042 self.report_warning('%s. Retrying ...' % last_error)
4043 try:
4044 webpage = self._download_webpage(
4045 url, item_id,
4046 note='Downloading webpage%s' % (' (retry #%d)' % count if count else '',))
4047 data = self.extract_yt_initial_data(item_id, webpage or '', fatal=fatal) or {}
4048 except ExtractorError as e:
4049 if isinstance(e.cause, network_exceptions):
4050 if not isinstance(e.cause, compat_HTTPError) or e.cause.code not in (403, 429):
4051 last_error = error_to_compat_str(e.cause or e.msg)
4052 if count < retries:
4053 continue
4054 if fatal:
4055 raise
4056 self.report_warning(error_to_compat_str(e))
4057 break
4058 else:
4059 try:
4060 self._extract_and_report_alerts(data)
4061 except ExtractorError as e:
4062 if fatal:
4063 raise
4064 self.report_warning(error_to_compat_str(e))
4065 break
4066
4067 if dict_get(data, ('contents', 'currentVideoEndpoint')):
4068 break
4069
4070 last_error = 'Incomplete yt initial data received'
4071 if count >= retries:
4072 if fatal:
4073 raise ExtractorError(last_error)
4074 self.report_warning(last_error)
4075 break
4076
4077 return webpage, data
4078
4079 def _extract_data(self, url, item_id, ytcfg=None, fatal=True, webpage_fatal=False, default_client='web'):
4080 data = None
4081 if 'webpage' not in self._configuration_arg('skip'):
4082 webpage, data = self._extract_webpage(url, item_id, fatal=webpage_fatal)
4083 ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
4084 if not data:
4085 if not ytcfg and self.is_authenticated:
4086 msg = 'Playlists that require authentication may not extract correctly without a successful webpage download.'
4087 if 'authcheck' not in self._configuration_arg('skip') and fatal:
4088 raise ExtractorError(
4089 msg + ' If you are not downloading private content, or your cookies are only for the first account and channel,'
4090 ' pass "--extractor-args youtubetab:skip=authcheck" to skip this check',
4091 expected=True)
4092 self.report_warning(msg, only_once=True)
4093 data = self._extract_tab_endpoint(url, item_id, ytcfg, fatal=fatal, default_client=default_client)
4094 return data, ytcfg
4095
4096 def _extract_tab_endpoint(self, url, item_id, ytcfg=None, fatal=True, default_client='web'):
4097 headers = self.generate_api_headers(ytcfg=ytcfg, default_client=default_client)
4098 resolve_response = self._extract_response(
4099 item_id=item_id, query={'url': url}, check_get_keys='endpoint', headers=headers, ytcfg=ytcfg, fatal=fatal,
4100 ep='navigation/resolve_url', note='Downloading API parameters API JSON', default_client=default_client)
4101 endpoints = {'browseEndpoint': 'browse', 'watchEndpoint': 'next'}
4102 for ep_key, ep in endpoints.items():
4103 params = try_get(resolve_response, lambda x: x['endpoint'][ep_key], dict)
4104 if params:
4105 return self._extract_response(
4106 item_id=item_id, query=params, ep=ep, headers=headers,
4107 ytcfg=ytcfg, fatal=fatal, default_client=default_client,
4108 check_get_keys=('contents', 'currentVideoEndpoint'))
4109 err_note = 'Failed to resolve url (does the playlist exist?)'
4110 if fatal:
4111 raise ExtractorError(err_note, expected=True)
4112 self.report_warning(err_note, item_id)
4113
4114 @staticmethod
4115 def _smuggle_data(entries, data):
4116 for entry in entries:
4117 if data:
4118 entry['url'] = smuggle_url(entry['url'], data)
4119 yield entry
4120
4121 def _real_extract(self, url):
4122 url, smuggled_data = unsmuggle_url(url, {})
4123 if self.is_music_url(url):
4124 smuggled_data['is_music_url'] = True
4125 info_dict = self.__real_extract(url, smuggled_data)
4126 if info_dict.get('entries'):
4127 info_dict['entries'] = self._smuggle_data(info_dict['entries'], smuggled_data)
4128 return info_dict
4129
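# The conditional group (?(channel_type)...) only tries to match a '/tab' segment
# when the URL matched as a channel-type URL (channel/c/user/browse)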
4130 _url_re = re.compile(r'(?P<pre>%s)(?(channel_type)(?P<tab>/\w+))?(?P<post>.*)$' % _VALID_URL)
4131
4132 def __real_extract(self, url, smuggled_data):
4133 item_id = self._match_id(url)
4134 url = compat_urlparse.urlunparse(
4135 compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
4136 compat_opts = self.get_param('compat_opts', [])
4137
4138 def get_mobj(url):
4139 mobj = self._url_re.match(url).groupdict()
4140 mobj.update((k, '') for k, v in mobj.items() if v is None)
4141 return mobj
4142
4143 mobj = get_mobj(url)
4144 # Youtube returns incomplete data if the tab name is not lower case
4145 pre, tab, post, is_channel = mobj['pre'], mobj['tab'].lower(), mobj['post'], not mobj['not_channel']
4146 if is_channel:
4147 if smuggled_data.get('is_music_url'):
4148 if item_id[:2] == 'VL':
4149 # Youtube music VL channels have an equivalent playlist
4150 item_id = item_id[2:]
4151 pre, tab, post, is_channel = 'https://www.youtube.com/playlist?list=%s' % item_id, '', '', False
4152 elif item_id[:2] == 'MP':
4153 # Resolve albums (/[channel/browse]/MP...) to their equivalent playlist
4154 mdata = self._extract_tab_endpoint(
4155 'https://music.youtube.com/channel/%s' % item_id, item_id, default_client='web_music')
4156 murl = traverse_obj(
4157 mdata, ('microformat', 'microformatDataRenderer', 'urlCanonical'), get_all=False, expected_type=compat_str)
4158 if not murl:
4159 raise ExtractorError('Failed to resolve album to playlist.')
4160 return self.url_result(murl, ie=YoutubeTabIE.ie_key())
4161 elif mobj['channel_type'] == 'browse':
4162 # Youtube music /browse/ should be changed to /channel/
4163 pre = 'https://www.youtube.com/channel/%s' % item_id
4164 if is_channel and not tab and 'no-youtube-channel-redirect' not in compat_opts:
4165 # Home URLs should redirect to /videos/
4166 self.report_warning(
4167 'A channel/user page was given. All the channel\'s videos will be downloaded. '
4168 'To download only the videos in the home page, add a "/featured" to the URL')
4169 tab = '/videos'
4170
4171 url = ''.join((pre, tab, post))
4172 mobj = get_mobj(url)
4173
4174 # Handle both video/playlist URLs
4175 qs = parse_qs(url)
4176 video_id = qs.get('v', [None])[0]
4177 playlist_id = qs.get('list', [None])[0]
4178
4179 if not video_id and mobj['not_channel'].startswith('watch'):
4180 if not playlist_id:
4181 # If there are neither video nor playlist ids, youtube redirects to the home page, which is undesirable
4182 raise ExtractorError('Unable to recognize tab page')
4183 # Common mistake: https://www.youtube.com/watch?list=playlist_id
4184 self.report_warning('A video URL was given without video ID. Trying to download playlist %s' % playlist_id)
4185 url = 'https://www.youtube.com/playlist?list=%s' % playlist_id
4186 mobj = get_mobj(url)
4187
4188 if video_id and playlist_id:
4189 if self.get_param('noplaylist'):
4190 self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
4191 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4192 self.to_screen('Downloading playlist %s; add --no-playlist to just download video %s' % (playlist_id, video_id))
4193
4194 data, ytcfg = self._extract_data(url, item_id)
4195
4196 tabs = try_get(
4197 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4198 if tabs:
4199 selected_tab = self._extract_selected_tab(tabs)
4200 tab_name = selected_tab.get('title', '')
4201 if 'no-youtube-channel-redirect' not in compat_opts:
4202 if mobj['tab'] == '/live':
4203 # Live tab should have redirected to the video
4204 raise ExtractorError('The channel is not currently live', expected=True)
4205 if mobj['tab'] == '/videos' and tab_name.lower() != mobj['tab'][1:]:
4206 if not mobj['not_channel'] and item_id[:2] == 'UC':
4207 # Topic channels don't have /videos. Use the equivalent playlist instead
4208 self.report_warning('The URL does not have a %s tab. Trying to redirect to playlist UU%s instead' % (mobj['tab'][1:], item_id[2:]))
4209 pl_id = 'UU%s' % item_id[2:]
4210 pl_url = 'https://www.youtube.com/playlist?list=%s%s' % (pl_id, mobj['post'])
4211 try:
4212 data, ytcfg, item_id, url = *self._extract_data(pl_url, pl_id, ytcfg=ytcfg, fatal=True), pl_id, pl_url
4213 except ExtractorError:
4214 self.report_warning('The playlist gave error. Falling back to channel URL')
4215 else:
4216 self.report_warning('The URL does not have a %s tab. %s is being downloaded instead' % (mobj['tab'][1:], tab_name))
4217
4218 self.write_debug('Final URL: %s' % url)
4219
4220 # YouTube sometimes provides a button to reload playlist with unavailable videos.
4221 if 'no-youtube-unavailable-videos' not in compat_opts:
4222 data = self._reload_with_unavailable_videos(item_id, data, ytcfg) or data
4223 self._extract_and_report_alerts(data, only_once=True)
4224 tabs = try_get(
4225 data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
4226 if tabs:
4227 return self._extract_from_tabs(item_id, ytcfg, data, tabs)
4228
4229 playlist = try_get(
4230 data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
4231 if playlist:
4232 return self._extract_from_playlist(item_id, url, data, playlist, ytcfg)
4233
4234 video_id = try_get(
4235 data, lambda x: x['currentVideoEndpoint']['watchEndpoint']['videoId'],
4236 compat_str) or video_id
4237 if video_id:
4238 if mobj['tab'] != '/live': # live tab is expected to redirect to video
4239 self.report_warning('Unable to recognize playlist. Downloading just video %s' % video_id)
4240 return self.url_result(f'https://www.youtube.com/watch?v={video_id}', ie=YoutubeIE.ie_key(), video_id=video_id)
4241
4242 raise ExtractorError('Unable to recognize tab page')
4243
4244
4245 class YoutubePlaylistIE(InfoExtractor):
4246 IE_DESC = 'YouTube playlists'
4247 _VALID_URL = r'''(?x)(?:
4248 (?:https?://)?
4249 (?:\w+\.)?
4250 (?:
4251 (?:
4252 youtube(?:kids)?\.com|
4253 invidio\.us
4254 )
4255 /.*?\?.*?\blist=
4256 )?
4257 (?P<id>%(playlist_id)s)
4258 )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
4259 IE_NAME = 'youtube:playlist'
4260 _TESTS = [{
4261 'note': 'issue #673',
4262 'url': 'PLBB231211A4F62143',
4263 'info_dict': {
4264 'title': '[OLD]Team Fortress 2 (Class-based LP)',
4265 'id': 'PLBB231211A4F62143',
4266 'uploader': 'Wickydoo',
4267 'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
4268 'description': 'md5:8fa6f52abb47a9552002fa3ddfc57fc2',
4269 },
4270 'playlist_mincount': 29,
4271 }, {
4272 'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4273 'info_dict': {
4274 'title': 'YDL_safe_search',
4275 'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
4276 },
4277 'playlist_count': 2,
4278 'skip': 'This playlist is private',
4279 }, {
4280 'note': 'embedded',
4281 'url': 'https://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4282 'playlist_count': 4,
4283 'info_dict': {
4284 'title': 'JODA15',
4285 'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
4286 'uploader': 'milan',
4287 'uploader_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
4288 }
4289 }, {
4290 'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4291 'playlist_mincount': 654,
4292 'info_dict': {
4293 'title': '2018 Chinese New Singles (11/6 updated)',
4294 'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
4295 'uploader': 'LBK',
4296 'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
4297 'description': 'md5:da521864744d60a198e3a88af4db0d9d',
4298 }
4299 }, {
4300 'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
4301 'only_matching': True,
4302 }, {
4303 # music album playlist
4304 'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
4305 'only_matching': True,
4306 }]
4307
4308 @classmethod
4309 def suitable(cls, url):
4310 if YoutubeTabIE.suitable(url):
4311 return False
4312 from ..utils import parse_qs
4313 qs = parse_qs(url)
4314 if qs.get('v', [None])[0]:
4315 return False
4316 return super(YoutubePlaylistIE, cls).suitable(url)
4317
4318 def _real_extract(self, url):
4319 playlist_id = self._match_id(url)
4320 is_music_url = YoutubeBaseInfoExtractor.is_music_url(url)
4321 url = update_url_query(
4322 'https://www.youtube.com/playlist',
4323 parse_qs(url) or {'list': playlist_id})
4324 if is_music_url:
4325 url = smuggle_url(url, {'is_music_url': True})
4326 return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4327
4328
4329 class YoutubeYtBeIE(InfoExtractor):
4330 IE_DESC = 'youtu.be'
4331 _VALID_URL = r'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{11})/*?.*?\blist=(?P<playlist_id>%(playlist_id)s)' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
4332 _TESTS = [{
4333 'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
4334 'info_dict': {
4335 'id': 'yeWKywCrFtk',
4336 'ext': 'mp4',
4337 'title': 'Small Scale Baler and Braiding Rugs',
4338 'uploader': 'Backus-Page House Museum',
4339 'uploader_id': 'backuspagemuseum',
4340 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/backuspagemuseum',
4341 'upload_date': '20161008',
4342 'description': 'md5:800c0c78d5eb128500bffd4f0b4f2e8a',
4343 'categories': ['Nonprofits & Activism'],
4344 'tags': list,
4345 'like_count': int,
4346 'dislike_count': int,
4347 },
4348 'params': {
4349 'noplaylist': True,
4350 'skip_download': True,
4351 },
4352 }, {
4353 'url': 'https://youtu.be/uWyaPkt-VOI?list=PL9D9FC436B881BA21',
4354 'only_matching': True,
4355 }]
4356
4357 def _real_extract(self, url):
4358 mobj = self._match_valid_url(url)
4359 video_id = mobj.group('id')
4360 playlist_id = mobj.group('playlist_id')
4361 return self.url_result(
4362 update_url_query('https://www.youtube.com/watch', {
4363 'v': video_id,
4364 'list': playlist_id,
4365 'feature': 'youtu.be',
4366 }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
4367
4368
4369 class YoutubeYtUserIE(InfoExtractor):
4370 IE_DESC = 'YouTube user videos; "ytuser:" prefix'
4371 _VALID_URL = r'ytuser:(?P<id>.+)'
4372 _TESTS = [{
4373 'url': 'ytuser:phihag',
4374 'only_matching': True,
4375 }]
4376
4377 def _real_extract(self, url):
4378 user_id = self._match_id(url)
4379 return self.url_result(
4380 'https://www.youtube.com/user/%s' % user_id,
4381 ie=YoutubeTabIE.ie_key(), video_id=user_id)
4382
4383
4384 class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
4385 IE_NAME = 'youtube:favorites'
4386 IE_DESC = 'YouTube liked videos; ":ytfav" keyword (requires cookies)'
4387 _VALID_URL = r':ytfav(?:ou?rite)?s?'
4388 _LOGIN_REQUIRED = True
4389 _TESTS = [{
4390 'url': ':ytfav',
4391 'only_matching': True,
4392 }, {
4393 'url': ':ytfavorites',
4394 'only_matching': True,
4395 }]
4396
4397 def _real_extract(self, url):
4398 return self.url_result(
4399 'https://www.youtube.com/playlist?list=LL',
4400 ie=YoutubeTabIE.ie_key())
4401
4402
4403 class YoutubeSearchIE(SearchInfoExtractor, YoutubeTabIE):
4404 IE_DESC = 'YouTube searches'
4405 IE_NAME = 'youtube:search'
4406 _SEARCH_KEY = 'ytsearch'
4407 _SEARCH_PARAMS = None
4408 _TESTS = []
4409
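# Core search pagination: each request to the "search" Innertube endpoint
# yields a page of results plus (possibly) a continuation token, which is
# merged into the next request until no continuation is returned.
# The user-facing prefixes come from _SEARCH_KEY on each subclass, e.g.
# `yt-dlp "ytsearch5:test"` fetches the first five results.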
4410 def _search_results(self, query):
4411 data = {'query': query}
4412 if self._SEARCH_PARAMS:
4413 data['params'] = self._SEARCH_PARAMS
4414 continuation = {}
4415 for page_num in itertools.count(1):
4416 data.update(continuation)
4417 search = self._extract_response(
4418 item_id='query "%s" page %s' % (query, page_num), ep='search', query=data,
4419 check_get_keys=('contents', 'onResponseReceivedCommands')
4420 )
4421 if not search:
4422 break
4423 slr_contents = try_get(
4424 search,
4425 (lambda x: x['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'],
4426 lambda x: x['onResponseReceivedCommands'][0]['appendContinuationItemsAction']['continuationItems']),
4427 list)
4428 if not slr_contents:
4429 break
4430
4431 # YouTube sometimes adds promoted content to searches,
4432 # which shifts the position of the videos and of the continuation token,
4433 # so look through all entries until both have been found.
4434 continuation = None
4435 for slr_content in slr_contents:
4436 if not continuation:
4437 continuation = self._extract_continuation({'contents': [slr_content]})
4438
4439 isr_contents = try_get(
4440 slr_content,
4441 lambda x: x['itemSectionRenderer']['contents'],
4442 list)
4443 if not isr_contents:
4444 continue
4445 for content in isr_contents:
4446 if not isinstance(content, dict):
4447 continue
4448 video = content.get('videoRenderer')
4449 if not isinstance(video, dict):
4450 continue
4451 video_id = video.get('videoId')
4452 if not video_id:
4453 continue
4454
4455 yield self._extract_video(video)
4456
4457 if not continuation:
4458 break
4459
4460
4461 class YoutubeSearchDateIE(YoutubeSearchIE):
4462 IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
4463 _SEARCH_KEY = 'ytsearchdate'
4464 IE_DESC = 'YouTube searches, newest videos first'
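# 'CAI%3D' is the URL-encoded protobuf search filter that corresponds to
# sorting results by upload date (the same value a browser places in the
# sp= query parameter), matching the IE_DESC above.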
4465 _SEARCH_PARAMS = 'CAI%3D'
4466
4467
4468 class YoutubeSearchURLIE(YoutubeSearchIE):
4469 IE_DESC = 'YouTube search URLs with sorting and filter support'
4470 IE_NAME = YoutubeSearchIE.IE_NAME + '_url'
4471 _SEARCH_KEY = None
4472 _VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?(?:search_query|q)=(?:[^&]+)(?:[&]|$)'
4473 # _MAX_RESULTS = 100
4474 _TESTS = [{
4475 'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
4476 'playlist_mincount': 5,
4477 'info_dict': {
4478 'id': 'youtube-dl test video',
4479 'title': 'youtube-dl test video',
4480 }
4481 }, {
4482 'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
4483 'only_matching': True,
4484 }]
4485
4486 @classmethod
4487 def _make_valid_url(cls):
4488 return cls._VALID_URL
4489
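# For /results URLs the query is taken from search_query (or q), and any
# sp= value is passed through verbatim as the Innertube search params, so
# filters and sorting copied from a browser URL keep working.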
4490 def _real_extract(self, url):
4491 qs = parse_qs(url)
4492 query = (qs.get('search_query') or qs.get('q'))[0]
4493 self._SEARCH_PARAMS = qs.get('sp', ('',))[0]
4494 return self._get_n_results(query, self._MAX_RESULTS)
4495
4496
4497 class YoutubeFeedsInfoExtractor(YoutubeTabIE):
4498 """
4499 Base class for feed extractors.
4500 Subclasses must define the _FEED_NAME property.
4501 """
4502 _LOGIN_REQUIRED = True
4503 _TESTS = []
4504
4505 @property
4506 def IE_NAME(self):
4507 return 'youtube:%s' % self._FEED_NAME
4508
4509 def _real_extract(self, url):
4510 return self.url_result(
4511 'https://www.youtube.com/feed/%s' % self._FEED_NAME,
4512 ie=YoutubeTabIE.ie_key())
4513
4514
4515 class YoutubeWatchLaterIE(InfoExtractor):
4516 IE_NAME = 'youtube:watchlater'
4517 IE_DESC = 'YouTube watch later list; ":ytwatchlater" keyword (requires cookies)'
4518 _VALID_URL = r':ytwatchlater'
4519 _TESTS = [{
4520 'url': ':ytwatchlater',
4521 'only_matching': True,
4522 }]
4523
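# Watch Later is the special playlist "WL"; like the other private feeds it
# only yields results when logged-in cookies are supplied (see IE_DESC).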
4524 def _real_extract(self, url):
4525 return self.url_result(
4526 'https://www.youtube.com/playlist?list=WL', ie=YoutubeTabIE.ie_key())
4527
4528
4529 class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
4530 IE_DESC = 'YouTube recommended videos; ":ytrec" keyword'
4531 _VALID_URL = r'https?://(?:www\.)?youtube\.com/?(?:[?#]|$)|:ytrec(?:ommended)?'
4532 _FEED_NAME = 'recommended'
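# Unlike the other feeds, the home/recommended feed works without cookies,
# hence the override of _LOGIN_REQUIRED below.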
4533 _LOGIN_REQUIRED = False
4534 _TESTS = [{
4535 'url': ':ytrec',
4536 'only_matching': True,
4537 }, {
4538 'url': ':ytrecommended',
4539 'only_matching': True,
4540 }, {
4541 'url': 'https://youtube.com',
4542 'only_matching': True,
4543 }]
4544
4545
4546 class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
4547 IE_DESC = 'YouTube subscriptions feed; ":ytsubs" keyword (requires cookies)'
4548 _VALID_URL = r':ytsub(?:scription)?s?'
4549 _FEED_NAME = 'subscriptions'
4550 _TESTS = [{
4551 'url': ':ytsubs',
4552 'only_matching': True,
4553 }, {
4554 'url': ':ytsubscriptions',
4555 'only_matching': True,
4556 }]
4557
4558
4559 class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
4560 IE_DESC = 'YouTube watch history; ":ythis" keyword (requires cookies)'
4561 _VALID_URL = r':ythis(?:tory)?'
4562 _FEED_NAME = 'history'
4563 _TESTS = [{
4564 'url': ':ythistory',
4565 'only_matching': True,
4566 }]
4567
4568
4569 class YoutubeTruncatedURLIE(InfoExtractor):
4570 IE_NAME = 'youtube:truncated_url'
4571 IE_DESC = False # Do not list
4572 _VALID_URL = r'''(?x)
4573 (?:https?://)?
4574 (?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
4575 (?:watch\?(?:
4576 feature=[a-z_]+|
4577 annotation_id=annotation_[^&]+|
4578 x-yt-cl=[0-9]+|
4579 hl=[^&]*|
4580 t=[0-9]+
4581 )?
4582 |
4583 attribution_link\?a=[^&]+
4584 )
4585 $
4586 '''
4587
4588 _TESTS = [{
4589 'url': 'https://www.youtube.com/watch?annotation_id=annotation_3951667041',
4590 'only_matching': True,
4591 }, {
4592 'url': 'https://www.youtube.com/watch?',
4593 'only_matching': True,
4594 }, {
4595 'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
4596 'only_matching': True,
4597 }, {
4598 'url': 'https://www.youtube.com/watch?feature=foo',
4599 'only_matching': True,
4600 }, {
4601 'url': 'https://www.youtube.com/watch?hl=en-GB',
4602 'only_matching': True,
4603 }, {
4604 'url': 'https://www.youtube.com/watch?t=2372',
4605 'only_matching': True,
4606 }]
4607
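# These patterns match watch/attribution links whose v= parameter was lost,
# almost always because an unquoted & was interpreted by the shell, so the
# only sensible action is the explanatory error below.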
4608 def _real_extract(self, url):
4609 raise ExtractorError(
4610 'Did you forget to quote the URL? Remember that & is a meta '
4611 'character in most shells, so you want to put the URL in quotes, '
4612 'like yt-dlp '
4613 '"https://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
4614 'or simply yt-dlp BaW_jenozKc.',
4615 expected=True)
4616
4617
4618 class YoutubeClipIE(InfoExtractor):
4619 IE_NAME = 'youtube:clip'
4620 IE_DESC = False # Do not list
4621 _VALID_URL = r'https?://(?:www\.)?youtube\.com/clip/'
4622
4623 def _real_extract(self, url):
4624 self.report_warning('YouTube clips are not currently supported. The entire video will be downloaded instead')
4625 return self.url_result(url, 'Generic')
4626
4627
4628 class YoutubeTruncatedIDIE(InfoExtractor):
4629 IE_NAME = 'youtube:truncated_id'
4630 IE_DESC = False # Do not list
4631 _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
4632
4633 _TESTS = [{
4634 'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
4635 'only_matching': True,
4636 }]
4637
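# Canonical video IDs are exactly 11 characters; a shorter ID here almost
# certainly means the URL was cut off when it was copied.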
4638 def _real_extract(self, url):
4639 video_id = self._match_id(url)
4640 raise ExtractorError(
4641 'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
4642 expected=True)