[twitch] Modified _VALID_URL for TwitchAllVideosIE
[yt-dlp.git] / youtube_dl / extractor / twitch.py
# coding: utf-8
from __future__ import unicode_literals

import itertools
import re
import random
import json

from .common import InfoExtractor
from ..compat import (
    compat_kwargs,
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_urlencode,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    clean_html,
    ExtractorError,
    float_or_none,
    int_or_none,
    orderedSet,
    parse_duration,
    parse_iso8601,
    qualities,
    try_get,
    unified_timestamp,
    update_url_query,
    url_or_none,
    urljoin,
)


class TwitchBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:(?:www|go|m)\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'https://usher.ttvnw.net'
    _LOGIN_FORM_URL = 'https://www.twitch.tv/login'
    _LOGIN_POST_URL = 'https://passport.twitch.tv/login'
    _CLIENT_ID = 'kimne78kx3ncx6brgo4mv6wki5h1ko'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

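    # All API requests funnel through here so that the web client's
    # Client-ID header is always attached and JSON error payloads are
    # surfaced via _handle_error.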
    def _call_api(self, path, item_id, *args, **kwargs):
        headers = kwargs.get('headers', {}).copy()
        headers['Client-ID'] = self._CLIENT_ID
        kwargs['headers'] = headers
        response = self._download_json(
            '%s/%s' % (self._API_BASE, path), item_id,
            *args, **compat_kwargs(kwargs))
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

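    # Login flow: fetch the login form, POST the credentials as JSON to
    # passport.twitch.tv, follow the redirect and, if Twitch asks for it,
    # prompt the user for a two-factor (Authy) token.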
    def _login(self):
        username, password = self._get_login_info()
        if username is None:
            return

        def fail(message):
            raise ExtractorError(
                'Unable to login. Twitch said: %s' % message, expected=True)

        def login_step(page, urlh, note, data):
            form = self._hidden_inputs(page)
            form.update(data)

            page_url = urlh.geturl()
            post_url = self._search_regex(
                r'<form[^>]+action=(["\'])(?P<url>.+?)\1', page,
                'post url', default=self._LOGIN_POST_URL, group='url')
            post_url = urljoin(page_url, post_url)

            headers = {
                'Referer': page_url,
                'Origin': page_url,
                'Content-Type': 'text/plain;charset=UTF-8',
            }

            response = self._download_json(
                post_url, None, note, data=json.dumps(form).encode(),
                headers=headers, expected_status=400)
            error = response.get('error_description') or response.get('error_code')
            if error:
                fail(error)

            if 'Authenticated successfully' in response.get('message', ''):
                return None, None

            redirect_url = urljoin(
                post_url,
                response.get('redirect') or response['redirect_path'])
            return self._download_webpage_handle(
                redirect_url, None, 'Downloading login redirect page',
                headers=headers)

        login_page, handle = self._download_webpage_handle(
            self._LOGIN_FORM_URL, None, 'Downloading login page')

        # Some TOR nodes and public proxies are blocked completely
        if 'blacklist_message' in login_page:
            fail(clean_html(login_page))

        redirect_page, handle = login_step(
            login_page, handle, 'Logging in', {
                'username': username,
                'password': password,
                'client_id': self._CLIENT_ID,
            })

        # Successful login
        if not redirect_page:
            return

        if re.search(r'(?i)<form[^>]+id="two-factor-submit"', redirect_page) is not None:
            # TODO: Add mechanism to request an SMS or phone call
            tfa_token = self._get_tfa_info('two-factor authentication token')
            login_step(redirect_page, handle, 'Submitting TFA token', {
                'authy_token': tfa_token,
                'remember_2fa': 'true',
            })

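    # Bump the original-quality rendition ('Source') above everything else;
    # when no format carries that id, fall back to detecting the /chunked/
    # path in the stream URL.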
    def _prefer_source(self, formats):
        try:
            source = next(f for f in formats if f['format_id'] == 'Source')
            source['quality'] = 10
        except StopIteration:
            for f in formats:
                if '/chunked/' in f['url']:
                    f.update({
                        'quality': 10,
                        'format_note': 'Source',
                    })
        self._sort_formats(formats)


class TwitchItemBaseIE(TwitchBaseIE):
    def _download_info(self, item, item_id):
        return self._extract_info(self._call_api(
            'kraken/videos/%s%s' % (item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

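    # The legacy api/videos endpoint returns one list of fragments per
    # quality ('chunks'); zipping those lists together turns each fragment
    # index into a playlist part carrying every available quality as a format.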
    def _extract_media(self, item_id):
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._call_api(
            'api/videos/%s%s' % (self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        status = info.get('status')
        if status == 'recording':
            is_live = True
        elif status == 'recorded':
            is_live = False
        else:
            is_live = None
        return {
            'id': info['_id'],
            'title': info.get('title') or 'Untitled Broadcast',
            'description': info.get('description'),
            'duration': int_or_none(info.get('length')),
            'thumbnail': info.get('preview'),
            'uploader': info.get('channel', {}).get('display_name'),
            'uploader_id': info.get('channel', {}).get('name'),
            'timestamp': parse_iso8601(info.get('recorded_at')),
            'view_count': int_or_none(info.get('views')),
            'is_live': is_live,
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))


class TwitchVideoIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
        'skip': 'HTTP Error 404: Not Found',
    }


class TwitchChapterIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]


class TwitchVodIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:(?:www|go|m)\.)?twitch\.tv/(?:[^/]+/v(?:ideo)?|videos)/|
                            player\.twitch\.tv/\?.*?\bvideo=v
                        )
                        (?P<id>\d+)
                    '''
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TESTS = [{
        'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
        'info_dict': {
            'id': 'v6528877',
            'ext': 'mp4',
            'title': 'LCK Summer Split - Week 6 Day 1',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 17208,
            'timestamp': 1435131709,
            'upload_date': '20150624',
            'uploader': 'Riot Games',
            'uploader_id': 'riotgames',
            'view_count': int,
            'start_time': 310,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # Untitled broadcast (title is None)
        'url': 'http://www.twitch.tv/belkao_o/v/11230755',
        'info_dict': {
            'id': 'v11230755',
            'ext': 'mp4',
            'title': 'Untitled Broadcast',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1638,
            'timestamp': 1439746708,
            'upload_date': '20150816',
            'uploader': 'BelkAO_o',
            'uploader_id': 'belkao_o',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://player.twitch.tv/?t=5m10s&video=v6528877',
        'only_matching': True,
    }, {
        'url': 'https://www.twitch.tv/videos/6528877',
        'only_matching': True,
    }, {
        'url': 'https://m.twitch.tv/beagsandjam/v/247478721',
        'only_matching': True,
    }, {
        'url': 'https://www.twitch.tv/northernlion/video/291940395',
        'only_matching': True,
    }]

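    # VOD extraction: metadata comes from Kraken, a per-video access token
    # from api/vods, and the formats from an Usher m3u8 URL signed with
    # nauth/nauthsig. A 't' query parameter maps to start_time, and the chat
    # replay is exposed as a 'rechat' subtitle track.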
    def _real_extract(self, url):
        item_id = self._match_id(url)

        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._call_api(
            'api/vods/%s/access_token' % item_id, item_id,
            'Downloading %s access token' % self._ITEM_TYPE)

        formats = self._extract_m3u8_formats(
            '%s/vod/%s.m3u8?%s' % (
                self._USHER_BASE, item_id,
                compat_urllib_parse_urlencode({
                    'allow_source': 'true',
                    'allow_audio_only': 'true',
                    'allow_spectre': 'true',
                    'player': 'twitchweb',
                    'nauth': access_token['token'],
                    'nauthsig': access_token['sig'],
                })),
            item_id, 'mp4', entry_protocol='m3u8_native')

        self._prefer_source(formats)
        info['formats'] = formats

        parsed_url = compat_urllib_parse_urlparse(url)
        query = compat_parse_qs(parsed_url.query)
        if 't' in query:
            info['start_time'] = parse_duration(query['t'][0])

        if info.get('timestamp') is not None:
            info['subtitles'] = {
                'rechat': [{
                    'url': update_url_query(
                        'https://rechat.twitch.tv/rechat-messages', {
                            'video_id': 'v%s' % item_id,
                            'start': info['timestamp'],
                        }),
                    'ext': 'json',
                }],
            }

        return info


class TwitchPlaylistBaseIE(TwitchBaseIE):
    _PLAYLIST_PATH = 'kraken/channels/%s/videos/?offset=%d&limit=%d'
    _PAGE_LIMIT = 100

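    # Page through the channel's videos with offset/limit. If a page comes
    # back larger than the requested limit (the broken-paging behaviour seen
    # in March 2016), switch to requesting everything in a single call.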
    def _extract_playlist(self, channel_id):
        info = self._call_api(
            'kraken/channels/%s' % channel_id,
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        broken_paging_detected = False
        counter_override = None
        for counter in itertools.count(1):
            response = self._call_api(
                self._PLAYLIST_PATH % (channel_id, offset, limit),
                channel_id,
                'Downloading %s JSON page %s'
                % (self._PLAYLIST_TYPE, counter_override or counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            total = int_or_none(response.get('_total'))
            # Since the beginning of March 2016 twitch's paging mechanism
            # is completely broken on the twitch side. It simply ignores
            # a limit and returns the whole offset number of videos.
            # Working around by just requesting all videos at once.
            # Upd: pagination bug was fixed by twitch on 15.03.2016.
            if not broken_paging_detected and total and len(page_entries) > limit:
                self.report_warning(
                    'Twitch pagination is broken on twitch side, requesting all videos at once',
                    channel_id)
                broken_paging_detected = True
                offset = total
                counter_override = '(all at once)'
                continue
            entries.extend(page_entries)
            if broken_paging_detected or total and len(page_entries) >= total:
                break
            offset += limit
        return self.playlist_result(
            [self._make_url_result(entry) for entry in orderedSet(entries)],
            channel_id, channel_name)

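    # Hand each video URL to TwitchVodIE when its id can be parsed from the
    # URL; otherwise fall back to a plain url_result so nothing is dropped.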
    def _make_url_result(self, url):
        try:
            video_id = 'v%s' % TwitchVodIE._match_id(url)
            return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
        except AssertionError:
            return self.url_result(url)

    def _extract_playlist_page(self, response):
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class TwitchProfileIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TESTS = [{
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }, {
        'url': 'http://m.twitch.tv/vanillatv/profile',
        'only_matching': True,
    }]


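# The /videos extractors below compose their _VALID_URL from the channel
# /videos base plus a filter=<kind> query pattern, and narrow the Kraken
# playlist path with a matching broadcast_type.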
class TwitchVideosBaseIE(TwitchPlaylistBaseIE):
    _VALID_URL_VIDEOS_BASE = r'%s/(?P<id>[^/]+)/videos' % TwitchBaseIE._VALID_URL_BASE
    _VALID_URL_VIDEOS_FILTERS = r'\?(?:.*?[&;])??filter=%s'
    _PLAYLIST_PATH = TwitchPlaylistBaseIE._PLAYLIST_PATH + '&broadcast_type='


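# Matches both .../videos?filter=all and a bare .../videos URL without a
# filter, which Twitch treats as "all videos".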
class TwitchAllVideosIE(TwitchVideosBaseIE):
    IE_NAME = 'twitch:videos:all'
    _VALID_URL = '%s/?(?:(?:%s)|$)' % (
        TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE,
        TwitchVideosBaseIE._VALID_URL_VIDEOS_FILTERS % 'all'
    )
    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive,upload,highlight'
    _PLAYLIST_TYPE = 'all videos'

    _TESTS = [{
        'url': 'https://www.twitch.tv/spamfish/videos?filter=all&sort=time',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 869,
    }, {
        'url': 'https://m.twitch.tv/spamfish/videos/',
        'only_matching': True,
    }]


class TwitchUploadsIE(TwitchVideosBaseIE):
    IE_NAME = 'twitch:videos:uploads'
    _VALID_URL = '%s/?(?:%s)' % (
        TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE,
        TwitchVideosBaseIE._VALID_URL_VIDEOS_FILTERS % 'uploads'
    )
    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'upload'
    _PLAYLIST_TYPE = 'uploads'

    _TESTS = [{
        'url': 'https://www.twitch.tv/spamfish/videos?filter=uploads&sort=time',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 0,
    }]


class TwitchPastBroadcastsIE(TwitchVideosBaseIE):
    IE_NAME = 'twitch:videos:past-broadcasts'
    _VALID_URL = '%s/?(?:%s)' % (
        TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE,
        TwitchVideosBaseIE._VALID_URL_VIDEOS_FILTERS % 'archives'
    )
    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'archive'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TESTS = [{
        'url': 'https://www.twitch.tv/spamfish/videos?filter=archives&sort=time',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 0,
    }]


class TwitchHighlightsIE(TwitchVideosBaseIE):
    IE_NAME = 'twitch:videos:highlights'
    _VALID_URL = '%s/?(?:%s)' % (
        TwitchVideosBaseIE._VALID_URL_VIDEOS_BASE,
        TwitchVideosBaseIE._VALID_URL_VIDEOS_FILTERS % 'highlights'
    )
    _PLAYLIST_PATH = TwitchVideosBaseIE._PLAYLIST_PATH + 'highlight'
    _PLAYLIST_TYPE = 'highlights'

    _TESTS = [{
        'url': 'https://www.twitch.tv/spamfish/videos?filter=highlights&sort=views',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 805,
    }]


class TwitchStreamIE(TwitchBaseIE):
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:
                            (?:(?:www|go|m)\.)?twitch\.tv/|
                            player\.twitch\.tv/\?.*?\bchannel=
                        )
                        (?P<id>[^/#?]+)
                    '''

    _TESTS = [{
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.twitch.tv/miracle_doto#profile-0',
        'only_matching': True,
    }, {
        'url': 'https://player.twitch.tv/?channel=lotsofs',
        'only_matching': True,
    }, {
        'url': 'https://go.twitch.tv/food',
        'only_matching': True,
    }, {
        'url': 'https://m.twitch.tv/food',
        'only_matching': True,
    }]

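    # The stream pattern is a catch-all for twitch.tv/<anything>, so defer to
    # the more specific VOD/video/clip/playlist extractors whenever one of
    # them matches the URL.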
    @classmethod
    def suitable(cls, url):
        return (False
                if any(ie.suitable(url) for ie in (
                    TwitchVideoIE,
                    TwitchChapterIE,
                    TwitchVodIE,
                    TwitchProfileIE,
                    TwitchAllVideosIE,
                    TwitchUploadsIE,
                    TwitchPastBroadcastsIE,
                    TwitchHighlightsIE,
                    TwitchClipsIE))
                else super(TwitchStreamIE, cls).suitable(url))

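    # Live extraction: Kraken stream info (used for the offline check and
    # metadata), then a channel access token, then the Usher HLS master
    # playlist. Thumbnails are built from preview entries whose key encodes
    # a WxH size.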
    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._call_api(
            'kraken/streams/%s?stream_type=all' % channel_id, channel_id,
            'Downloading stream JSON').get('stream')

        if not stream:
            raise ExtractorError('%s is offline' % channel_id, expected=True)

        # The channel name may be typed in a different case than the original
        # channel name (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON), which
        # would lead to constructing an invalid m3u8 URL. Work around this by
        # using the original channel name from the stream JSON, falling back
        # to lowercase if it's not available.
        channel_id = stream.get('channel', {}).get('name') or channel_id.lower()

        access_token = self._call_api(
            'api/channels/%s/access_token' % channel_id, channel_id,
            'Downloading channel access token')

        query = {
            'allow_source': 'true',
            'allow_audio_only': 'true',
            'allow_spectre': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }
        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
            channel_id, 'mp4')
        self._prefer_source(formats)

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }


class TwitchClipsIE(TwitchBaseIE):
    IE_NAME = 'twitch:clips'
    _VALID_URL = r'https?://(?:clips\.twitch\.tv/(?:[^/]+/)*|(?:www\.)?twitch\.tv/[^/]+/clip/)(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://clips.twitch.tv/FaintLightGullWholeWheat',
        'md5': '761769e1eafce0ffebfb4089cb3847cd',
        'info_dict': {
            'id': '42850523',
            'ext': 'mp4',
            'title': 'EA Play 2016 Live from the Novo Theatre',
            'thumbnail': r're:^https?://.*\.jpg',
            'timestamp': 1465767393,
            'upload_date': '20160612',
            'creator': 'EA',
            'uploader': 'stereotype_',
            'uploader_id': '43566419',
        },
    }, {
        # multiple formats
        'url': 'https://clips.twitch.tv/rflegendary/UninterestedBeeDAESuppy',
        'only_matching': True,
    }, {
        'url': 'https://www.twitch.tv/sergeynixon/clip/StormyThankfulSproutFutureMan',
        'only_matching': True,
    }]

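    # Clip formats come from the clips status endpoint's quality_options;
    # richer metadata is fetched from kraken/clips (v5) but is optional
    # (fatal=False), with the clip slug used as a fallback id and title.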
    def _real_extract(self, url):
        video_id = self._match_id(url)

        status = self._download_json(
            'https://clips.twitch.tv/api/v2/clips/%s/status' % video_id,
            video_id)

        formats = []

        for option in status['quality_options']:
            if not isinstance(option, dict):
                continue
            source = url_or_none(option.get('source'))
            if not source:
                continue
            formats.append({
                'url': source,
                'format_id': option.get('quality'),
                'height': int_or_none(option.get('quality')),
                'fps': int_or_none(option.get('frame_rate')),
            })

        self._sort_formats(formats)

        info = {
            'formats': formats,
        }

        clip = self._call_api(
            'kraken/clips/%s' % video_id, video_id, fatal=False, headers={
                'Accept': 'application/vnd.twitchtv.v5+json',
            })

        if clip:
            quality_key = qualities(('tiny', 'small', 'medium'))
            thumbnails = []
            thumbnails_dict = clip.get('thumbnails')
            if isinstance(thumbnails_dict, dict):
                for thumbnail_id, thumbnail_url in thumbnails_dict.items():
                    thumbnails.append({
                        'id': thumbnail_id,
                        'url': thumbnail_url,
                        'preference': quality_key(thumbnail_id),
                    })

            info.update({
                'id': clip.get('tracking_id') or video_id,
                'title': clip.get('title') or video_id,
                'duration': float_or_none(clip.get('duration')),
                'views': int_or_none(clip.get('views')),
                'timestamp': unified_timestamp(clip.get('created_at')),
                'thumbnails': thumbnails,
                'creator': try_get(clip, lambda x: x['broadcaster']['display_name'], compat_str),
                'uploader': try_get(clip, lambda x: x['curator']['display_name'], compat_str),
                'uploader_id': try_get(clip, lambda x: x['curator']['id'], compat_str),
            })
        else:
            info.update({
                'title': video_id,
                'id': video_id,
            })

        return info