# coding: utf-8
from __future__ import unicode_literals

import itertools
import re
import random

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_str,
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urlparse,
)
from ..utils import (
    encode_dict,
    ExtractorError,
    int_or_none,
    orderedSet,
    parse_duration,
    parse_iso8601,
    sanitized_Request,
)


class TwitchBaseIE(InfoExtractor):
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'http://www.twitch.tv/login'
    _NETRC_MACHINE = 'twitch'

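    # API responses report failures in-band; raise them as ExtractorError
    # instead of silently returning the error payload.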
    def _handle_error(self, response):
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

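    # API requests carry a Referer and XMLHttpRequest header; if an api_token
    # cookie is already in the cookie jar it is forwarded as the
    # Twitch-Api-Token header.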
    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'api_token':
                headers['Twitch-Api-Token'] = cookie.value
        request = sanitized_Request(url, headers=headers)
        response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

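        # Re-submit the login form with its hidden fields plus the supplied
        # credentials, following the form's action URL when it differs from
        # the login page URL.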
        login_page, handle = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Downloading login page')

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'username': username,
            'password': password,
        })

        redirect_url = handle.geturl()

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=redirect_url, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(redirect_url, post_url)

        request = sanitized_Request(
            post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
        request.add_header('Referer', redirect_url)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        error_message = self._search_regex(
            r'<div[^>]+class="subwindow_notice"[^>]*>([^<]+)</div>',
            response, 'error message', default=None)
        if error_message:
            raise ExtractorError(
                'Unable to login. Twitch said: %s' % error_message, expected=True)

        if '>Reset your password<' in response:
            self.report_warning('Twitch asks you to reset your password, go to https://secure.twitch.tv/reset/submit')

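    # 'Source' is the original broadcast quality; bump its preference so it
    # sorts above the transcoded renditions.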
    def _prefer_source(self, formats):
        try:
            source = next(f for f in formats if f['format_id'] == 'Source')
            source['preference'] = 10
        except StopIteration:
            pass  # No Source stream present
        self._sort_formats(formats)


class TwitchItemBaseIE(TwitchBaseIE):
    def _download_info(self, item, item_id):
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

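    # Videos are delivered as per-quality lists of chunks; chunks with the
    # same index across qualities are zipped into a single playlist entry
    # carrying one format per quality.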
    def _extract_media(self, item_id):
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        return {
            'id': info['_id'],
            'title': info.get('title') or 'Untitled Broadcast',
            'description': info.get('description'),
            'duration': int_or_none(info.get('length')),
            'thumbnail': info.get('preview'),
            'uploader': info.get('channel', {}).get('display_name'),
            'uploader_id': info.get('channel', {}).get('name'),
            'timestamp': parse_iso8601(info.get('recorded_at')),
            'view_count': int_or_none(info.get('views')),
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))


class TwitchVideoIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }


class TwitchChapterIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]


class TwitchVodIE(TwitchItemBaseIE):
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TESTS = [{
        'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
        'info_dict': {
            'id': 'v6528877',
            'ext': 'mp4',
            'title': 'LCK Summer Split - Week 6 Day 1',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 17208,
            'timestamp': 1435131709,
            'upload_date': '20150624',
            'uploader': 'Riot Games',
            'uploader_id': 'riotgames',
            'view_count': int,
            'start_time': 310,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # Untitled broadcast (title is None)
        'url': 'http://www.twitch.tv/belkao_o/v/11230755',
        'info_dict': {
            'id': 'v11230755',
            'ext': 'mp4',
            'title': 'Untitled Broadcast',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1638,
            'timestamp': 1439746708,
            'upload_date': '20150816',
            'uploader': 'BelkAO_o',
            'uploader_id': 'belkao_o',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        item_id = self._match_id(url)

        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)

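        # The VOD itself is an HLS master playlist served by the usher
        # endpoint; the token and signature obtained above authorize the
        # request.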
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?%s' % (
                self._USHER_BASE, item_id,
                compat_urllib_parse.urlencode({
                    'allow_source': 'true',
                    'allow_audio_only': 'true',
                    'allow_spectre': 'true',
                    'player': 'twitchweb',
                    'nauth': access_token['token'],
                    'nauthsig': access_token['sig'],
                })),
            item_id, 'mp4')

        self._prefer_source(formats)
        info['formats'] = formats

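        # Honour a start offset passed in the URL, e.g. ?t=5m10s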
        parsed_url = compat_urllib_parse_urlparse(url)
        query = compat_parse_qs(parsed_url.query)
        if 't' in query:
            info['start_time'] = parse_duration(query['t'][0])

        return info


class TwitchPlaylistBaseIE(TwitchBaseIE):
    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        broken_paging_detected = False
        counter_override = None
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id,
                'Downloading %s videos JSON page %s'
                % (self._PLAYLIST_TYPE, counter_override or counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            total = int_or_none(response.get('_total'))
            # Since the beginning of March 2016 Twitch's paging mechanism
            # has been completely broken on the Twitch side: it simply ignores
            # the limit and returns the whole offset's worth of videos.
            # Work around this by requesting all videos at once.
            if not broken_paging_detected and total and len(page_entries) > limit:
                self.report_warning(
                    'Twitch paging is broken on twitch side, requesting all videos at once',
                    channel_id)
                broken_paging_detected = True
                offset = total
                counter_override = '(all at once)'
                continue
            entries.extend(page_entries)
            if broken_paging_detected or total and len(page_entries) >= total:
                break
            offset += limit
        return self.playlist_result(
            [self.url_result(entry) for entry in orderedSet(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))


class TwitchProfileIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }


class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }


class TwitchBookmarksIE(TwitchPlaylistBaseIE):
    IE_NAME = 'twitch:bookmarks'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PLAYLIST_TYPE = 'bookmarks'

    _TEST = {
        'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
        'info_dict': {
            'id': 'ognos',
            'title': 'Ognos',
        },
        'playlist_mincount': 3,
    }

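    # Bookmark entries wrap the actual video object; skip bookmarks that
    # carry no video.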
    def _extract_playlist_page(self, response):
        entries = []
        for bookmark in response.get('bookmarks', []):
            video = bookmark.get('video')
            if not video:
                continue
            entries.append(video['url'])
        return entries


class TwitchStreamIE(TwitchBaseIE):
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TESTS = [{
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.twitch.tv/miracle_doto#profile-0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Fall back to profile extraction if the stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

        # The channel name in the URL may be typed with a different case than
        # the original channel name (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON),
        # which would lead to constructing an invalid m3u8 URL. Work around this
        # by using the original channel name from the stream JSON, falling back
        # to lowercase if it's not available.
        channel_id = stream.get('channel', {}).get('name') or channel_id.lower()

        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

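        # Query for the live HLS master playlist; 'p' is presumably a random
        # cache-busting value, while sig/token come from the channel access
        # token.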
        query = {
            'allow_source': 'true',
            'allow_audio_only': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }
        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
            channel_id, 'mp4')
        self._prefer_source(formats)

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

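        # Only keep preview entries whose key encodes the image dimensions
        # (i.e. ends in '<width>x<height>.jpg')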
        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }