]> jfr.im git - yt-dlp.git/blob - youtube_dl/extractor/twitch.py
Merge branch 'master' of github.com:rg3/youtube-dl
[yt-dlp.git] / youtube_dl / extractor / twitch.py
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import itertools
5 import re
6 import random
7
8 from .common import InfoExtractor
9 from ..compat import (
10 compat_str,
11 compat_urllib_parse,
12 compat_urllib_request,
13 )
14 from ..utils import (
15 ExtractorError,
16 parse_iso8601,
17 )
18
19
class TwitchBaseIE(InfoExtractor):
    """Common plumbing for every Twitch extractor: API endpoints,
    token-aware JSON downloads and account login.
    """
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'

    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/login'
    _LOGIN_POST_URL = 'https://passport.twitch.tv/authorize'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
        """Raise ExtractorError if the API payload carries an error envelope."""
        # Non-dict payloads (lists, None) never carry an error field.
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if not error:
            return
        raise ExtractorError(
            '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
            expected=True)

    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        """Download JSON with the headers the Twitch API expects.

        The api_token cookie (when present) is echoed back as the
        Twitch-Api-Token header; error envelopes raise ExtractorError.
        """
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for morsel in self._downloader.cookiejar:
            if morsel.name == 'api_token':
                headers['Twitch-Api-Token'] = morsel.value
        req = compat_urllib_request.Request(url, headers=headers)
        payload = super(TwitchBaseIE, self)._download_json(req, video_id, note)
        self._handle_error(payload)
        return payload

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Log in with .netrc / --username credentials, if any were given."""
        username, password = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Collect the hidden form fields (CSRF token etc.) and add our
        # credentials on top of them.
        form = dict(re.findall(
            r'<input\s+type="hidden"\s+name="([^"]+)"\s+(?:id="[^"]+"\s+)?value="([^"]*)"',
            login_page))
        form['login'] = username
        form['password'] = password

        post_req = compat_urllib_request.Request(
            self._LOGIN_POST_URL,
            compat_urllib_parse.urlencode(form).encode('utf-8'))
        post_req.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            post_req, None, 'Logging in as %s' % username)

        error_match = re.search(
            r"id=([\"'])login_error_message\1[^>]*>(?P<msg>[^<]+)", response)
        if error_match:
            raise ExtractorError(
                'Unable to login: %s' % error_match.group('msg').strip(),
                expected=True)

    def _prefer_source(self, formats):
        """Rank the original-quality 'Source' rendition above transcodes."""
        for fmt in formats:
            if fmt['format_id'] == 'Source':
                fmt['preference'] = 10
                break
        self._sort_formats(formats)
90
91
class TwitchItemBaseIE(TwitchBaseIE):
    """Base for single-item extractors (video/chapter/vod).

    Subclasses define _ITEM_TYPE (human-readable label for log messages)
    and _ITEM_SHORTCUT (the one-letter id prefix the API uses).
    """

    def _download_info(self, item, item_id):
        """Fetch the kraken metadata for one item and normalize it."""
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

    def _extract_media(self, item_id):
        """Return a playlist result with one entry per chunked part."""
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        chunks = response['chunks']
        qualities = list(chunks.keys())
        # Each quality maps to a parallel list of fragments; zipping them
        # groups all qualities of the same part into one playlist entry.
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    # 'live' is the source quality here; rank it first.
                    'quality': 1 if format_id == 'live' else 0,
                }
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        """Map a kraken video object to an info dict.

        Only '_id' and 'title' are required (both are used to build the
        playlist result); everything else is optional metadata fetched
        with .get() so an item missing e.g. a description or preview no
        longer aborts extraction with a KeyError.
        """
        channel = info.get('channel') or {}
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info.get('description'),
            'duration': info.get('length'),
            'thumbnail': info.get('preview'),
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': parse_iso8601(info.get('recorded_at')),
            'view_count': info.get('views'),
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))
142
143
class TwitchVideoIE(TwitchItemBaseIE):
    # Archived broadcasts at /<channel>/b/<id>. The API's item ids carry
    # the 'a' shortcut prefix (see the 'a577357806' id in _TEST).
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        # Extraction yields a multi-part playlist, not a single video.
        'playlist_mincount': 12,
    }
158
159
class TwitchChapterIE(TwitchItemBaseIE):
    # Chapters at /<channel>/c/<id>; API item ids use the 'c' shortcut
    # prefix (see the 'c5285812' id in _TESTS).
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]
177
178
class TwitchVodIE(TwitchItemBaseIE):
    """VODs at /<channel>/v/<id>, served as HLS through the usher service."""

    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/v/6528877',
        'info_dict': {
            'id': 'v6528877',
            'ext': 'mp4',
            'title': 'LCK Summer Split - Week 6 Day 1',
            # Raw string: '\.' in a plain literal is an invalid escape
            # sequence (DeprecationWarning since py3.6, SyntaxWarning later).
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 17208,
            'timestamp': 1435131709,
            'upload_date': '20150624',
            'uploader': 'Riot Games',
            'uploader_id': 'riotgames',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        # Usher requires a signed access token (nauth/nauthsig) before it
        # will serve the HLS master playlist for this VOD.
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            item_id, 'mp4')
        self._prefer_source(formats)
        info['formats'] = formats
        return info
218
219
class TwitchPlaylistBaseIE(TwitchBaseIE):
    """Shared pagination logic for channel playlists (profile, past
    broadcasts, bookmarks). Subclasses set _PLAYLIST_TYPE and may
    override _PLAYLIST_URL / _extract_playlist_page.
    """

    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        """Page through the channel's videos and return a playlist result."""
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        seen = set()
        offset = 0
        limit = self._PAGE_LIMIT
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
            page_entries = self._extract_playlist_page(response)
            # An empty page means we have walked past the end.
            if not page_entries:
                break
            # Deduplicate while preserving the API's ordering; the old
            # set(entries) dedup shuffled the playlist arbitrarily.
            for entry in page_entries:
                if entry not in seen:
                    seen.add(entry)
                    entries.append(entry)
            offset += limit
        return self.playlist_result(
            [self.url_result(entry) for entry in entries],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        """Return the video URLs contained in one API response page."""
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
251
252
class TwitchProfileIE(TwitchPlaylistBaseIE):
    # Channel profile pages (/<channel>/profile): paginated playlist of
    # the channel's videos, built by TwitchPlaylistBaseIE.
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }
266
267
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    # Same pagination as the profile playlist, but the API query gets
    # an extra broadcasts=true flag appended to the base playlist URL.
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }
282
283
class TwitchBookmarksIE(TwitchPlaylistBaseIE):
    """Bookmarked videos of a channel, via the non-kraken bookmark API."""

    IE_NAME = 'twitch:bookmarks'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PLAYLIST_TYPE = 'bookmarks'

    _TEST = {
        'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
        'info_dict': {
            'id': 'ognos',
            'title': 'Ognos',
        },
        'playlist_mincount': 3,
    }

    def _extract_playlist_page(self, response):
        # A bookmark without an attached video carries no playable URL,
        # so those entries are skipped.
        return [
            bookmark['video']['url']
            for bookmark in response.get('bookmarks', [])
            if bookmark.get('video')
        ]
307
308
class TwitchStreamIE(TwitchBaseIE):
    """Live streams at /<channel>; falls back to the profile playlist
    when the channel is offline.
    """

    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TEST = {
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Offline channel: hand over to the profile extractor instead of
        # failing outright.
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

        # Usher needs a signed channel access token (sig + token) before
        # it will serve the live HLS master playlist.
        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

        hls_query = {
            'allow_source': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }
        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(hls_query)),
            channel_id, 'mp4')
        self._prefer_source(formats)

        channel = stream['channel']

        # Preview keys look like '<width>x<height>.jpg'; keys that do not
        # match the pattern carry no size info and are skipped.
        thumbnails = []
        for preview_key, preview_url in stream['preview'].items():
            dimensions = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', preview_key)
            if not dimensions:
                continue
            thumbnails.append({
                'url': preview_url,
                'width': int(dimensions.group('width')),
                'height': int(dimensions.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': self._live_title(channel.get('display_name') or channel.get('name')),
            'description': channel.get('status'),
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': parse_iso8601(stream.get('created_at')),
            'view_count': stream.get('viewers'),
            'formats': formats,
            'is_live': True,
        }