# coding: utf-8
from __future__ import unicode_literals

import hmac
from hashlib import sha1
from base64 import b64encode
from time import time

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    determine_ext
)
from ..compat import compat_urllib_parse


17 | class VLiveIE(InfoExtractor): | |
18 | IE_NAME = 'vlive' | |
19 | _VALID_URL = r'https?://(?:(www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)' | |
20 | _TEST = { | |
21 | 'url': 'http://m.vlive.tv/video/1326', | |
22 | 'md5': 'cc7314812855ce56de70a06a27314983', | |
23 | 'info_dict': { | |
24 | 'id': '1326', | |
25 | 'ext': 'mp4', | |
26 | 'title': '[V] Girl\'s Day\'s Broadcast', | |
27 | 'creator': 'Girl\'s Day', | |
28 | 'upload_date': '20150817', | |
29 | }, | |
30 | } | |
31 | _SECRET = 'rFkwZet6pqk1vQt6SxxUkAHX7YL3lmqzUMrU4IDusTo4jEBdtOhNfT4BYYAdArwH' | |
32 | ||
33 | def _real_extract(self, url): | |
34 | video_id = self._match_id(url) | |
35 | ||
36 | webpage = self._download_webpage( | |
37 | 'http://m.vlive.tv/video/%s' % video_id, | |
38 | video_id, note='Download video page') | |
39 | ||
40 | title = self._og_search_title(webpage) | |
41 | thumbnail = self._og_search_thumbnail(webpage) | |
42 | creator = self._html_search_regex( | |
43 | r'<span class="name">([^<>]+)</span>', webpage, 'creator') | |
44 | upload_date = self._html_search_regex( | |
45 | r'<span class="time">(\d{4}\.\d{2}\.\d{2})</span>', webpage, | |
46 | 'upload date', default=None, fatal=False) | |
47 | if upload_date: | |
48 | upload_date = upload_date.replace('.', '') | |
49 | ||
50 | url = 'http://global.apis.naver.com/globalV/globalV/vod/%s/playinfo?' % video_id | |
51 | msgpad = {'msgpad': '%.0f' % (time() * 1000)} | |
52 | md = { | |
53 | 'md': b64encode( | |
54 | hmac.new(self._SECRET.encode('ascii'), | |
55 | (url[:255] + msgpad['msgpad']).encode('ascii'), sha1).digest()) | |
56 | } | |
57 | url += '&' + compat_urllib_parse.urlencode(msgpad) + '&' + compat_urllib_parse.urlencode(md) | |
58 | ||
59 | playinfo = self._download_json(url, video_id, 'Downloading video json') | |
60 | ||
61 | if playinfo.get('message', '') != 'success': | |
62 | raise ExtractorError(playinfo['message']) | |
63 | ||
64 | if not playinfo.get('result'): | |
65 | raise ExtractorError('No videos found.') | |
66 | ||
67 | formats = [] | |
68 | for vid in playinfo['result'].get('videos', {}).get('list', []): | |
69 | formats.append({ | |
70 | 'url': vid['source'], | |
71 | 'ext': 'mp4', | |
72 | 'abr': vid.get('bitrate', {}).get('audio'), | |
73 | 'vbr': vid.get('bitrate', {}).get('video'), | |
74 | 'format_id': vid['encodingOption']['name'], | |
75 | 'height': vid.get('height'), | |
76 | 'width': vid.get('width'), | |
77 | }) | |
78 | self._sort_formats(formats) | |
79 | ||
80 | subtitles = {} | |
81 | for caption in playinfo['result'].get('captions', {}).get('list', []): | |
82 | subtitles[caption['language']] = [ | |
83 | {'ext': determine_ext(caption['source'], default_ext='vtt'), | |
84 | 'url': caption['source']}] | |
85 | ||
86 | return { | |
87 | 'id': video_id, | |
88 | 'title': title, | |
89 | 'creator': creator, | |
90 | 'thumbnail': thumbnail, | |
91 | 'formats': formats, | |
92 | 'upload_date': upload_date, | |
93 | 'subtitles': subtitles, | |
94 | } |