]>
Commit | Line | Data |
---|---|---|
52ad14ae TT |
1 | # encoding: utf-8 |
2 | ||
3 | import re | |
4 | import socket | |
5 | import xml.etree.ElementTree | |
6 | ||
7 | from .common import InfoExtractor | |
8 | from ..utils import ( | |
9 | compat_http_client, | |
10 | compat_urllib_error, | |
11 | compat_urllib_parse, | |
12 | compat_urllib_request, | |
13 | compat_urlparse, | |
14 | compat_str, | |
15 | ||
16 | ExtractorError, | |
17 | unified_strdate, | |
18 | ) | |
19 | ||
class NiconicoIE(InfoExtractor):
    """Extractor for Nico Nico Douga (nicovideo.jp) watch pages.

    Requires a logged-in session (``_LOGIN_REQUIRED``): credentials come
    from --username/--password or the ``niconico`` .netrc machine entry.
    """
    IE_NAME = u'niconico'
    IE_DESC = u'ニコニコ動画'

    _TEST = {
        u'url': u'http://www.nicovideo.jp/watch/sm22312215',
        u'file': u'sm22312215.mp4',
        u'md5': u'd1a75c0823e2f629128c43e1212760f9',
        u'info_dict': {
            u'title': u'Big Buck Bunny',
            u'uploader': u'takuya0301',
            u'uploader_id': u'2698420',
            u'upload_date': u'20131123',
            u'description': u'(c) copyright 2008, Blender Foundation / www.bigbuckbunny.org',
        },
        u'params': {
            u'username': u'ydl.niconico@gmail.com',
            u'password': u'youtube-dl',
        },
    }

    # Video ids are a two-letter type prefix followed by digits, e.g. "sm22312215".
    _VALID_URL = r'^(?:https?://)?(?:www\.)?nicovideo\.jp/watch/([a-z][a-z][0-9]+)(?:.*)$'
    _LOGIN_URL = 'https://secure.nicovideo.jp/secure/login'
    _NETRC_MACHINE = 'niconico'
    # If True it will raise an error if no login info is provided
    _LOGIN_REQUIRED = True

    def _real_initialize(self):
        """Log in before any extraction is attempted."""
        self._login()

    def _login(self):
        """Authenticate against the niconico login endpoint.

        Returns True on (apparent) success, False when no credentials were
        given, the site reported a login error, or the request failed.
        Raises ExtractorError when credentials are missing and
        _LOGIN_REQUIRED is set.
        """
        (username, password) = self._get_login_info()
        # No authentication to be performed
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError(u'No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return False

        # Log in
        login_form_strs = {
            u'mail': username,
            u'password': password,
        }
        # Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
        # chokes on unicode
        login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
        login_data = compat_urllib_parse.urlencode(login_form).encode('ascii')
        request = compat_urllib_request.Request(self._LOGIN_URL, login_data)
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
            # The login page renders this heading when authentication fails.
            if re.search(r'(?i)<h1 class="mb8p4">Log in error</h1>', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username or password')
                return False
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return False
        return True

    def _real_extract(self, url):
        """Extract a single niconico video.

        Downloads the watch page, the getthumbinfo metadata XML and the
        getflv API response, then assembles the info dict.
        Raises ExtractorError when any required download fails.
        """
        video_id = self._extract_id(url)

        # Get video webpage
        # NOTE(review): the page contents are never parsed; the request
        # presumably establishes the session cookies the flv API needs —
        # confirm before removing this download.
        self.report_video_webpage_download(video_id)
        url = 'http://www.nicovideo.jp/watch/' + video_id
        request = compat_urllib_request.Request(url)
        try:
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video webpage: %s' % compat_str(err))

        # Get video info (XML document with title, thumbnail, uploader, ...)
        self.report_video_info_webpage_download(video_id)
        url = 'http://ext.nicovideo.jp/api/getthumbinfo/' + video_id
        request = compat_urllib_request.Request(url)
        try:
            video_info_webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video info webpage: %s' % compat_str(err))

        # Get flv info (urlencoded querystring containing the media URL)
        self.report_flv_info_webpage_download(video_id)
        url = 'http://flapi.nicovideo.jp/api/getflv?v=' + video_id
        request = compat_urllib_request.Request(url)
        try:
            flv_info_webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download flv info webpage: %s' % compat_str(err))

        # Start extracting information
        self.report_information_extraction(video_id)
        video_info = xml.etree.ElementTree.fromstring(video_info_webpage)

        # url: the getflv response is querystring-encoded; 'url' holds the media URL
        video_real_url = compat_urlparse.parse_qs(flv_info_webpage.decode('utf-8'))['url'][0]

        # title
        video_title = video_info.find('.//title').text

        # ext
        video_extension = video_info.find('.//movie_type').text

        # format
        video_format = video_extension.upper()

        # thumbnail
        video_thumbnail = video_info.find('.//thumbnail_url').text

        # description
        video_description = video_info.find('.//description').text

        # uploader_id
        video_uploader_id = video_info.find('.//user_id').text

        # uploader: nickname looked up via the Seiga user-info API.
        # BUGFIX: a failed download previously still fell through to parse
        # the (undefined) response and crashed with NameError; now the
        # uploader is simply left as None on failure.
        video_uploader = None
        url = 'http://seiga.nicovideo.jp/api/user/info?id=' + video_uploader_id
        request = compat_urllib_request.Request(url)
        try:
            user_info_webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'Unable to download user info webpage: %s' % compat_str(err))
        else:
            user_info = xml.etree.ElementTree.fromstring(user_info_webpage)
            video_uploader = user_info.find('.//nickname').text

        # upload_date: strip the timezone suffix before normalising to YYYYMMDD
        video_upload_date = unified_strdate(video_info.find('.//first_retrieve').text.split('+')[0])

        # view_count: the info-dict field is numeric, so convert from text
        video_view_count = int(video_info.find('.//view_counter').text)

        # webpage_url
        video_webpage_url = video_info.find('.//watch_url').text

        return {
            'id': video_id,
            'url': video_real_url,
            'title': video_title,
            'ext': video_extension,
            'format': video_format,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'uploader_id': video_uploader_id,
            'view_count': video_view_count,
            'webpage_url': video_webpage_url,
        }

    def _extract_id(self, url):
        """Return the video id (e.g. "sm22312215") from a watch URL.

        Raises ExtractorError when the URL does not match _VALID_URL.
        """
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(1)
        return video_id

    def report_video_webpage_download(self, video_id):
        """Report attempt to download video webpage."""
        self.to_screen(u'%s: Downloading video webpage' % video_id)

    def report_video_info_webpage_download(self, video_id):
        """Report attempt to download video info webpage."""
        self.to_screen(u'%s: Downloading video info webpage' % video_id)

    def report_flv_info_webpage_download(self, video_id):
        """Report attempt to download flv info webpage."""
        self.to_screen(u'%s: Downloading flv info webpage' % video_id)

    def report_information_extraction(self, video_id):
        """Report attempt to extract video information."""
        self.to_screen(u'%s: Extracting video information' % video_id)