]> jfr.im git - erebus.git/blob - modules/urls.py
0171fe4c2bde6772c3a670744c9c501a53a3bd8c
[erebus.git] / modules / urls.py
1 # Erebus IRC bot - Author: Conny Sjoblom
2 # vim: fileencoding=utf-8
3 # URL Checker
4 # This file is released into the public domain; see http://unlicense.org/
5
6 # module info
7 modinfo = {
8 'author': 'Erebus Team',
9 'license': 'public domain',
10 'compatible': [0],
11 'depends': [],
12 'softdeps': [],
13 }
14
15 # http://embed.ly/tools/generator
16
17 # preamble
18 import modlib
19 lib = modlib.modlib(__name__)
20 modstart = lib.modstart
21 modstop = lib.modstop
22
23 # module code
24 import sys
25 if sys.version_info.major < 3:
26 stringbase = basestring
27 import urllib2
28 import urlparse
29 import HTMLParser
30 html = HTMLParser.HTMLParser()
31 from BeautifulSoup import BeautifulSoup
32 else:
33 stringbase = str
34 import urllib.request as urllib2
35 import urllib.parse as urlparse
36 import html
37 from bs4 import BeautifulSoup
38
39 import re, json, datetime
40
hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')

def parser_hostmask(hostmask):
	"""Split an IRC hostmask ('nick!user@host') into its components.

	Accepts a string, a dict (returned unchanged, assumed already parsed),
	or None. Returns a dict with 'nick', 'user' and 'host' keys; parts
	that cannot be determined are None. A string without '!'/'@' is
	treated as a bare nick.
	"""
	if isinstance(hostmask, dict):
		return hostmask

	nick = user = host = None

	if hostmask is not None:
		m = hostmask_regex.match(hostmask)
		if m:
			nick, user, host = m.groups()
		else:
			# No separators present: the whole string is the nick.
			nick = hostmask

	return {'nick': nick, 'user': user, 'host': host}
66
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
	"""Redirect handler that records the redirect status code.

	The stock handler follows 301/302 responses transparently; this
	subclass additionally stamps the returned response object with a
	``status`` attribute so callers can tell a redirect occurred.
	"""

	def _follow(self, base_method, req, fp, code, msg, headers):
		# Delegate to the base-class implementation, then tag the result.
		result = base_method(self, req, fp, code, msg, headers)
		result.status = code
		return result

	def http_error_301(self, req, fp, code, msg, headers):
		return self._follow(urllib2.HTTPRedirectHandler.http_error_301, req, fp, code, msg, headers)

	def http_error_302(self, req, fp, code, msg, headers):
		return self._follow(urllib2.HTTPRedirectHandler.http_error_302, req, fp, code, msg, headers)
79
def process_line(line):
	"""Scan one message line for recognized URL patterns.

	Runs every configured pattern group over *line* and collects the
	formatted handler replies ('Prefix: reply'). Stops as soon as more
	than the configured per-line limit ('urls.limit', default 2) of
	matches has been seen. Returns the list of reply strings.
	"""
	responses = []
	seen = 0
	limit = lib.parent.cfg.getint('urls', 'limit', 2)
	for handler, patterns, label in regexes:
		for pattern in patterns:
			for hit in pattern.findall(line):
				if not hit:
					continue
				seen += 1
				if seen > limit:
					return responses
				# findall yields a plain string for patterns with 0-1
				# groups, and a tuple when there are several groups.
				if isinstance(hit, stringbase):
					reply = handler(hit)
				else:
					reply = handler(*hit)
				if reply is not None:
					responses.append("%s: %s" % (label, reply))
	return responses
98
@lib.hooknum("PRIVMSG")
def privmsg_hook(bot, textline):
	"""PRIVMSG hook: announce titles/metadata for URLs in channel messages."""
	user = parser_hostmask(textline[1:textline.find(' ')])
	chan = textline.split()[2]

	# The message body is the 4th space-separated field, minus the ':'.
	parts = textline.split(None, 3)
	line = parts[3][1:] if len(parts) > 3 else ''

	responses = process_line(line)
	if not responses:
		return
	if lib.parent.cfg.getboolean('urls', 'multiline'):
		for resp in responses:
			bot.msg(chan, resp, True)
	else:
		bot.msg(chan, ' | '.join(responses), True)
116
def unescape(line):
	"""Decode HTML entities in *line* and collapse whitespace runs to one space.

	The whitespace pattern is a raw string: in a plain literal the
	backslash-s sequence is an invalid escape and a SyntaxWarning on
	modern Python 3.
	"""
	return re.sub(r'\s+', ' ', html.unescape(line))
119
def gotspotify(type, track):
	"""Look up a Spotify URI and return a one-line track/album description.

	NOTE(review): ws.spotify.com is a legacy lookup endpoint — confirm it
	still resolves; network errors propagate to the caller.
	"""
	url = 'http://ws.spotify.com/lookup/1/?uri=spotify:%s:%s' % (type, track)
	xml = urllib2.urlopen(url).read()
	soup = BeautifulSoup(xml, convertEntities=BeautifulSoup.HTML_ENTITIES)
	lookup_type = soup.contents[2].name

	if lookup_type == 'track':
		name = soup.find('name').string
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		popularity = soup.find('popularity')
		if popularity:
			popularity = float(popularity.string)*100
		length = float(soup.find('length').string)
		# Floor division: plain '/' returns a float under Python 3 and
		# would render the minutes as e.g. '3.0'.
		minutes = int(length)//60
		seconds = int(length)%60

		return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity))

	elif lookup_type == 'album':
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		released = soup.find('released').string
		return unescape('Album: %s - %s - %s' % (artist_name, album_name, released))

	else:
		return 'Unsupported type.'
147
148 def _yt_duration(s):
149 mo = re.match(r'P(\d+D)?T(\d+H)?(\d+M)?(\d+S)?', s)
150 pcs = [x for x in mo.groups() if x]
151 return ''.join(pcs).lower()
152 def _yt_date(s, f):
153 mo = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d+)Z', s)
154 return datetime.datetime(*(int(x) for x in mo.groups())).strftime(f)
155 def _yt_round(n):
156 n = float(n)
157 if n >= 10**12:
158 return '%.1ft' % (n/10**12)
159 elif n >= 10**9:
160 return '%.1fb' % (n/10**9)
161 elif n >= 10**6:
162 return '%.1fm' % (n/10**6)
163 elif n >= 10**3:
164 return '%.1fk' % (n/10**3)
165 else:
166 return int(n)
167
def gotyoutube(url):
	"""Fetch video metadata via the YouTube Data API v3 and format a reply.

	Returns a formatted description string, 'API limit exceeded' on HTTP
	403, 'no results' for an unknown video id, or the stringified error
	for anything else (this handler must never raise into process_line).
	"""
	url_data = urlparse.urlparse(url)
	query = urlparse.parse_qs(url_data.query)
	video = query["v"][0]
	api_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics&id=%s&key=%s' % (video, lib.parent.cfg.get('urls', 'api_key'))
	try:
		respdata = urllib2.urlopen(api_url).read()
		v = json.loads(respdata)
		v = v['items'][0]
		stats = v['statistics']

		return unescape(lib.parent.cfg.get('urls', 'yt_format', "\002%(author)s\002: \037%(title)s\037 [%(duration)s, uploaded %(uploaded)s, %(views)s v/%(likes)s l/%(dislikes)s d]") % {
			'title': v['snippet']['title'],
			'author': v['snippet']['channelTitle'],
			'duration': _yt_duration(v['contentDetails']['duration']),
			'uploaded': _yt_date(v['snippet']['publishedAt'], lib.parent.cfg.get('urls', 'yt_date_format', '%b %d %Y')),
			# Counts may be absent (hidden like counts; dislikeCount was
			# removed from the public API) — default to 0 instead of
			# letting a KeyError turn the whole reply into "'dislikeCount'".
			'views': _yt_round(stats.get('viewCount', 0)),
			'likes': _yt_round(stats.get('likeCount', 0)),
			'dislikes': _yt_round(stats.get('dislikeCount', 0)),
		})
	except urllib2.HTTPError as e:
		if e.getcode() == 403:
			return 'API limit exceeded'
		else:
			return str(e)
	except IndexError:
		return 'no results'
	except Exception as e:
		return str(e)
196
def gottwitch(uri):
	"""Look up a Twitch channel's live status via the Helix streams API.

	*uri* is the path after twitch.tv/ (the channel login name).
	NOTE(review): Helix also requires an OAuth bearer token nowadays —
	confirm the Client-ID header alone is still accepted.
	"""
	url = 'https://api.twitch.tv/helix/streams?user_login=%s' % uri.split('/')[0]
	opener = urllib2.build_opener()
	opener.addheaders = [('Client-ID', lib.parent.cfg.get('urls', 'twitch_api_key'))]
	respdata = opener.open(url).read()
	twitch = json.loads(respdata)['data']
	try:
		# TODO: add current game.
		return unescape('\037%s\037 is %s (%s)' % (twitch[0]['user_name'], twitch[0]['type'], twitch[0]['title']))
	except (IndexError, KeyError):
		# An empty 'data' list means the channel is not live. The old
		# bare except: also hid unrelated programming errors.
		return 'Channel offline.'
208
def goturl(url):
	"""Fetch *url* and return its HTML <title>, or an error string.

	Returns None for URLs already covered by a specialized handler
	(Spotify/YouTube/Twitch) so no duplicate 'Title:' reply is produced,
	and None when the page has no <title>.
	"""
	for _, patterns, _ in other_regexes:
		if any(p.match(url) for p in patterns):
			return None
	request = urllib2.Request(url)
	opener = urllib2.build_opener(SmartRedirectHandler())
	try:
		# Short timeout so a slow site cannot stall message handling.
		soup = BeautifulSoup(opener.open(request, timeout=0.5))
		if soup.title:
			return unescape('%s' % (soup.title.string))
		return None
	except urllib2.HTTPError as e:
		return 'Error: %s %s' % (e.code, e.reason)
	except urllib2.URLError as e:
		return 'Error: %s' % (e.reason)
	except Exception as e:
		return 'Error: %r' % (e.args)
228
# Pattern groups, paired with their handler callbacks below.
url_regex = (
	re.compile(r'https?://[^/\s]+\.[^/\s]+(?:/\S+)?'),
)
spotify_regex = (
	re.compile(r'spotify:(?P<type>\w+):(?P<track_id>\w{22})'),
	re.compile(r'https?://open\.spotify\.com/(?P<type>\w+)/(?P<track_id>\w+)')
)
youtube_regex = (
	re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&_\-]+'),
)
twitch_regex = (
	# Dot in 'twitch.tv' is escaped: unescaped it matched any character
	# (e.g. 'twitchXtv').
	re.compile(r'https?://(?:www\.)?twitch\.tv/([A-Za-z0-9]*)'),
)
# Services with a dedicated handler: (callback, patterns, reply prefix).
other_regexes = (
	(gotspotify, spotify_regex, 'Spotify'),
	(gotyoutube, youtube_regex, 'YouTube'),
	(gottwitch, twitch_regex, 'Twitch'),
)
# Generic title fetching runs last, for anything not matched above.
regexes = other_regexes + (
	(goturl, url_regex, 'Title'),
)