]> jfr.im git - erebus.git/blob - modules/urls.py
add bashorg module
[erebus.git] / modules / urls.py
1 # Erebus IRC bot - Author: Conny Sjoblom
2 # vim: fileencoding=utf-8
3 # URL Checker
4 # This file is released into the public domain; see http://unlicense.org/
5
6 # module info
modinfo = {
	'author': 'Erebus Team',
	'license': 'public domain',
	'compatible': [0],  # presumably bot-core API versions this module supports — verify against loader
	'depends': [],      # modules that must be loaded first
	'softdeps': [],     # optional modules used when present
}
14
15 # http://embed.ly/tools/generator
16
17 # preamble
18 import modlib
lib = modlib.modlib(__name__)
# Re-export modlib's default module start/stop handlers for the loader.
modstart = lib.modstart
modstop = lib.modstop
22
23 # module code
24 import sys
25 if sys.version_info.major < 3:
26 import urllib2
27 import urlparse
28 import HTMLParser
29 html = HTMLParser.HTMLParser()
30 from BeautifulSoup import BeautifulSoup
31 else:
32 import urllib.request as urllib2
33 import urllib.parse as urlparse
34 import html
35 from bs4 import BeautifulSoup
36
37 import re, json, datetime
38
# Splits "nick!user@host" into its three components.
hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')

def parser_hostmask(hostmask):
	"""Parse an IRC hostmask into a {'nick', 'user', 'host'} dict.

	A dict argument is passed through unchanged.  A string that does not
	look like a full nick!user@host mask is treated as a bare nick, and
	None yields a dict of all-None fields.
	"""
	if isinstance(hostmask, dict):
		return hostmask

	nick, user, host = None, None, None

	if hostmask is not None:
		m = hostmask_regex.match(hostmask)
		if m:
			nick, user, host = m.group(1), m.group(2), m.group(3)
		else:
			# No '!'/'@' structure: the whole string is the nick.
			nick = hostmask

	return {'nick': nick, 'user': user, 'host': host}
64
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
	"""Redirect handler that records the 3xx status code on the response.

	The stock handler follows the redirect but discards which code caused
	it; this subclass stashes it as ``result.status``.
	"""

	def _tag_status(self, result, code):
		# Remember which redirect code produced this response object.
		result.status = code
		return result

	def http_error_301(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_301(
			self, req, fp, code, msg, headers)
		return self._tag_status(result, code)

	def http_error_302(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_302(
			self, req, fp, code, msg, headers)
		return self._tag_status(result, code)
77
def process_line(line):
	"""Scan one message line for URLs and return a list of reply strings.

	Runs every registered (handler, patterns, prefix) triple from
	``regexes`` over *line*.  Stops once the per-line match limit
	('urls'/'limit' config, default 2) is exceeded; matches count toward
	the limit even when their handler returns None.
	"""
	responses = []
	num_found = 0
	limit = lib.parent.cfg.getint('urls', 'limit', 2)
	for action, group, prefix in regexes:
		for regex in group:
			for match in regex.findall(line):
				if match:
					num_found += 1
					if num_found > limit:
						return responses
					# Call the handler exactly once and reuse the result;
					# the old code invoked action(match) a second time when
					# building the reply (duplicate work and network I/O).
					resp = action(match)
					if resp is not None:
						responses.append("%s: %s" % (prefix, resp))
	return responses
93
@lib.hooknum("PRIVMSG")
def privmsg_hook(bot, textline):
	"""PRIVMSG hook: scan the message body for URLs and reply with info."""
	# Sender hostmask sits between the leading ':' and the first space.
	user = parser_hostmask(textline[1:textline.find(' ')])
	target = textline.split()[2]

	# Message body is the fourth space-separated field, minus its ':' prefix.
	parts = textline.split(None, 3)
	body = parts[3][1:] if len(parts) > 3 else ''

	replies = process_line(body)
	if not replies:
		return
	if lib.parent.cfg.getboolean('urls', 'multiline'):
		for reply in replies:
			bot.msg(target, reply, True)
	else:
		bot.msg(target, ' | '.join(replies), True)
111
def unescape(line):
	"""Decode HTML entities in *line* and collapse whitespace runs to one space."""
	# Raw string for the pattern: '\s' in a plain literal is an invalid
	# escape sequence (a warning, and eventually an error, on Python 3).
	return re.sub(r'\s+', ' ', html.unescape(line))
114
def gotspotify(type, track):
	"""Look up a Spotify URI via the (legacy) ws.spotify.com metadata API.

	Returns a formatted 'Track:'/'Album:' line, or an error string for
	unsupported lookup types.
	"""
	url = 'http://ws.spotify.com/lookup/1/?uri=spotify:%s:%s' % (type, track)
	xml = urllib2.urlopen(url).read()
	# BS3 needs convertEntities to decode entities, but bs4 (the Python 3
	# import) has no HTML_ENTITIES attribute at all, so the old call raised
	# AttributeError there.  Pick the call form the installed soup supports.
	if hasattr(BeautifulSoup, 'HTML_ENTITIES'):
		soup = BeautifulSoup(xml, convertEntities=BeautifulSoup.HTML_ENTITIES)
	else:
		soup = BeautifulSoup(xml, 'html.parser')
	# NOTE(review): contents[2] assumes a BS3-style document prolog — verify under bs4.
	lookup_type = soup.contents[2].name

	if lookup_type == 'track':
		name = soup.find('name').string
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		popularity = soup.find('popularity')
		# Default to 0 when the tag is absent; the %2d format below cannot
		# take None (the old code crashed in that case).
		popularity = float(popularity.string) * 100 if popularity else 0
		length = float(soup.find('length').string)
		# Integer division: plain '/' is float division on Python 3 and
		# produced output like '3.55:45' for the minutes field.
		minutes = int(length) // 60
		seconds = int(length) % 60

		return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity))

	elif lookup_type == 'album':
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		released = soup.find('released').string
		return unescape('Album: %s - %s - %s' % (artist_name, album_name, released))

	else:
		return 'Unsupported type.'
142
143 def _yt_duration(s):
144 mo = re.match(r'P(\d+D)?T(\d+H)?(\d+M)?(\d+S)?', s)
145 pcs = [x for x in mo.groups() if x]
146 return ''.join(pcs).lower()
147 def _yt_date(s, f):
148 mo = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d+)Z', s)
149 return datetime.datetime(*(int(x) for x in mo.groups())).strftime(f)
150 def _yt_round(n):
151 n = float(n)
152 if n >= 10**12:
153 return '%.1ft' % (n/10**12)
154 elif n >= 10**9:
155 return '%.1fb' % (n/10**9)
156 elif n >= 10**6:
157 return '%.1fm' % (n/10**6)
158 elif n >= 10**3:
159 return '%.1fk' % (n/10**3)
160 else:
161 return int(n)
162
def gotyoutube(url):
	"""Summarize a YouTube watch URL via the Data API v3.

	Returns a formatted one-line description, None for watch URLs without
	a v= parameter, or an error string on API/HTTP failure.
	"""
	url_data = urlparse.urlparse(url)
	query = urlparse.parse_qs(url_data.query)
	try:
		video = query["v"][0]
	except KeyError:
		# e.g. /watch?feature=... links carry no video id; the old code
		# raised an uncaught KeyError here.
		return None
	api_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics&id=%s&key=%s' % (video, lib.parent.cfg.get('urls', 'api_key'))
	try:
		respdata = urllib2.urlopen(api_url).read()
		v = json.loads(respdata)
		v = v['items'][0]

		stats = v['statistics']
		return unescape(lib.parent.cfg.get('urls', 'yt_format', "\002%(author)s\002: \037%(title)s\037 [%(duration)s, uploaded %(uploaded)s, %(views)s v/%(likes)s l/%(dislikes)s d]") % {
			'title': v['snippet']['title'],
			'author': v['snippet']['channelTitle'],
			'duration': _yt_duration(v['contentDetails']['duration']),
			'uploaded': _yt_date(v['snippet']['publishedAt'], lib.parent.cfg.get('urls', 'yt_date_format', '%b %d %Y')),
			# .get(..., 0): the API no longer returns dislikeCount and may
			# hide likeCount; that used to surface as a KeyError message.
			'views': _yt_round(stats.get('viewCount', 0)),
			'likes': _yt_round(stats.get('likeCount', 0)),
			'dislikes': _yt_round(stats.get('dislikeCount', 0)),
		})
	except urllib2.HTTPError as e:
		if e.getcode() == 403:
			return 'API limit exceeded'
		else:
			return str(e)
	except IndexError:
		return 'no results'
	except Exception as e:
		return str(e)
191
def gottwitch(uri):
	"""Return live status for a Twitch channel path ('channelname[/...]').

	NOTE(review): the Helix endpoint also requires an OAuth bearer token
	nowadays — only a Client-ID header is sent here; verify against the API.
	"""
	url = 'https://api.twitch.tv/helix/streams?user_login=%s' % uri.split('/')[0]
	opener = urllib2.build_opener()
	opener.addheaders = [('Client-ID', lib.parent.cfg.get('urls', 'twitch_api_key'))]
	respdata = opener.open(url).read()
	twitch = json.loads(respdata)['data']
	try:
		# TODO: add current game.
		return unescape('\037%s\037 is %s (%s)' % (twitch[0]['user_name'], twitch[0]['type'], twitch[0]['title']))
	except (IndexError, KeyError):
		# Empty 'data' list means the channel is not live.  The old bare
		# except: also swallowed unrelated bugs (NameError, etc.).
		return 'Channel offline.'
203
def goturl(url):
	"""Fetch *url* and return its HTML <title> text, or an error string.

	Returns None both for URLs claimed by a specialised handler
	(Spotify/YouTube/Twitch, per other_regexes) and for pages without a
	title.
	"""
	for _, group, _ in other_regexes:
		for regex in group:
			if regex.match(url):
				return None
	request = urllib2.Request(url)
	opener = urllib2.build_opener(SmartRedirectHandler())
	try:
		# NOTE(review): 0.5s is a very tight timeout for arbitrary hosts.
		soup = BeautifulSoup(opener.open(request, timeout=0.5))
		if soup.title:
			return unescape('%s' % (soup.title.string))
		else:
			return None
	except urllib2.HTTPError as e:
		return 'Error: %s %s' % (e.code, e.reason)
	except Exception as e:
		# Python 3 exceptions have no .message attribute; the old
		# 'e.message' raised AttributeError instead of reporting the error.
		return 'Error: %r' % (e,)
221
# Generic URL matcher: scheme, a host containing a dot, optional path.
url_regex = (
	re.compile(r'https?://[^/\s]+\.[^/\s]+(?:/\S+)?'),
)
spotify_regex = (
	re.compile(r'spotify:(?P<type>\w+):(?P<track_id>\w{22})'),
	re.compile(r'https?://open\.spotify\.com/(?P<type>\w+)/(?P<track_id>\w+)')
)
youtube_regex = (
	re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&_\-]+'),
)
twitch_regex = (
	# The dot in 'twitch.tv' was previously unescaped, so it matched any
	# character (e.g. 'twitchxtv'); the redundant \/ escapes are dropped.
	re.compile(r'https?://(?:www\.)?twitch\.tv/([A-Za-z0-9]*)'),
)
# (handler, compiled-pattern-tuple, reply prefix) triples.  goturl() consults
# other_regexes to skip URLs that a specialised handler already covers.
other_regexes = (
	(gotspotify, spotify_regex, 'Spotify'),
	(gotyoutube, youtube_regex, 'YouTube'),
	(gottwitch, twitch_regex, 'Twitch'),
)
regexes = other_regexes + (
	(goturl, url_regex, 'Title'),
)
242 )