# Erebus IRC bot - Author: Conny Sjoblom
# vim: fileencoding=utf-8
# This file is released into the public domain; see http://unlicense.org/
modinfo = {
	'author': 'Erebus Team',
	'license': 'public domain',
}

# http://embed.ly/tools/generator

import modlib
lib = modlib.modlib(__name__)
modstart = lib.modstart

import sys
if sys.version_info.major < 3:
	stringbase = basestring
	import urllib2
	import urlparse
	import HTMLParser
	html = HTMLParser.HTMLParser()
	from BeautifulSoup import BeautifulSoup
else:
	stringbase = str
	import urllib.request as urllib2
	import urllib.parse as urlparse
	import html
	from bs4 import BeautifulSoup

import re, json, datetime

hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')

def parser_hostmask(hostmask):
	# Accept an already-parsed dict unchanged; otherwise split nick!user@host.
	if isinstance(hostmask, dict):
		return hostmask
	nick = user = host = None
	if hostmask is not None:
		match = hostmask_regex.match(hostmask)
		if match:
			nick, user, host = match.groups()
	return {'nick': nick, 'user': user, 'host': host}
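
# Illustrative only (not part of the original source): with the reconstruction above,
#   parser_hostmask('nick!user@irc.example.net')
# returns {'nick': 'nick', 'user': 'user', 'host': 'irc.example.net'}.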

class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
	# Follow redirects but record the redirect status code on the final response.
	def http_error_301(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_301(
			self, req, fp, code, msg, headers)
		result.status = code
		return result

	def http_error_302(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_302(
			self, req, fp, code, msg, headers)
		result.status = code
		return result

# Channels where URL lookups are disabled ('urls'/'blocked' is a comma-separated list).
def _get_blocked_chans():
	return lib.parent.cfg.get('urls', 'blocked', '').split(',')

def process_line(line):
	responses = []
	limit = lib.parent.cfg.getint('urls', 'limit', 2)
	for action, group in regexes:
		for regex in group:
			for match in regex.findall(line):
				if len(responses) >= limit:
					return responses
				if isinstance(match, stringbase):
					resp = action(match)
				else:
					resp = action(*match)
				if resp is not None and resp != "":
					responses.append(resp)
	return responses

@lib.hooknum("PRIVMSG")
def privmsg_hook(bot, textline):
	user = parser_hostmask(textline[1:textline.find(' ')])
	chan = textline.split()[2]

	if chan in _get_blocked_chans(): return

	line = textline.split(None, 3)[3][1:]

	responses = process_line(line)
	if len(responses) > 0:
		if lib.parent.cfg.getboolean('urls', 'multiline'):
			for r in responses:
				bot.msg(chan, r, True)
		else:
			bot.msg(chan, ' | '.join(responses), True)
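
# Config keys this module reads (section/key names taken from the cfg.get calls
# in this file; the values shown are illustrative, not defaults):
#   [urls]
#   blocked = #chan1,#chan2
#   limit = 2
#   multiline = 0
#   api_key = <YouTube Data API v3 key>
#   yt_date_format = %b %d %Y
#   twitch_api_key = <Twitch Client-ID>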

def unescape(line):
	return re.sub(r'\s+', ' ', html.unescape(line))

def gotspotify(type, track):
	url = 'http://ws.spotify.com/lookup/1/?uri=spotify:%s:%s' % (type, track)
	xml = urllib2.urlopen(url).read()
	soup = BeautifulSoup(xml, convertEntities=BeautifulSoup.HTML_ENTITIES)
	lookup_type = soup.contents[2].name

	if lookup_type == 'track':
		name = soup.find('name').string
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		popularity = soup.find('popularity')
		if popularity:
			popularity = float(popularity.string)*100
		length = float(soup.find('length').string)
		minutes = int(length)//60
		seconds = int(length)%60

		return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity))
	elif lookup_type == 'album':
		album_name = soup.find('album').find('name').string
		artist_name = soup.find('artist').find('name').string
		released = soup.find('released').string
		return unescape('Album: %s - %s - %s' % (artist_name, album_name, released))
	else:
		return 'Unsupported type.'

def _yt_duration(s):
	# Convert an ISO-8601 YouTube duration (e.g. PT1H2M3S) into '1h2m3s'.
	mo = re.match(r'P(\d+D)?T(\d+H)?(\d+M)?(\d+S)?', s)
	pcs = [x for x in mo.groups() if x]
	return ''.join(pcs).lower()

def _yt_date(s, f):
	# Reformat an ISO-8601 upload timestamp using strftime format f.
	mo = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d+)Z', s)
	return datetime.datetime(*(int(x) for x in mo.groups())).strftime(f)

def _yt_round(n):
	# Abbreviate large counts: 1234 -> '1.2k', 5600000 -> '5.6m', etc.
	n = float(n)
	if n >= 10**12:
		return '%.1ft' % (n/10**12)
	elif n >= 10**9:
		return '%.1fb' % (n/10**9)
	elif n >= 10**6:
		return '%.1fm' % (n/10**6)
	elif n >= 10**3:
		return '%.1fk' % (n/10**3)
	else:
		return str(int(n))
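
# Rough behaviour of the helpers above (illustrative values; _yt_round assumes
# the threshold reconstruction shown here):
#   _yt_duration('PT1H2M3S')                          -> '1h2m3s'
#   _yt_date('2020-05-01T12:34:56.000Z', '%b %d %Y')  -> 'May 01 2020'
#   _yt_round('1234567')                              -> '1.2m'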

def gotyoutube(url):
	url_data = urlparse.urlparse(url)
	query = urlparse.parse_qs(url_data.query)
	video = query["v"][0]
	api_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics&id=%s&key=%s' % (video, lib.parent.cfg.get('urls', 'api_key'))
	try:
		respdata = urllib2.urlopen(api_url).read()
		v = json.loads(respdata)
		v = v['items'][0]

		return unescape(lib.parent.cfg.get('urls', 'yt_format', "\002%(author)s\002: \037%(title)s\037 [%(duration)s, uploaded %(uploaded)s, %(views)s v/%(likes)s l/%(dislikes)s d]") % {
			'title': v['snippet']['title'],
			'author': v['snippet']['channelTitle'],
			'duration': _yt_duration(v['contentDetails']['duration']),
			'uploaded': _yt_date(v['snippet']['publishedAt'], lib.parent.cfg.get('urls', 'yt_date_format', '%b %d %Y')),
			'views': _yt_round(v['statistics']['viewCount']),
			'likes': _yt_round(v['statistics']['likeCount']),
			'dislikes': _yt_round(v['statistics']['dislikeCount']),
		})
	except urllib2.HTTPError as e:
		if e.getcode() == 403:
			return 'API limit exceeded'
		return str(e)
	except Exception as e:
		return str(e)
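
# Illustrative output with the default yt_format above (all values invented);
# \002 and \037 are the raw IRC bold/underline control codes:
#   SomeChannel: Some Video Title [12m34s, uploaded Jan 01 2020, 1.2m v/34.5k l/1.1k d]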

def gottwitch(uri):
	url = 'https://api.twitch.tv/helix/streams?user_login=%s' % uri.split('/')[0]
	opener = urllib2.build_opener()
	opener.addheaders = [('Client-ID', lib.parent.cfg.get('urls', 'twitch_api_key'))]
	respdata = opener.open(url).read()
	twitch = json.loads(respdata)['data']
	try:
		# TODO: add current game.
		return unescape('\037%s\037 is %s (%s)' % (twitch[0]['user_name'], twitch[0]['type'], twitch[0]['title']))
	except IndexError:
		return 'Channel offline.'

def _humanize_bytes(b):
	# Render a byte count with a binary-prefix unit, e.g. 2048 -> '2.00kiB'.
	b = int(b)
	i = 0
	table = " kMGTPEZYRQ"
	while b > 1024:
		i += 1
		b /= 1024.0
	if i == 0:
		return "%dB" % (b)
	return "%.2f%siB" % (b, table[i])
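
# Illustrative, assuming the loop reconstruction above:
#   _humanize_bytes('512')     -> '512B'
#   _humanize_bytes('1500000') -> '1.43MiB'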

def goturl(url):
	output = []
	# Skip URLs that a more specific handler (other_regexes) already covers.
	for _, group in other_regexes:
		for regex in group:
			if regex.match(url):
				return None
	request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'})
	opener = urllib2.build_opener(SmartRedirectHandler())

	# Send request and handle errors
	try:
		response = opener.open(request, timeout=2)
	except urllib2.HTTPError as e:
		return 'Request error: %s %s' % (e.code, e.reason)
	except urllib2.URLError as e:
		return 'Request error: %s' % (e.reason)
	except TimeoutError:
		return 'Request error: request timed out'
	except Exception as e:
		return 'Unknown error: %s %r' % (type(e).__name__, e.args)

	# Try to add type and length headers to reply
	c_type = response.getheader('Content-Type', '').split(';', 1)[0]
	c_len = response.getheader('Content-Length')
	if c_type != '':
		output.append("[%s] " % (c_type))
	else:
		output.append("[no type] ")
	if c_type != "text/html": # else length will be provided by HTML code below
		if c_len is not None:
			output.append("[%s] " % (_humanize_bytes(c_len)))
		else:
			output.append("[no length] ")

	# Try to add title if HTML
	if c_type == 'text/html':
		try:
			responsebody = response.read(1024*1024)
		except Exception as e:
			output.append('Error reading response body: %s %r' % (type(e).__name__, e.args))
		else:
			if c_len is not None and len(responsebody) != int(c_len):
				output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
			else:
				output.append("[%s] " % (_humanize_bytes(len(responsebody))))
			try:
				soup = BeautifulSoup(responsebody)
				if soup.title is not None and soup.title.string is not None:
					output.append('Title: ' + unescape('%s' % (soup.find('title').string.strip())))
				else:
					output.append('No title')
			except Exception as e:
				output.append('Title error: %s %r ' % (type(e).__name__, e.args))

	return ''.join(output)

url_regex = (
	re.compile(r'https?://(?:[^/\s.]+\.)+[^/\s.]+(?:/\S+)?'),
)
other_regexes = (
)
regexes = other_regexes + (
	(goturl, url_regex),
)
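
# Example only (not part of this file): a site-specific handler is registered by
# adding a (handler, regex-tuple) pair to other_regexes; each regex's groups are
# passed to the handler as positional arguments. The names below are hypothetical.
#
#   spotify_regex = (
#       re.compile(r'spotify:(album|track):([a-zA-Z0-9]+)'),
#       re.compile(r'https?://open\.spotify\.com/(album|track)/([a-zA-Z0-9]+)'),
#   )
#   other_regexes = (
#       (gotspotify, spotify_regex),
#   )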