# Erebus IRC bot - Author: Erebus Team
# vim: fileencoding=utf-8
# URL Checker
# This file is released into the public domain; see http://unlicense.org/
# Module metadata consumed by the bot's module loader.
modinfo = {
	'author': 'Erebus Team',
	'license': 'public domain',
	'compatible': [0],  # compatible module API versions
	'depends': [],      # other modules required to work properly
	'softdeps': [],     # modules preferred but not required
}
# http://embed.ly/tools/generator
# NOTE(review): `lib` is the bot's module-support helper (its import is not
# visible in this view); exposing lib.modstop lets the loader unload this
# module cleanly — confirm against the module boilerplate elided above.
modstop = lib.modstop
# module code
import sys

# Python 2/3 compatibility: alias the Python 2 module names so the rest of
# this module can refer to urllib2/urlparse/HTMLParser/BeautifulSoup uniformly
# regardless of interpreter version.
if sys.version_info.major < 3:
	import urllib2
	import urlparse
	import HTMLParser
	from BeautifulSoup import BeautifulSoup
else:
	import urllib.request as urllib2
	import urllib.parse as urlparse
	import html.parser as HTMLParser
	from bs4 import BeautifulSoup

import re, json
# Shared HTML-entity decoder instance used by unescape() below.
html_parser = HTMLParser.HTMLParser()
# nick!user@host — three capturing groups.
hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')
# Any http(s) URL with a dotted host and optional path.
url_regex = re.compile(r'https?://[^/\s]+\.[^/\s]+(?:/\S+)?')
spotify_regex = (
	# spotify:track:<22-char base62 id> URI form
	re.compile(r'spotify:(?P<type>\w+):(?P<track_id>\w{22})'),
	# open.spotify.com web-link form (literal dots escaped)
	re.compile(r'https?://open\.spotify\.com/(?P<type>\w+)/(?P<track_id>\w+)')
)
youtube_regex = (
	re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&_\-]+'),
)
twitch_regex = (
	# Captures the channel name after twitch.tv/ (literal dot escaped)
	re.compile(r'https?://(?:www\.)?twitch\.tv/([A-Za-z0-9]*)'),
)
def parser_hostmask(hostmask):
'host': host
}
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
	"""Redirect handler that follows 301/302 like the default handler but
	records the original redirect status code on the returned response
	object (as ``result.status``) so callers can tell a redirect happened."""

	def http_error_301(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_301(
			self, req, fp, code, msg, headers)
		result.status = code
		return result

	def http_error_302(self, req, fp, code, msg, headers):
		result = urllib2.HTTPRedirectHandler.http_error_302(
			self, req, fp, code, msg, headers)
		result.status = code
		return result

@lib.hooknum("PRIVMSG")
def privmsg_hook(bot, textline):
user = parser_hostmask(textline[1:textline.find(' ')])
for match in url_regex.findall(line):
if match:
- print match
- if 'open.spotify.com' in match or 'spotify:' in match:
- for r in spotify_regex:
- for sptype, track in r.findall(match):
- bot.msg(chan, unescape(gotspotify(sptype, track)))
-
- elif 'youtube.com' in match or 'youtu.be' in match:
- for r in youtube_regex:
- for url in r.findall(match):
- bot.msg(chan, unescape(gotyoutube(url)))
-
- elif 'twitch.tv' in match:
- for r in twitch_regex:
- for uri in r.findall(match):
- bot.msg(chan, unescape(gottwitch(uri)))
-
- else:
- bot.msg(chan, unescape(goturl(match)))
+ response = goturl(match)
+ if response is not None:
+ bot.msg(chan, response)
def unescape(line):
	"""Decode HTML entities in *line* (e.g. '&amp;' -> '&').

	HTMLParser.unescape() was deprecated and removed in Python 3.9, so fall
	back to the html.unescape() module function when the method is gone.
	"""
	if hasattr(html_parser, 'unescape'):
		return html_parser.unescape(line)
	import html
	return html.unescape(line)
minutes = int(length)/60
seconds = int(length)%60
- return 'Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity)
+ return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity))
elif lookup_type == 'album':
album_name = soup.find('album').find('name').string
artist_name = soup.find('artist').find('name').string
released = soup.find('released').string
- return 'Album: %s - %s - %s' % (artist_name, album_name, released)
+ return unescape('Album: %s - %s - %s' % (artist_name, album_name, released))
else:
return 'Unsupported type.'
title = video_info['entry']['title']["$t"]
author = video_info['entry']['author'][0]['name']['$t']
- return "Youtube: %s (%s)" % (title, author)
+ return unescape("Youtube: %s (%s)" % (title, author))
except:
pass
respdata = urllib2.urlopen(url).read()
twitch = json.loads(respdata)
try:
- return 'Twitch: %s (%s playing %s)' % (twitch[0]['channel']['status'], twitch[0]['channel']['login'], twitch[0]['channel']['meta_game'])
+ return unescape('Twitch: %s (%s playing %s)' % (twitch[0]['channel']['status'], twitch[0]['channel']['login'], twitch[0]['channel']['meta_game']))
except:
return 'Twitch: Channel offline.'
def goturl(url):
	"""Fetch *url* and return 'Title: <page title>', or None on any failure.

	Follows redirects via SmartRedirectHandler; the 2-second timeout keeps
	the bot responsive when a site is slow or unreachable.
	"""
	request = urllib2.Request(url)
	opener = urllib2.build_opener(SmartRedirectHandler())
	try:
		soup = BeautifulSoup(opener.open(request, timeout=2))
		return unescape('Title: %s' % (soup.title.string))
	# Narrowed from a bare except: so KeyboardInterrupt/SystemExit still
	# propagate; any network/parse error simply yields no announcement.
	except Exception:
		return None