jfr.im git - erebus.git/blobdiff - modules/urls.py
urls: .strip title
[erebus.git] / modules / urls.py
index 9e8e3e2b4776fda0db0a3c81d962a35422ebed1e..f2df04f377bce3d204505c62b0eb7146d1e11c7d 100644 (file)
@@ -1,4 +1,5 @@
-# Erebus IRC bot - Author: Erebus Team
+# Erebus IRC bot - Author: Conny Sjoblom
+# vim: fileencoding=utf-8
 # URL Checker
 # This file is released into the public domain; see http://unlicense.org/
 
@@ -6,10 +7,13 @@
modinfo = {
	'author': 'Erebus Team',
	'license': 'public domain',
	'compatible': [0],  # module API versions this module is compatible with
	'depends': [],  # other modules required for this one to work
	'softdeps': [],  # NOTE(review): presumably optional module dependencies — confirm against modlib
}
 
+# http://embed.ly/tools/generator
+
 # preamble
 import modlib
 lib = modlib.modlib(__name__)
@@ -17,21 +21,24 @@ modstart = lib.modstart
 modstop = lib.modstop
 
 # module code
-import re, json, urllib2, urlparse, HTMLParser
-from BeautifulSoup import BeautifulSoup
+import sys
+if sys.version_info.major < 3:
+       stringbase = basestring
+       import urllib2
+       import urlparse
+       import HTMLParser
+       html = HTMLParser.HTMLParser()
+       from BeautifulSoup import BeautifulSoup
+else:
+       stringbase = str
+       import urllib.request as urllib2
+       import urllib.parse as urlparse
+       import html
+       from bs4 import BeautifulSoup
 
-hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')
+import re, json, datetime
 
-spotify_regex = (
-       re.compile(r'spotify:(?P<type>\w+):(?P<track_id>\w{22})'),
-       re.compile(r'http://open.spotify.com/(?P<type>\w+)/(?P<track_id>\w{22})')
-)
-youtube_regex = (
-       re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&_\-]+'),
-)
-twitch_regex = (
-       re.compile('(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))([\w\-\.,@?^=%&amp;:/~\+#]*[\w\-\@?^=%&amp;/~\+#])?'), #TODO
-)
+hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')
 
 def parser_hostmask(hostmask):
        if isinstance(hostmask, dict):
@@ -57,39 +64,65 @@ def parser_hostmask(hostmask):
                'host': host
        }
 
-@lib.hooknum("PRIVMSG")
-def privmsg_hook(bot, line):
-       sender = parser_hostmask(line[1:line.find(' ')])
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
	"""Redirect handler that records the redirect status on the response.

	The stock handler follows 301/302 redirects transparently; this
	subclass tags the final response object with a ``status`` attribute
	carrying the original redirect code, so callers can tell a redirect
	happened and which kind it was.
	"""

	def _tag(self, code, result):
		# Attach the redirect code to the response the base class produced.
		result.status = code
		return result

	def http_error_301(self, req, fp, code, msg, headers):
		return self._tag(code, urllib2.HTTPRedirectHandler.http_error_301(
			self, req, fp, code, msg, headers))

	def http_error_302(self, req, fp, code, msg, headers):
		return self._tag(code, urllib2.HTTPRedirectHandler.http_error_302(
			self, req, fp, code, msg, headers))
 
-       chan = line.split()[2]
def process_line(line):
	"""Scan one line of chat text and return a list of handler responses.

	Walks the module-level ``regexes`` dispatch table of
	(handler, regex-group) pairs; every match found in *line* is fed to
	its handler.  At most ``urls.limit`` (config, default 2) matches are
	processed per line; empty/None handler results are dropped.
	"""
	responses = []
	matches_seen = 0
	limit = lib.parent.cfg.getint('urls', 'limit', 2)
	for handler, patterns in regexes:
		for pattern in patterns:
			for match in pattern.findall(line):
				if not match:
					continue
				matches_seen += 1
				if matches_seen > limit:
					return responses
				# findall() yields plain strings for group-less regexes
				# and tuples when the regex has capture groups.
				if isinstance(match, stringbase):
					reply = handler(match)
				else:
					reply = handler(*match)
				if reply is not None and reply != "":
					responses.append(reply)
	return responses
 
-       if 'open.spotify.com' in line or 'spotify:' in line:
-               for r in spotify_regex:
-                       for sptype, track in r.findall(linetx):
-                               bot.msg(chan, gotspotify(sptype, track))
-
-       elif 'youtube.com' in line or 'youtu.be' in line:
-               print "got youtube!"
-               for r in youtube_regex:
-                       for url in r.findall(linetx):
-                               bot.msg(chan, gotyoutube(url))
@lib.hooknum("PRIVMSG")
def privmsg_hook(bot, textline):
	"""PRIVMSG hook: scan channel text for URLs and announce their titles.

	Replies either one message per response or a single ' | '-joined line,
	depending on the 'urls.multiline' config flag.
	"""
	user = parser_hostmask(textline[1:textline.find(' ')])  # parsed but currently unused
	chan = textline.split()[2]

	# Strip "PRIVMSG #chan :" framing; a malformed line yields empty text.
	try:
		message = textline.split(None, 3)[3][1:]
	except IndexError:
		message = ''

	responses = process_line(message)
	if not responses:
		return
	if lib.parent.cfg.getboolean('urls', 'multiline'):
		for reply in responses:
			bot.msg(chan, reply, True)
	else:
		bot.msg(chan, ' | '.join(responses), True)
 
def unescape(line):
	"""Decode HTML entities in *line* and collapse whitespace runs to single spaces."""
	# r'' prefix: '\s' in a plain string literal is an invalid escape
	# (DeprecationWarning since Python 3.6, slated to become an error).
	return re.sub(r'\s+', ' ', html.unescape(line))
 
 def gotspotify(type, track):
        url = 'http://ws.spotify.com/lookup/1/?uri=spotify:%s:%s' % (type, track)
        xml = urllib2.urlopen(url).read()
-       soup = BeautifulSoup(xml)
+       soup = BeautifulSoup(xml, convertEntities=BeautifulSoup.HTML_ENTITIES)
        lookup_type = soup.contents[2].name
-               
+
        if lookup_type == 'track':
                name = soup.find('name').string
                album_name = soup.find('album').find('name').string
@@ -99,53 +132,154 @@ def gotspotify(type, track):
                        popularity = float(popularity.string)*100
                length = float(soup.find('length').string)
                minutes = int(length)/60
-               seconds =  int(length)%60
-       
-               return 'Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity)
-       
+               seconds = int(length)%60
+
+               return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name, name, album_name, minutes, seconds, popularity))
+
        elif lookup_type == 'album':
                album_name = soup.find('album').find('name').string
                artist_name = soup.find('artist').find('name').string
                released = soup.find('released').string
-               return 'Album: %s - %s - %s' % (artist_name, album_name, released)
-       
+               return unescape('Album: %s - %s - %s' % (artist_name, album_name, released))
+
        else:
                return 'Unsupported type.'
 
+def _yt_duration(s):
+       mo = re.match(r'P(\d+D)?T(\d+H)?(\d+M)?(\d+S)?', s)
+       pcs = [x for x in mo.groups() if x]
+       return ''.join(pcs).lower()
+def _yt_date(s, f):
+       mo = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.(\d+)Z', s)
+       return datetime.datetime(*(int(x) for x in mo.groups())).strftime(f)
+def _yt_round(n):
+       n = float(n)
+       if n >= 10**12:
+               return '%.1ft' % (n/10**12)
+       elif n >= 10**9:
+               return '%.1fb' % (n/10**9)
+       elif n >= 10**6:
+               return '%.1fm' % (n/10**6)
+       elif n >= 10**3:
+               return '%.1fk' % (n/10**3)
+       else:
+               return int(n)
+
def gotyoutube(url):
	"""Look up a YouTube watch URL via the Data API v3 and format a summary.

	Requires 'urls.api_key' in config; the output template and upload-date
	format are configurable ('urls.yt_format', 'urls.yt_date_format').
	Returns an error string instead of raising on API/parse failures.
	"""
	parsed = urlparse.urlparse(url)
	video = urlparse.parse_qs(parsed.query)["v"][0]
	api_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails,statistics&id=%s&key=%s' % (video, lib.parent.cfg.get('urls', 'api_key'))
	try:
		payload = json.loads(urllib2.urlopen(api_url).read())
		item = payload['items'][0]  # IndexError here -> 'no results'

		template = lib.parent.cfg.get('urls', 'yt_format', "\002%(author)s\002: \037%(title)s\037 [%(duration)s, uploaded %(uploaded)s, %(views)s v/%(likes)s l/%(dislikes)s d]")
		return unescape(template % {
			'title': item['snippet']['title'],
			'author': item['snippet']['channelTitle'],
			'duration': _yt_duration(item['contentDetails']['duration']),
			'uploaded': _yt_date(item['snippet']['publishedAt'], lib.parent.cfg.get('urls', 'yt_date_format', '%b %d %Y')),
			'views': _yt_round(item['statistics']['viewCount']),
			'likes': _yt_round(item['statistics']['likeCount']),
			'dislikes': _yt_round(item['statistics']['dislikeCount']),
		})
	except urllib2.HTTPError as e:
		if e.getcode() == 403:
			return 'API limit exceeded'
		return str(e)
	except IndexError:
		return 'no results'
	except Exception as e:
		# Best-effort: report (e.g. a missing statistics key) rather than crash.
		return str(e)
 
-               return "Youtube: %s (%s)" % (title, author)
def gottwitch(uri):
	"""Look up a Twitch channel's live status via the Helix API.

	*uri* is the URL remainder after 'twitch.tv/'; only the first path
	component (the channel login) is used.  Requires 'urls.twitch_api_key'
	in config for the Client-ID header.
	"""
	url = 'https://api.twitch.tv/helix/streams?user_login=%s' % uri.split('/')[0]
	opener = urllib2.build_opener()
	opener.addheaders = [('Client-ID', lib.parent.cfg.get('urls', 'twitch_api_key'))]
	respdata = opener.open(url).read()
	twitch = json.loads(respdata)['data']
	try:
		# TODO: add current game.
		return unescape('\037%s\037 is %s (%s)' % (twitch[0]['user_name'], twitch[0]['type'], twitch[0]['title']))
	except (IndexError, KeyError):
		# 'data' is an empty list when the channel is not live.  The bare
		# except: here also swallowed KeyboardInterrupt/SystemExit.
		return 'Channel offline.'
+
+def _humanize_bytes(b):
+       b = int(b)
+       i = 0
+       table = " kMGTPEZYRQ"
+       while b > 1024:
+               i += 1
+               b /= 1024.0
+       if i == 0:
+               return "%dB" % (b)
+       else:
+               return "%.2f%siB" % (b, table[i])
 
def goturl(url):
	"""Fetch *url* and return a one-line summary: content type, size and,
	for HTML pages, the <title> text.

	Returns None when the URL is claimed by a more specific handler in
	``other_regexes``; returns an error string (never raises) on network
	failures.
	"""
	output = []
	# Skip URLs that a dedicated handler will answer instead.
	for _, group in other_regexes:
		for regex in group:
			if regex.match(url):
				return None
	request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'})
	opener = urllib2.build_opener(SmartRedirectHandler())

	# Send request and handle errors
	try:
		response = opener.open(request, timeout=2)
	except urllib2.HTTPError as e:
		return 'Request error: %s %s' % (e.code, e.reason)
	except urllib2.URLError as e:
		return 'Request error: %s' % (e.reason)
	except TimeoutError as e:
		return 'Request error: request timed out'
	except Exception as e:
		return 'Unknown error: %s %r' % (type(e).__name__, e.args)

	# Try to add type and length headers to reply
	c_type = response.getheader('Content-Type', '').split(';', 1)[0]
	c_len = response.getheader('Content-Length')
	if c_type != '':
		output.append("[%s] " % (c_type))
	else:
		output.append("[no type] ")
	if c_type != "text/html": # else length will be provided by HTML code below
		if c_len is not None:
			output.append("[%s] " % (_humanize_bytes(c_len)))
		else:
			output.append("[no length] ")

	# Try to add title if HTML
	if c_type == 'text/html':
		try:
			# Cap the read at 1 MiB so huge pages can't exhaust memory.
			responsebody = response.read(1024*1024)
		except Exception as e:
			output.append('Error reading response body: %s %r' % (type(e).__name__, e.args))
		else:
			# Flag truncated/mismatched bodies; otherwise report actual size.
			if c_len is not None and len(responsebody) != int(c_len):
				output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
			else:
				output.append("[%s] " % (_humanize_bytes(len(responsebody))))
			try:
				soup = BeautifulSoup(responsebody)
				if soup.title:
					output.append('Title: ' + unescape('%s' % (soup.find('title').string.strip())))
				else:
					output.append('No title')
			except Exception as e:
				output.append('Title error: %s %r ' % (type(e).__name__, e.args))
	# (Removed a leftover debug statement: print(type(responsebody)).)

	return ''.join(output)
+
# Generic URL matcher: scheme, a dotted hostname, optional path.
# Matches found by these patterns are handled by goturl().
url_regex = (
	re.compile(r'https?://(?:[^/\s.]+\.)+[^/\s.]+(?:/\S+)?'),
)

# (handler, regex-group) pairs that take precedence over the generic URL
# handler; currently empty, but goturl() consults this table to skip URLs
# a dedicated handler would claim.
other_regexes = ()

# Full dispatch table walked by process_line(): specific handlers first,
# the catch-all goturl last.
regexes = other_regexes + (
	(goturl, url_regex),
)