]>
jfr.im git - erebus.git/blob - modules/urls.py
1 # Erebus IRC bot - Author: Erebus Team
3 # This file is released into the public domain; see http://unlicense.org/
# NOTE(review): the two quoted lines below are orphaned entries of the
# module's metadata dict (its opener, e.g. `modinfo = {`, other keys, and
# the closing `}` were dropped by the scrape). Restore from upstream
# before treating this file as runnable.
7 'author': 'Erebus Team',
8 'license': 'public domain',
# Module bootstrap (de-mangled from the scraped blob): create this module's
# modlib wrapper and re-export its standard start hook so the bot core can
# load the module. `modlib` is imported elsewhere in this file (project-local).
lib = modlib.modlib(__name__)
modstart = lib.modstart
23 import re
, urllib2
, urlparse
, json
, HTMLParser
24 from BeautifulSoup
import BeautifulSoup
# Shared HTMLParser instance (de-mangled from the scraped blob); used via
# html_parser.unescape(...) to decode HTML entities in fetched page titles.
# Python 2 only: the HTMLParser module was renamed html.parser in Python 3.
html_parser = HTMLParser.HTMLParser()
# De-mangled from the scraped blob; regex patterns reproduced byte-for-byte.
# Splits "nick!user@host" into its three components (greedy groups are fine:
# a hostmask contains exactly one '!' and one '@' separator of interest).
hostmask_regex = re.compile(r'^(.*)!(.*)@(.*)$')
# Pulls http(s) URLs and bare "spotify:" URIs out of a message line.
url_regex = re.compile(r'((?:https?://|spotify:)[^\s]+)')
# NOTE(review): these are the surviving interior entries of three regex
# tables -- the scrape dropped the assignment openers (orig lines 30/34/37,
# presumably `spotify_regex = (`, `youtube_regex = (`, `twitch_regex = (`)
# and their closers. privmsg_hook iterates spotify_regex / youtube_regex /
# twitch_regex, so they are sequences of compiled patterns. Restore the
# missing frame lines from upstream.
# Spotify: bare URI and open.spotify.com link, capturing type + 22-char id.
31 re
.compile(r
'spotify:(?P<type>\w+):(?P<track_id>\w{22})'),
32 re
.compile(r
'https?://open.spotify.com/(?P<type>\w+)/(?P<track_id>\w{22})')
# YouTube watch URL, matched whole (no capture groups).
35 re
.compile(r
'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&_\-]+'),
# twitch.tv channel link, capturing the channel name.
38 re
.compile(r
'https?:\/\/(?:www\.)?twitch.tv\/([A-Za-z0-9]*)'),
# Parse an IRC hostmask string ("nick!user@host") using hostmask_regex.
# NOTE(review): incomplete fragment -- the scrape dropped orig lines 43-48
# (the body of the isinstance(dict) early-return branch and local setup)
# and everything after line 50 (extracting the match groups and the return
# value). Presumably it returns a nick/user/host dict -- confirm upstream.
41 def parser_hostmask(hostmask
):
42 if isinstance(hostmask
, dict):
49 if hostmask
is not None:
50 match
= hostmask_regex
.match(hostmask
)
# urllib2 redirect handler used by goturl(); delegates 301/302 handling to
# the base class.
# NOTE(review): incomplete fragment -- the scrape dropped orig lines 69-71
# and 75-77, i.e. whatever each handler does with `result` after the base
# call (conventionally attaching the status code and returning result).
# Confirm against upstream before use.
65 class SmartRedirectHandler(urllib2
.HTTPRedirectHandler
):
# Handle HTTP 301 by delegating to HTTPRedirectHandler.http_error_301.
66 def http_error_301(self
, req
, fp
, code
, msg
, headers
):
67 result
= urllib2
.HTTPRedirectHandler
.http_error_301(
68 self
, req
, fp
, code
, msg
, headers
)
# Handle HTTP 302 by delegating to HTTPRedirectHandler.http_error_302.
72 def http_error_302(self
, req
, fp
, code
, msg
, headers
):
73 result
= urllib2
.HTTPRedirectHandler
.http_error_302(
74 self
, req
, fp
, code
, msg
, headers
)
# PRIVMSG hook: scan each channel message for URLs and reply with metadata
# (Spotify lookup, YouTube title/author, Twitch status, or a generic page
# title via goturl).
# NOTE(review): incomplete fragment -- the scrape dropped orig lines 82-83,
# 85-87, 89, 94, 99, 104-105 and 107 (likely guard/else lines and
# try/except frames around the lookups, including whatever `else`/guard
# precedes the final goturl call). Confirm control flow against upstream.
78 @lib.hooknum("PRIVMSG")
79 def privmsg_hook(bot
, textline
):
# Sender hostmask is the token before the first space, minus the leading ':'.
80 user
= parser_hostmask(textline
[1:textline
.find(' ')])
# Target channel is the third space-separated token of the raw IRC line.
81 chan
= textline
.split()[2]
# Message body: fourth token onward, minus the leading ':'.
84 line
= textline
.split(None, 3)[3][1:]
88 for match
in url_regex
.findall(line
):
# Spotify link or bare spotify: URI.
90 if 'open.spotify.com' in match
or 'spotify:' in match
:
91 for r
in spotify_regex
:
92 for sptype
, track
in r
.findall(match
):
93 bot
.msg(chan
, gotspotify(sptype
, track
))
# YouTube watch links.
95 elif 'youtube.com' in match
or 'youtu.be' in match
:
96 for r
in youtube_regex
:
97 for url
in r
.findall(match
):
98 bot
.msg(chan
, gotyoutube(url
))
# Twitch channel links.
100 elif 'twitch.tv' in match
:
101 for r
in twitch_regex
:
102 for uri
in r
.findall(match
):
103 bot
.msg(chan
, gottwitch(uri
))
# Fallback: fetch the page and report its <title> (see goturl).
106 bot
.msg(chan
, goturl(match
))
# NOTE(review): orphaned tail of a helper (orig line ~108, presumably
# `def unescape(line):`, was dropped by the scrape). The helpers below call
# unescape(...) to decode HTML entities in their reply strings via the
# module-level html_parser instance.
109 return html_parser
.unescape(line
)
# Look up a Spotify track/album via the (long-defunct) ws.spotify.com XML
# lookup API and format a one-line reply string.
# NOTE(review): incomplete fragment -- the scrape dropped orig lines 116,
# 122, 127, 129 and 135-136 (likely blanks plus a guard around `popularity`
# and the `else:` before the final return). `type` also shadows the builtin.
# Confirm against upstream.
111 def gotspotify(type, track
):
112 url
= 'http://ws.spotify.com/lookup/1/?uri=spotify:%s:%s' % (type, track
)
113 xml
= urllib2
.urlopen(url
).read()
# BeautifulSoup 3 API: convertEntities decodes HTML entities during parse.
114 soup
= BeautifulSoup(xml
, convertEntities
=BeautifulSoup
.HTML_ENTITIES
)
# Root element name of the response tells us what was looked up.
115 lookup_type
= soup
.contents
[2].name
117 if lookup_type
== 'track':
118 name
= soup
.find('name').string
119 album_name
= soup
.find('album').find('name').string
120 artist_name
= soup
.find('artist').find('name').string
121 popularity
= soup
.find('popularity')
# Popularity arrives as a 0..1 fraction; scale to a percentage.
123 popularity
= float(popularity
.string
)*100
# Track length arrives in seconds; split into minutes:seconds.
124 length
= float(soup
.find('length').string
)
125 minutes
= int(length
)/60
126 seconds
= int(length
)%60
128 return unescape('Track: %s - %s / %s %s:%.2d %2d%%' % (artist_name
, name
, album_name
, minutes
, seconds
, popularity
))
130 elif lookup_type
== 'album':
131 album_name
= soup
.find('album').find('name').string
132 artist_name
= soup
.find('artist').find('name').string
133 released
= soup
.find('released').string
134 return unescape('Album: %s - %s - %s' % (artist_name
, album_name
, released
))
137 return 'Unsupported type.'
# NOTE(review): body fragment of gotyoutube(url) -- its `def` header (orig
# line ~139) and orig lines 144, 147 and 150 (likely blanks/try framing)
# were dropped by the scrape. Fetches video metadata from the retired
# GData v2 JSON API and formats "Youtube: <title> (<author>)".
# Extract the v= video id from the watch URL's query string.
140 url_data
= urlparse
.urlparse(url
)
141 query
= urlparse
.parse_qs(url_data
.query
)
142 video
= query
["v"][0]
143 api_url
= 'http://gdata.youtube.com/feeds/api/videos/%s?alt=json&v=2' % video
145 respdata
= urllib2
.urlopen(api_url
).read()
146 video_info
= json
.loads(respdata
)
148 title
= video_info
['entry']['title']["$t"]
149 author
= video_info
['entry']['author'][0]['name']['$t']
151 return unescape("Youtube: %s (%s)" % (title
, author
))
# NOTE(review): fragment of gottwitch(uri) -- the `def` header and orig
# lines 159/161 (the if/else frame deciding between the online reply and
# the offline message) were dropped by the scrape. Queries the retired
# justin.tv stream-list API for the channel (first path segment of uri).
156 url
= 'http://api.justin.tv/api/stream/list.json?channel=%s' % uri
.split('/')[0]
157 respdata
= urllib2
.urlopen(url
).read()
158 twitch
= json
.loads(respdata
)
# Online: report stream status, login and current game.
160 return unescape('Twitch: %s (%s playing %s)' % (twitch
[0]['channel']['status'], twitch
[0]['channel']['login'], twitch
[0]['channel']['meta_game']))
162 return 'Twitch: Channel offline.'
# NOTE(review): fragment of goturl(url) -- the `def` header and orig lines
# 167/170 (almost certainly the try/except frame implied by the
# 'Invalid URL/Timeout' fallback) were dropped by the scrape. Fetches the
# page (following redirects via SmartRedirectHandler, 2s timeout) and
# replies with its <title>.
165 request
= urllib2
.Request(url
)
166 opener
= urllib2
.build_opener(SmartRedirectHandler())
168 soup
= BeautifulSoup(opener
.open(request
, timeout
=2))
169 return unescape('Title: %s' % (soup
.title
.string
))
171 return 'Invalid URL/Timeout'