jfr.im git - erebus.git/commitdiff
mv COPYING LICENSE and fix youtube module
author John Runyon <redacted>
Wed, 5 Feb 2014 21:02:14 +0000 (15:02 -0600)
committer John Runyon <redacted>
Wed, 5 Feb 2014 21:02:14 +0000 (15:02 -0600)
LICENSE [moved from COPYING with 100% similarity]
modules/youtube.py

diff --git a/COPYING b/LICENSE
similarity index 100%
rename from COPYING
rename to LICENSE
diff --git a/modules/youtube.py b/modules/youtube.py
index c40bf177a6ee32b8e6f6ac92db307c321b202cff..9e701707d8b0579ba56e74550e5a04bc0a0790c1 100644
--- a/modules/youtube.py
+++ b/modules/youtube.py
@@ -25,8 +25,7 @@ import HTMLParser
 from BeautifulSoup import BeautifulSoup
 
 checkfor = "youtube"
-url_regex = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
-yturl_regex = re.compile('(http|ftp|https):\/\/([\w\-_]+(?:(?:\.[\w\-_]+)+))([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?')
+yturl_regex = re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&]+')
 
 @lib.hooknum("PRIVMSG")
 def privmsg_hook(bot, line):
@@ -38,19 +37,18 @@ def privmsg_hook(bot, line):
        if checkfor not in line:
                return # doesn't concern us
 
-       for url in url_regex.findall(linetx):
-               if checkfor in url:
-                       url_data = urlparse.urlparse(url)
-                       query = urlparse.parse_qs(url_data.query)
-                       video = query["v"][0]
-                       api_url = 'http://gdata.youtube.com/feeds/api/videos/%s?alt=json&v=2' % video
-                       try:
-                               respdata = urllib2.urlopen(api_url).read()
-                               video_info = json.loads(respdata)
-
-                               title = video_info['entry']['title']["$t"]
-                               author = video_info['entry']['author'][0]['name']['$t']
-
-                               bot.msg(line.split()[2], "Youtube: %s (%s)" % (title, author))
-                       except:
-                               pass
+       for url in yturl_regex.findall(linetx):
+               url_data = urlparse.urlparse(url)
+               query = urlparse.parse_qs(url_data.query)
+               video = query["v"][0]
+               api_url = 'http://gdata.youtube.com/feeds/api/videos/%s?alt=json&v=2' % video
+               try:
+                       respdata = urllib2.urlopen(api_url).read()
+                       video_info = json.loads(respdata)
+
+                       title = video_info['entry']['title']["$t"]
+                       author = video_info['entry']['author'][0]['name']['$t']
+
+                       bot.msg(line.split()[2], "Youtube: %s (%s)" % (title, author))
+               except:
+                       pass
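For reference, a minimal sketch (Python 2, matching the module's re/urlparse usage) of how the new yturl_regex and urlparse.parse_qs combine to pull the video ID out of a message; the sample IRC text and video ID below are made up:

    import re
    import urlparse

    yturl_regex = re.compile(r'https?://(?:www\.)?youtube\.com/watch\?[a-zA-Z0-9=&]+')

    linetx = "check this https://www.youtube.com/watch?v=dQw4w9WgXcQ&t=42s please"

    for url in yturl_regex.findall(linetx):
        url_data = urlparse.urlparse(url)          # split into scheme/host/path/query
        query = urlparse.parse_qs(url_data.query)  # {'v': ['dQw4w9WgXcQ'], 't': ['42s']}
        video = query["v"][0]                      # ID the module passes to the gdata API
        print video                                # -> dQw4w9WgXcQ

Because the new pattern only matches youtube.com/watch URLs, every match already concerns the module, which is why the old per-URL "checkfor in url" test could be dropped along with the generic url_regex.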