responses = []
num_found = 0
limit = lib.parent.cfg.getint('urls', 'limit', 2)
- for action, group, prefix in regexes:
+ for action, group in regexes:
    for regex in group:
        for match in regex.findall(line):
-            if match:
-                resp = action(match)
-            else:
-                resp = action(*match)
+            # re.findall() yields tuples when the pattern has two or
+            # more capture groups, plain strings otherwise.  Dispatch
+            # on the actual type: truthiness was backwards (a falsy
+            # match — empty string/tuple — was being star-splatted).
+            if isinstance(match, tuple):
+                resp = action(*match)
+            else:
+                resp = action(match)
-        if resp is not None:
-            responses.append("%s: %s" % (prefix, resp))
+        # Skip both "no reply" (None) and deliberately empty replies.
+        if resp is not None and resp != "":
+            responses.append(resp)
return responses
@lib.hooknum("PRIVMSG")
except:
return 'Channel offline.'
+def _humanize_bytes(b):
+    """Render a byte count as a short human-readable IEC string.
+
+    Accepts anything int() accepts (including the string value of a
+    Content-Length header).  Returns '<n>B' below 1 KiB, otherwise
+    '<x.xx><unit>iB' (kiB, MiB, GiB, ...).
+    """
+    b = int(b)
+    i = 0
+    table = " kMGTPEZYRQ"
+    # '>=' so an exact power of 1024 rolls over to the next unit
+    # (1024 -> '1.00kiB', not '1024B'); the i bound keeps an absurdly
+    # large value from indexing past the end of the unit table.
+    while b >= 1024 and i < len(table) - 1:
+        i += 1
+        b /= 1024.0
+    if i == 0:
+        return "%dB" % (b)
+    else:
+        return "%.2f%siB" % (b, table[i])
+
def goturl(url):
+    # Reply is built up in pieces and joined at the end.
+    output = []
-    for _, group, _ in other_regexes:
+    # Bail out silently for URLs some other handler owns.
+    for _, group in other_regexes:
    for regex in group:
        if regex.match(url):
            return None
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'})
    opener = urllib2.build_opener(SmartRedirectHandler())
+
+    # Send request and handle errors
    try:
-        soup = BeautifulSoup(opener.open(request, timeout=2))
-        if soup.title:
-            return unescape('%s' % (soup.title.string))
-        else:
-            return None
+        response = opener.open(request, timeout=2)
    except urllib2.HTTPError as e:
-        return 'Error: %s %s' % (e.code, e.reason)
+        return 'Request error: %s %s' % (e.code, e.reason)
    except urllib2.URLError as e:
-        return 'Error: %s' % (e.reason)
+        return 'Request error: %s' % (e.reason)
    except TimeoutError as e:
-        return 'Error: request timed out'
+        return 'Request error: request timed out'
    except Exception as e:
-        return 'Error: %s %r' % (type(e).__name__, e.args)
+        return 'Unknown error: %s %r' % (type(e).__name__, e.args)
+
+    # Try to add type and length headers to reply.
+    # NOTE(review): .getheader() lives on Python 3's HTTPResponse; the
+    # classic Python 2 urllib2 addinfourl only has .info().getheader().
+    # Confirm which interpreter/compat shim this runs under.
+    c_type = response.getheader('Content-Type', '').split(';', 1)[0]
+    c_len = response.getheader('Content-Length')
+    if c_type != '':
+        output.append("[%s] " % (c_type))
+    else:
+        output.append("[no type] ")
+    if c_type != "text/html":  # else length will be provided by HTML code below
+        if c_len is not None:
+            output.append("[%s] " % (_humanize_bytes(c_len)))
+        else:
+            output.append("[no length] ")
+
+    # Try to add title if HTML
+    if c_type == 'text/html':
+        try:
+            # Cap the read at 1 MiB so a huge page can't wedge the bot.
+            responsebody = response.read(1024*1024)
+        except Exception as e:
+            output.append('Error reading response body: %s %r' % (type(e).__name__, e.args))
+        else:
+            # Report a mismatch between what we got and what the server
+            # advertised (truncated read, lying server, ...).
+            if c_len is not None and len(responsebody) != int(c_len):
+                output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
+            else:
+                output.append("[%s] " % (_humanize_bytes(len(responsebody))))
+            try:
+                soup = BeautifulSoup(responsebody)
+                # Use the same attribute we just guarded on, instead of
+                # a second soup.find('title') lookup.
+                if soup.title:
+                    output.append('Title: ' + unescape('%s' % (soup.title.string)))
+                else:
+                    output.append('No title')
+            except Exception as e:
+                output.append('Title error: %s %r ' % (type(e).__name__, e.args))
+
+    return ''.join(output)
url_regex = (
-    re.compile(r'https?://[^/\s]+\.[^/\s]+(?:/\S+)?'),
+    # Host must be one or more non-empty dot-separated labels plus a
+    # final label, so 'http://x' or 'http://a..b' no longer match.
+    re.compile(r'https?://(?:[^/\s.]+\.)+[^/\s.]+(?:/\S+)?'),
)
other_regexes = (
)
regexes = other_regexes + (
-    (goturl, url_regex, 'Title'),
+    # Entries are (action, regex-group) pairs now: the 'Title' prefix
+    # moved into goturl's own output string.
+    (goturl, url_regex),
)