+ return 'Request error: %s %s' % (e.code, e.reason), False
+ except urllib2.URLError as e:
+ if "certificate verify failed: unable to get local issuer certificate" in str(e.reason):
+ if aia: # Retry with AIA enabled, if module is present
+ return _do_request(url, True)
+ else:
+ lib.parent.log('urls', '?', 'If the site is not serving the certificate chain, installing the aia library might make this request work: pip install aia')
+ return 'Request error: site may have broken TLS configuration (%s)' % (e.reason), False
+ else:
+ return 'Request error: %s' % (e.reason), False
+ except TimeoutError as e:
+ return 'Request error: request timed out', False
+ except Exception as e:
+ return 'Unknown error: %s %r' % (type(e).__name__, e.args), False
+
+ return response, try_aia
+
+
def goturl(url):
    """Fetch *url* and build a one-line human-readable summary.

    Returns:
        None  -- if the URL matches one of the skip-list patterns in
                 ``other_regexes`` (handled/ignored elsewhere).
        str   -- either the error message produced by ``_do_request``,
                 or a summary of the response: "[content-type] [size] "
                 plus, for HTML, the page title.
    """
    output = []
    # Skip URLs that another handler group claims (twitter, reddit, ...).
    for _, group in other_regexes:
        for regex in group:
            if regex.match(url):
                return None

    response, used_aia = _do_request(url)
    if isinstance(response, stringbase):
        # _do_request signals failure by returning the error string itself.
        return response

    # Try to add type and length headers to reply
    c_type_fields = response.getheader('Content-Type', '').split(';')
    c_type = c_type_fields.pop(0)
    c_charset = None
    for f in c_type_fields:
        f = f.strip()
        # len(f) > 8 keeps the original behavior of ignoring a bare
        # "charset=" with no value.
        if len(f) > 8 and f.startswith('charset='):
            c_charset = f[8:]
    c_len = response.getheader('Content-Length')
    # Parse Content-Length defensively: a malformed header value must not
    # crash the reply with an uncaught ValueError. expected_len is the
    # numeric form used only for the read-size comparison below; the raw
    # string c_len is still what gets displayed, as before.
    try:
        expected_len = int(c_len) if c_len is not None else None
    except ValueError:
        expected_len = None
    if c_type != '':
        output.append("[%s] " % (c_type))
    else:
        output.append("[no type] ")
    if c_type != "text/html":  # else length will be provided by HTML code below
        if c_len is not None:
            output.append("[%s] " % (_humanize_bytes(c_len)))
        else:
            output.append("[no length] ")

    if used_aia:
        output.append("[AIA] ")

    # Try to add title if HTML
    if c_type == 'text/html':
        try:
            # Cap the read at 1 MiB so a huge page can't stall the bot.
            responsebody = response.read(1024 * 1024)
        except Exception as e:
            output.append('Error reading response body: %s %r' % (type(e).__name__, e.args))
        else:
            if expected_len is not None and len(responsebody) != expected_len:
                # Did we read a different amount than Content-Length claimed?
                if response.read(1):  # there's more data, we just aren't reading it
                    output.append("[read %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
                else:
                    output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
            else:  # Content-Length = amount read
                output.append("[%s] " % (_humanize_bytes(len(responsebody))))
            try:
                soup = BeautifulSoup(responsebody, from_encoding=c_charset)
                # soup.title.string is None for empty/multi-child <title>;
                # report "No title" instead of a misleading AttributeError
                # ("Title error") as before.
                if soup.title and soup.title.string:
                    output.append('Title: ' + unescape('%s' % (soup.title.string.strip())))
                else:
                    output.append('No title')
            except Exception as e:
                output.append('Title error: %s %r ' % (type(e).__name__, e.args))

    return ''.join(output)
+
# Pattern that recognizes an http(s) URL in free text: scheme, one or more
# dotted host labels, optional :port (1-5 digits), optional path that stops
# at whitespace or a closing bracket character.
url_regex = (
    re.compile(r'https?://(?:[^/\s.]+\.)+[a-z0-9-]+(?::\d{1,5})?(?:/[^\s\]>)}]+)?', re.I),
)
# Skip list: each entry is a (handler, patterns) pair whose handler returns
# an empty string, so matching URLs produce no output. goturl also consults
# this table directly to bail out early.
other_regexes = (
    (lambda x: '', (re.compile(r"""https?://(?:www\.)?(?:twitter|x)\.com/""", re.I),)),  # skip twitter
    (lambda x: '', (re.compile(r"""https?://(?:www\.)?reddit\.com/""", re.I),)),  # skip new-reddit
    (lambda x: '', (re.compile(r"""https?://jfr\.im/git/""", re.I),)),  # skip my gitweb
    (lambda x: '', (re.compile(r"""https?://(?:www\.)?wunderground\.com/""", re.I),)),  # skip wunderground, they time us out
)
# Full dispatch table: skip entries first (order matters — they must win
# over the generic URL handler), then goturl for everything url_regex matches.
regexes = other_regexes + (
    (goturl, url_regex),
)