jfr.im git - erebus.git/blobdiff - modules/urls.py
urls - fix big exception if AIA encounters an SSL error
[erebus.git] / modules / urls.py
index c1e27055a8ead6cd53f3a4a64e3768ab2d152e4a..fd21676e8f10be99e662f283a70bb91745368ccd 100644 (file)
@@ -248,11 +248,26 @@ def _do_request(url, try_aia=False):
                - and a flag indicating whether AIA was used
        """
        try:
-               request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', 'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', 'Sec-Ch-Ua-Mobile': '?0', 'Sec-Ch-Ua-Platform': '"Linux"', 'Sec-Fetch-Dest': 'document', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'same-origin', 'Sec-Fetch-User': '?1', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache', 'Upgrade-Insecure-Requests': '1'})
+               request = urllib2.Request(url, headers={
+                       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+                       'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+                       'Sec-Ch-Ua-Mobile': '?0',
+                       'Sec-Ch-Ua-Platform': '"Linux"',
+                       'Sec-Fetch-Dest': 'document',
+                       'Sec-Fetch-Mode': 'navigate',
+                       'Sec-Fetch-Site': 'same-origin',
+                       'Sec-Fetch-User': '?1',
+                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+                       'Accept-Language': 'en-US,en;q=0.9',
+                       'Upgrade-Insecure-Requests': '1'
+               })
        except ValueError:
                return '', False
        if try_aia:
-               opener = urllib2.build_opener(urllib2.HTTPSHandler(context=aia_session.ssl_context_from_url(url)), SmartRedirectHandler())
+               try:
+                       opener = urllib2.build_opener(urllib2.HTTPSHandler(context=aia_session.ssl_context_from_url(url)), SmartRedirectHandler())
+               except aia.AIAError as e:
+                       return 'Request error: %s.%s: %s' % (e.__module__, e.__class__.__name__, e.args[0]), True
        else:
                opener = urllib2.build_opener(SmartRedirectHandler())
 
@@ -320,9 +335,12 @@ def goturl(url):
                except Exception as e:
                        output.append('Error reading response body: %s %r' % (type(e).__name__, e.args))
                else:
-                       if c_len is not None and len(responsebody) != int(c_len):
-                               output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
-                       else:
+                       if c_len is not None and len(responsebody) != int(c_len): # did we read a different amount than Content-Length?
+                               if response.read(1): # there's more data, we just aren't reading it
+                                       output.append("[read %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
+                               else:
+                                       output.append("[actual %s; Content-Length %s] " % (_humanize_bytes(len(responsebody)), _humanize_bytes(c_len)))
+                       else: # Content-Length = amount read
                                output.append("[%s] " % (_humanize_bytes(len(responsebody))))
                        try:
                                soup = BeautifulSoup(responsebody, from_encoding=c_charset)
@@ -336,7 +354,7 @@ def goturl(url):
        return ''.join(output)
 
 url_regex = (
-       re.compile(r'https?://(?:[^/\s.]+\.)+[^/\s.]+(?:/\S+)?'),
+       re.compile(r'https?://(?:[^/\s.]+\.)+[a-z0-9-]+(?:/[^\s\]>)}]+)?', re.I),
 )
 other_regexes = (
        (lambda x: '', (re.compile(r"""https?://(?:www\.)?(?:twitter|x)\.com/""", re.I),)), # skip twitter