]> jfr.im git - yt-dlp.git/blobdiff - yt_dlp/utils/networking.py
[networking] Remove dot segments during URL normalization (#7662)
[yt-dlp.git] / yt_dlp / utils / networking.py
index 95b54fabef393dfebd7d3c02b8804fd738ade7b3..bbcea84d2c0b20ae4457334a4eff6463b90aff36 100644 (file)
@@ -1,4 +1,9 @@
+import collections
 import random
+import urllib.parse
+import urllib.request
+
+from ._utils import remove_start
 
 
 def random_user_agent():
@@ -46,15 +51,111 @@ def random_user_agent():
     return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
 
 
-std_headers = {
class HTTPHeaderDict(collections.UserDict, dict):
    """
    Store and access keys case-insensitively.
    The constructor can take multiple dicts, in which keys in the latter are prioritised.

    Keys are normalised to title-case (``content-type`` -> ``Content-Type``) and
    values are always stored as ``str``.  Bytes keys/values are decoded as
    latin-1 (the historical default charset for HTTP headers) rather than being
    mangled by ``str()`` into ``"b'...'"``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        for dct in args:
            if dct is not None:  # None args are allowed and simply skipped
                self.update(dct)
        self.update(kwargs)

    def __setitem__(self, key, value):
        # Decode raw header bytes (e.g. straight off a socket) as latin-1,
        # so lookups via the str form of the same header succeed.
        if isinstance(key, bytes):
            key = key.decode('latin-1')
        if isinstance(value, bytes):
            value = value.decode('latin-1')
        super().__setitem__(key.title(), str(value))

    def __getitem__(self, key):
        return super().__getitem__(key.title())

    def __delitem__(self, key):
        super().__delitem__(key.title())

    def __contains__(self, key):
        # Non-str keys (e.g. ints) must not crash `in` checks
        return super().__contains__(key.title() if isinstance(key, str) else key)
+
+
# Default headers attached to every outgoing request.  Stored in a
# case-insensitive HTTPHeaderDict so callers can override entries under
# any capitalisation; the User-Agent is randomised once at import time.
std_headers = HTTPHeaderDict({
    'User-Agent': random_user_agent(),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Sec-Fetch-Mode': 'navigate',
})
 
+def clean_proxies(proxies: dict, headers: HTTPHeaderDict):
+    req_proxy = headers.pop('Ytdl-Request-Proxy', None)
+    if req_proxy:
+        proxies.clear()  # XXX: compat: Ytdl-Request-Proxy takes preference over everything, including NO_PROXY
+        proxies['all'] = req_proxy
+    for proxy_key, proxy_url in proxies.items():
+        if proxy_url == '__noproxy__':
+            proxies[proxy_key] = None
+            continue
+        if proxy_key == 'no':  # special case
+            continue
+        if proxy_url is not None:
+            # Ensure proxies without a scheme are http.
+            try:
+                proxy_scheme = urllib.request._parse_proxy(proxy_url)[0]
+            except ValueError:
+                # Ignore invalid proxy URLs. Sometimes these may be introduced through environment
+                # variables unrelated to proxy settings - e.g. Colab `COLAB_LANGUAGE_SERVER_PROXY`.
+                # If the proxy is going to be used, the Request Handler proxy validation will handle it.
+                continue
+            if proxy_scheme is None:
+                proxies[proxy_key] = 'http://' + remove_start(proxy_url, '//')
 
-def clean_headers(headers):
-    if 'Youtubedl-no-compression' in headers:  # compat
-        del headers['Youtubedl-no-compression']
+            replace_scheme = {
+                'socks5': 'socks5h',  # compat: socks5 was treated as socks5h
+                'socks': 'socks4'  # compat: non-standard
+            }
+            if proxy_scheme in replace_scheme:
+                proxies[proxy_key] = urllib.parse.urlunparse(
+                    urllib.parse.urlparse(proxy_url)._replace(scheme=replace_scheme[proxy_scheme]))
+
+
+def clean_headers(headers: HTTPHeaderDict):
+    if 'Youtubedl-No-Compression' in headers:  # compat
+        del headers['Youtubedl-No-Compression']
         headers['Accept-Encoding'] = 'identity'
+
+
def remove_dot_segments(path):
    """Resolve '.' and '..' segments in *path* per RFC 3986 section 5.2.4.

    Pseudo-code: https://tools.ietf.org/html/rfc3986#section-5.2.4
    Reference impl: https://github.com/urllib3/urllib3/blob/ba49f5c4e19e6bca6827282feb77a3c9f937e64b/src/urllib3/util/url.py#L263
    """
    segments = path.split('/')
    stack = []
    for segment in segments:
        if segment == '..':
            # Step up one level, but never above the start of the path
            if stack:
                stack.pop()
        elif segment != '.':
            stack.append(segment)
    # Preserve a leading slash that '..' popping may have consumed
    if not segments[0] and (not stack or stack[0]):
        stack.insert(0, '')
    # A trailing '.' or '..' leaves a trailing slash
    if segments[-1] in ('.', '..'):
        stack.append('')
    return '/'.join(stack)
+
+
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Reserved/sub-delim characters that must survive percent-encoding,
    # plus '%' so already-encoded input is not double-escaped.
    safe_chars = b"%/;:@&=+$,!~*'()?#[]"
    return urllib.parse.quote(s, safe_chars)
+
+
def normalize_url(url):
    """Normalize URL as suggested by RFC 3986"""
    parts = urllib.parse.urlparse(url)
    # IDNA-encode the host; percent-encode every other component and
    # collapse '.'/'..' segments in the path.
    return urllib.parse.urlunparse((
        parts.scheme,
        parts.netloc.encode('idna').decode('ascii'),
        escape_rfc3986(remove_dot_segments(parts.path)),
        escape_rfc3986(parts.params),
        escape_rfc3986(parts.query),
        escape_rfc3986(parts.fragment),
    ))