jfr.im git - yt-dlp.git/blobdiff - yt_dlp/utils.py
[cleanup] Misc fixes
index eba89fb8bc3473781eda308154464d57dc5e96f8..00721eb4673c7ab67cbd63e41c4c34fb6bd5c177 100644
--- a/yt_dlp/utils.py
+++ b/yt_dlp/utils.py
@@ -1,8 +1,5 @@
 #!/usr/bin/env python3
-# coding: utf-8
-
-from __future__ import unicode_literals
-
+import atexit
 import base64
 import binascii
 import calendar
 import contextlib
 import ctypes
 import datetime
-import email.utils
 import email.header
+import email.utils
 import errno
-import functools
 import gzip
 import hashlib
 import hmac
-import imp
+import importlib.util
 import io
 import itertools
 import json
 import locale
 import math
+import mimetypes
 import operator
 import os
 import platform
 import random
 import re
+import shlex
 import socket
 import ssl
 import subprocess
 import tempfile
 import time
 import traceback
+import urllib.parse
 import xml.etree.ElementTree
 import zlib
 
+from .compat import asyncio, functools  # isort: split
 from .compat import (
-    compat_HTMLParseError,
-    compat_HTMLParser,
-    compat_HTTPError,
-    compat_basestring,
     compat_chr,
     compat_cookiejar,
-    compat_ctypes_WINFUNCTYPE,
     compat_etree_fromstring,
     compat_expanduser,
     compat_html_entities,
     compat_html_entities_html5,
+    compat_HTMLParseError,
+    compat_HTMLParser,
     compat_http_client,
-    compat_integer_types,
-    compat_numeric_types,
-    compat_kwargs,
+    compat_HTTPError,
     compat_os_name,
     compat_parse_qs,
     compat_shlex_quote,
     compat_struct_pack,
     compat_struct_unpack,
     compat_urllib_error,
-    compat_urllib_parse,
+    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
-    compat_urllib_parse_urlunparse,
-    compat_urllib_parse_quote,
-    compat_urllib_parse_quote_plus,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_request,
     compat_urlparse,
-    compat_xpath,
-)
-
-from .socks import (
-    ProxyType,
-    sockssocket,
 )
+from .dependencies import brotli, certifi, websockets
+from .socks import ProxyType, sockssocket
 
 
 def register_socks_protocols():
@@ -96,1592 +83,59 @@ def register_socks_protocols():
 def random_user_agent():
     _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
     _CHROME_VERSIONS = (
-        '74.0.3729.129',
-        '76.0.3780.3',
-        '76.0.3780.2',
-        '74.0.3729.128',
-        '76.0.3780.1',
-        '76.0.3780.0',
-        '75.0.3770.15',
-        '74.0.3729.127',
-        '74.0.3729.126',
-        '76.0.3779.1',
-        '76.0.3779.0',
-        '75.0.3770.14',
-        '74.0.3729.125',
-        '76.0.3778.1',
-        '76.0.3778.0',
-        '75.0.3770.13',
-        '74.0.3729.124',
-        '74.0.3729.123',
-        '73.0.3683.121',
-        '76.0.3777.1',
-        '76.0.3777.0',
-        '75.0.3770.12',
-        '74.0.3729.122',
-        '76.0.3776.4',
-        '75.0.3770.11',
-        '74.0.3729.121',
-        '76.0.3776.3',
-        '76.0.3776.2',
-        '73.0.3683.120',
-        '74.0.3729.120',
-        '74.0.3729.119',
-        '74.0.3729.118',
-        '76.0.3776.1',
-        '76.0.3776.0',
-        '76.0.3775.5',
-        '75.0.3770.10',
-        '74.0.3729.117',
-        '76.0.3775.4',
-        '76.0.3775.3',
-        '74.0.3729.116',
-        '75.0.3770.9',
-        '76.0.3775.2',
-        '76.0.3775.1',
-        '76.0.3775.0',
-        '75.0.3770.8',
-        '74.0.3729.115',
-        '74.0.3729.114',
-        '76.0.3774.1',
-        '76.0.3774.0',
-        '75.0.3770.7',
-        '74.0.3729.113',
-        '74.0.3729.112',
-        '74.0.3729.111',
-        '76.0.3773.1',
-        '76.0.3773.0',
-        '75.0.3770.6',
-        '74.0.3729.110',
-        '74.0.3729.109',
-        '76.0.3772.1',
-        '76.0.3772.0',
-        '75.0.3770.5',
-        '74.0.3729.108',
-        '74.0.3729.107',
-        '76.0.3771.1',
-        '76.0.3771.0',
-        '75.0.3770.4',
-        '74.0.3729.106',
-        '74.0.3729.105',
-        '75.0.3770.3',
-        '74.0.3729.104',
-        '74.0.3729.103',
-        '74.0.3729.102',
-        '75.0.3770.2',
-        '74.0.3729.101',
-        '75.0.3770.1',
-        '75.0.3770.0',
-        '74.0.3729.100',
-        '75.0.3769.5',
-        '75.0.3769.4',
-        '74.0.3729.99',
-        '75.0.3769.3',
-        '75.0.3769.2',
-        '75.0.3768.6',
-        '74.0.3729.98',
-        '75.0.3769.1',
-        '75.0.3769.0',
-        '74.0.3729.97',
-        '73.0.3683.119',
-        '73.0.3683.118',
-        '74.0.3729.96',
-        '75.0.3768.5',
-        '75.0.3768.4',
-        '75.0.3768.3',
-        '75.0.3768.2',
-        '74.0.3729.95',
-        '74.0.3729.94',
-        '75.0.3768.1',
-        '75.0.3768.0',
-        '74.0.3729.93',
-        '74.0.3729.92',
-        '73.0.3683.117',
-        '74.0.3729.91',
-        '75.0.3766.3',
-        '74.0.3729.90',
-        '75.0.3767.2',
-        '75.0.3767.1',
-        '75.0.3767.0',
-        '74.0.3729.89',
-        '73.0.3683.116',
-        '75.0.3766.2',
-        '74.0.3729.88',
-        '75.0.3766.1',
-        '75.0.3766.0',
-        '74.0.3729.87',
-        '73.0.3683.115',
-        '74.0.3729.86',
-        '75.0.3765.1',
-        '75.0.3765.0',
-        '74.0.3729.85',
-        '73.0.3683.114',
-        '74.0.3729.84',
-        '75.0.3764.1',
-        '75.0.3764.0',
-        '74.0.3729.83',
-        '73.0.3683.113',
-        '75.0.3763.2',
-        '75.0.3761.4',
-        '74.0.3729.82',
-        '75.0.3763.1',
-        '75.0.3763.0',
-        '74.0.3729.81',
-        '73.0.3683.112',
-        '75.0.3762.1',
-        '75.0.3762.0',
-        '74.0.3729.80',
-        '75.0.3761.3',
-        '74.0.3729.79',
-        '73.0.3683.111',
-        '75.0.3761.2',
-        '74.0.3729.78',
-        '74.0.3729.77',
-        '75.0.3761.1',
-        '75.0.3761.0',
-        '73.0.3683.110',
-        '74.0.3729.76',
-        '74.0.3729.75',
-        '75.0.3760.0',
-        '74.0.3729.74',
-        '75.0.3759.8',
-        '75.0.3759.7',
-        '75.0.3759.6',
-        '74.0.3729.73',
-        '75.0.3759.5',
-        '74.0.3729.72',
-        '73.0.3683.109',
-        '75.0.3759.4',
-        '75.0.3759.3',
-        '74.0.3729.71',
-        '75.0.3759.2',
-        '74.0.3729.70',
-        '73.0.3683.108',
-        '74.0.3729.69',
-        '75.0.3759.1',
-        '75.0.3759.0',
-        '74.0.3729.68',
-        '73.0.3683.107',
-        '74.0.3729.67',
-        '75.0.3758.1',
-        '75.0.3758.0',
-        '74.0.3729.66',
-        '73.0.3683.106',
-        '74.0.3729.65',
-        '75.0.3757.1',
-        '75.0.3757.0',
-        '74.0.3729.64',
-        '73.0.3683.105',
-        '74.0.3729.63',
-        '75.0.3756.1',
-        '75.0.3756.0',
-        '74.0.3729.62',
-        '73.0.3683.104',
-        '75.0.3755.3',
-        '75.0.3755.2',
-        '73.0.3683.103',
-        '75.0.3755.1',
-        '75.0.3755.0',
-        '74.0.3729.61',
-        '73.0.3683.102',
-        '74.0.3729.60',
-        '75.0.3754.2',
-        '74.0.3729.59',
-        '75.0.3753.4',
-        '74.0.3729.58',
-        '75.0.3754.1',
-        '75.0.3754.0',
-        '74.0.3729.57',
-        '73.0.3683.101',
-        '75.0.3753.3',
-        '75.0.3752.2',
-        '75.0.3753.2',
-        '74.0.3729.56',
-        '75.0.3753.1',
-        '75.0.3753.0',
-        '74.0.3729.55',
-        '73.0.3683.100',
-        '74.0.3729.54',
-        '75.0.3752.1',
-        '75.0.3752.0',
-        '74.0.3729.53',
-        '73.0.3683.99',
-        '74.0.3729.52',
-        '75.0.3751.1',
-        '75.0.3751.0',
-        '74.0.3729.51',
-        '73.0.3683.98',
-        '74.0.3729.50',
-        '75.0.3750.0',
-        '74.0.3729.49',
-        '74.0.3729.48',
-        '74.0.3729.47',
-        '75.0.3749.3',
-        '74.0.3729.46',
-        '73.0.3683.97',
-        '75.0.3749.2',
-        '74.0.3729.45',
-        '75.0.3749.1',
-        '75.0.3749.0',
-        '74.0.3729.44',
-        '73.0.3683.96',
-        '74.0.3729.43',
-        '74.0.3729.42',
-        '75.0.3748.1',
-        '75.0.3748.0',
-        '74.0.3729.41',
-        '75.0.3747.1',
-        '73.0.3683.95',
-        '75.0.3746.4',
-        '74.0.3729.40',
-        '74.0.3729.39',
-        '75.0.3747.0',
-        '75.0.3746.3',
-        '75.0.3746.2',
-        '74.0.3729.38',
-        '75.0.3746.1',
-        '75.0.3746.0',
-        '74.0.3729.37',
-        '73.0.3683.94',
-        '75.0.3745.5',
-        '75.0.3745.4',
-        '75.0.3745.3',
-        '75.0.3745.2',
-        '74.0.3729.36',
-        '75.0.3745.1',
-        '75.0.3745.0',
-        '75.0.3744.2',
-        '74.0.3729.35',
-        '73.0.3683.93',
-        '74.0.3729.34',
-        '75.0.3744.1',
-        '75.0.3744.0',
-        '74.0.3729.33',
-        '73.0.3683.92',
-        '74.0.3729.32',
-        '74.0.3729.31',
-        '73.0.3683.91',
-        '75.0.3741.2',
-        '75.0.3740.5',
-        '74.0.3729.30',
-        '75.0.3741.1',
-        '75.0.3741.0',
-        '74.0.3729.29',
-        '75.0.3740.4',
-        '73.0.3683.90',
-        '74.0.3729.28',
-        '75.0.3740.3',
-        '73.0.3683.89',
-        '75.0.3740.2',
-        '74.0.3729.27',
-        '75.0.3740.1',
-        '75.0.3740.0',
-        '74.0.3729.26',
-        '73.0.3683.88',
-        '73.0.3683.87',
-        '74.0.3729.25',
-        '75.0.3739.1',
-        '75.0.3739.0',
-        '73.0.3683.86',
-        '74.0.3729.24',
-        '73.0.3683.85',
-        '75.0.3738.4',
-        '75.0.3738.3',
-        '75.0.3738.2',
-        '75.0.3738.1',
-        '75.0.3738.0',
-        '74.0.3729.23',
-        '73.0.3683.84',
-        '74.0.3729.22',
-        '74.0.3729.21',
-        '75.0.3737.1',
-        '75.0.3737.0',
-        '74.0.3729.20',
-        '73.0.3683.83',
-        '74.0.3729.19',
-        '75.0.3736.1',
-        '75.0.3736.0',
-        '74.0.3729.18',
-        '73.0.3683.82',
-        '74.0.3729.17',
-        '75.0.3735.1',
-        '75.0.3735.0',
-        '74.0.3729.16',
-        '73.0.3683.81',
-        '75.0.3734.1',
-        '75.0.3734.0',
-        '74.0.3729.15',
-        '73.0.3683.80',
-        '74.0.3729.14',
-        '75.0.3733.1',
-        '75.0.3733.0',
-        '75.0.3732.1',
-        '74.0.3729.13',
-        '74.0.3729.12',
-        '73.0.3683.79',
-        '74.0.3729.11',
-        '75.0.3732.0',
-        '74.0.3729.10',
-        '73.0.3683.78',
-        '74.0.3729.9',
-        '74.0.3729.8',
-        '74.0.3729.7',
-        '75.0.3731.3',
-        '75.0.3731.2',
-        '75.0.3731.0',
-        '74.0.3729.6',
-        '73.0.3683.77',
-        '73.0.3683.76',
-        '75.0.3730.5',
-        '75.0.3730.4',
-        '73.0.3683.75',
-        '74.0.3729.5',
-        '73.0.3683.74',
-        '75.0.3730.3',
-        '75.0.3730.2',
-        '74.0.3729.4',
-        '73.0.3683.73',
-        '73.0.3683.72',
-        '75.0.3730.1',
-        '75.0.3730.0',
-        '74.0.3729.3',
-        '73.0.3683.71',
-        '74.0.3729.2',
-        '73.0.3683.70',
-        '74.0.3729.1',
-        '74.0.3729.0',
-        '74.0.3726.4',
-        '73.0.3683.69',
-        '74.0.3726.3',
-        '74.0.3728.0',
-        '74.0.3726.2',
-        '73.0.3683.68',
-        '74.0.3726.1',
-        '74.0.3726.0',
-        '74.0.3725.4',
-        '73.0.3683.67',
-        '73.0.3683.66',
-        '74.0.3725.3',
-        '74.0.3725.2',
-        '74.0.3725.1',
-        '74.0.3724.8',
-        '74.0.3725.0',
-        '73.0.3683.65',
-        '74.0.3724.7',
-        '74.0.3724.6',
-        '74.0.3724.5',
-        '74.0.3724.4',
-        '74.0.3724.3',
-        '74.0.3724.2',
-        '74.0.3724.1',
-        '74.0.3724.0',
-        '73.0.3683.64',
-        '74.0.3723.1',
-        '74.0.3723.0',
-        '73.0.3683.63',
-        '74.0.3722.1',
-        '74.0.3722.0',
-        '73.0.3683.62',
-        '74.0.3718.9',
-        '74.0.3702.3',
-        '74.0.3721.3',
-        '74.0.3721.2',
-        '74.0.3721.1',
-        '74.0.3721.0',
-        '74.0.3720.6',
-        '73.0.3683.61',
-        '72.0.3626.122',
-        '73.0.3683.60',
-        '74.0.3720.5',
-        '72.0.3626.121',
-        '74.0.3718.8',
-        '74.0.3720.4',
-        '74.0.3720.3',
-        '74.0.3718.7',
-        '74.0.3720.2',
-        '74.0.3720.1',
-        '74.0.3720.0',
-        '74.0.3718.6',
-        '74.0.3719.5',
-        '73.0.3683.59',
-        '74.0.3718.5',
-        '74.0.3718.4',
-        '74.0.3719.4',
-        '74.0.3719.3',
-        '74.0.3719.2',
-        '74.0.3719.1',
-        '73.0.3683.58',
-        '74.0.3719.0',
-        '73.0.3683.57',
-        '73.0.3683.56',
-        '74.0.3718.3',
-        '73.0.3683.55',
-        '74.0.3718.2',
-        '74.0.3718.1',
-        '74.0.3718.0',
-        '73.0.3683.54',
-        '74.0.3717.2',
-        '73.0.3683.53',
-        '74.0.3717.1',
-        '74.0.3717.0',
-        '73.0.3683.52',
-        '74.0.3716.1',
-        '74.0.3716.0',
-        '73.0.3683.51',
-        '74.0.3715.1',
-        '74.0.3715.0',
-        '73.0.3683.50',
-        '74.0.3711.2',
-        '74.0.3714.2',
-        '74.0.3713.3',
-        '74.0.3714.1',
-        '74.0.3714.0',
-        '73.0.3683.49',
-        '74.0.3713.1',
-        '74.0.3713.0',
-        '72.0.3626.120',
-        '73.0.3683.48',
-        '74.0.3712.2',
-        '74.0.3712.1',
-        '74.0.3712.0',
-        '73.0.3683.47',
-        '72.0.3626.119',
-        '73.0.3683.46',
-        '74.0.3710.2',
-        '72.0.3626.118',
-        '74.0.3711.1',
-        '74.0.3711.0',
-        '73.0.3683.45',
-        '72.0.3626.117',
-        '74.0.3710.1',
-        '74.0.3710.0',
-        '73.0.3683.44',
-        '72.0.3626.116',
-        '74.0.3709.1',
-        '74.0.3709.0',
-        '74.0.3704.9',
-        '73.0.3683.43',
-        '72.0.3626.115',
-        '74.0.3704.8',
-        '74.0.3704.7',
-        '74.0.3708.0',
-        '74.0.3706.7',
-        '74.0.3704.6',
-        '73.0.3683.42',
-        '72.0.3626.114',
-        '74.0.3706.6',
-        '72.0.3626.113',
-        '74.0.3704.5',
-        '74.0.3706.5',
-        '74.0.3706.4',
-        '74.0.3706.3',
-        '74.0.3706.2',
-        '74.0.3706.1',
-        '74.0.3706.0',
-        '73.0.3683.41',
-        '72.0.3626.112',
-        '74.0.3705.1',
-        '74.0.3705.0',
-        '73.0.3683.40',
-        '72.0.3626.111',
-        '73.0.3683.39',
-        '74.0.3704.4',
-        '73.0.3683.38',
-        '74.0.3704.3',
-        '74.0.3704.2',
-        '74.0.3704.1',
-        '74.0.3704.0',
-        '73.0.3683.37',
-        '72.0.3626.110',
-        '72.0.3626.109',
-        '74.0.3703.3',
-        '74.0.3703.2',
-        '73.0.3683.36',
-        '74.0.3703.1',
-        '74.0.3703.0',
-        '73.0.3683.35',
-        '72.0.3626.108',
-        '74.0.3702.2',
-        '74.0.3699.3',
-        '74.0.3702.1',
-        '74.0.3702.0',
-        '73.0.3683.34',
-        '72.0.3626.107',
-        '73.0.3683.33',
-        '74.0.3701.1',
-        '74.0.3701.0',
-        '73.0.3683.32',
-        '73.0.3683.31',
-        '72.0.3626.105',
-        '74.0.3700.1',
-        '74.0.3700.0',
-        '73.0.3683.29',
-        '72.0.3626.103',
-        '74.0.3699.2',
-        '74.0.3699.1',
-        '74.0.3699.0',
-        '73.0.3683.28',
-        '72.0.3626.102',
-        '73.0.3683.27',
-        '73.0.3683.26',
-        '74.0.3698.0',
-        '74.0.3696.2',
-        '72.0.3626.101',
-        '73.0.3683.25',
-        '74.0.3696.1',
-        '74.0.3696.0',
-        '74.0.3694.8',
-        '72.0.3626.100',
-        '74.0.3694.7',
-        '74.0.3694.6',
-        '74.0.3694.5',
-        '74.0.3694.4',
-        '72.0.3626.99',
-        '72.0.3626.98',
-        '74.0.3694.3',
-        '73.0.3683.24',
-        '72.0.3626.97',
-        '72.0.3626.96',
-        '72.0.3626.95',
-        '73.0.3683.23',
-        '72.0.3626.94',
-        '73.0.3683.22',
-        '73.0.3683.21',
-        '72.0.3626.93',
-        '74.0.3694.2',
-        '72.0.3626.92',
-        '74.0.3694.1',
-        '74.0.3694.0',
-        '74.0.3693.6',
-        '73.0.3683.20',
-        '72.0.3626.91',
-        '74.0.3693.5',
-        '74.0.3693.4',
-        '74.0.3693.3',
-        '74.0.3693.2',
-        '73.0.3683.19',
-        '74.0.3693.1',
-        '74.0.3693.0',
-        '73.0.3683.18',
-        '72.0.3626.90',
-        '74.0.3692.1',
-        '74.0.3692.0',
-        '73.0.3683.17',
-        '72.0.3626.89',
-        '74.0.3687.3',
-        '74.0.3691.1',
-        '74.0.3691.0',
-        '73.0.3683.16',
-        '72.0.3626.88',
-        '72.0.3626.87',
-        '73.0.3683.15',
-        '74.0.3690.1',
-        '74.0.3690.0',
-        '73.0.3683.14',
-        '72.0.3626.86',
-        '73.0.3683.13',
-        '73.0.3683.12',
-        '74.0.3689.1',
-        '74.0.3689.0',
-        '73.0.3683.11',
-        '72.0.3626.85',
-        '73.0.3683.10',
-        '72.0.3626.84',
-        '73.0.3683.9',
-        '74.0.3688.1',
-        '74.0.3688.0',
-        '73.0.3683.8',
-        '72.0.3626.83',
-        '74.0.3687.2',
-        '74.0.3687.1',
-        '74.0.3687.0',
-        '73.0.3683.7',
-        '72.0.3626.82',
-        '74.0.3686.4',
-        '72.0.3626.81',
-        '74.0.3686.3',
-        '74.0.3686.2',
-        '74.0.3686.1',
-        '74.0.3686.0',
-        '73.0.3683.6',
-        '72.0.3626.80',
-        '74.0.3685.1',
-        '74.0.3685.0',
-        '73.0.3683.5',
-        '72.0.3626.79',
-        '74.0.3684.1',
-        '74.0.3684.0',
-        '73.0.3683.4',
-        '72.0.3626.78',
-        '72.0.3626.77',
-        '73.0.3683.3',
-        '73.0.3683.2',
-        '72.0.3626.76',
-        '73.0.3683.1',
-        '73.0.3683.0',
-        '72.0.3626.75',
-        '71.0.3578.141',
-        '73.0.3682.1',
-        '73.0.3682.0',
-        '72.0.3626.74',
-        '71.0.3578.140',
-        '73.0.3681.4',
-        '73.0.3681.3',
-        '73.0.3681.2',
-        '73.0.3681.1',
-        '73.0.3681.0',
-        '72.0.3626.73',
-        '71.0.3578.139',
-        '72.0.3626.72',
-        '72.0.3626.71',
-        '73.0.3680.1',
-        '73.0.3680.0',
-        '72.0.3626.70',
-        '71.0.3578.138',
-        '73.0.3678.2',
-        '73.0.3679.1',
-        '73.0.3679.0',
-        '72.0.3626.69',
-        '71.0.3578.137',
-        '73.0.3678.1',
-        '73.0.3678.0',
-        '71.0.3578.136',
-        '73.0.3677.1',
-        '73.0.3677.0',
-        '72.0.3626.68',
-        '72.0.3626.67',
-        '71.0.3578.135',
-        '73.0.3676.1',
-        '73.0.3676.0',
-        '73.0.3674.2',
-        '72.0.3626.66',
-        '71.0.3578.134',
-        '73.0.3674.1',
-        '73.0.3674.0',
-        '72.0.3626.65',
-        '71.0.3578.133',
-        '73.0.3673.2',
-        '73.0.3673.1',
-        '73.0.3673.0',
-        '72.0.3626.64',
-        '71.0.3578.132',
-        '72.0.3626.63',
-        '72.0.3626.62',
-        '72.0.3626.61',
-        '72.0.3626.60',
-        '73.0.3672.1',
-        '73.0.3672.0',
-        '72.0.3626.59',
-        '71.0.3578.131',
-        '73.0.3671.3',
-        '73.0.3671.2',
-        '73.0.3671.1',
-        '73.0.3671.0',
-        '72.0.3626.58',
-        '71.0.3578.130',
-        '73.0.3670.1',
-        '73.0.3670.0',
-        '72.0.3626.57',
-        '71.0.3578.129',
-        '73.0.3669.1',
-        '73.0.3669.0',
-        '72.0.3626.56',
-        '71.0.3578.128',
-        '73.0.3668.2',
-        '73.0.3668.1',
-        '73.0.3668.0',
-        '72.0.3626.55',
-        '71.0.3578.127',
-        '73.0.3667.2',
-        '73.0.3667.1',
-        '73.0.3667.0',
-        '72.0.3626.54',
-        '71.0.3578.126',
-        '73.0.3666.1',
-        '73.0.3666.0',
-        '72.0.3626.53',
-        '71.0.3578.125',
-        '73.0.3665.4',
-        '73.0.3665.3',
-        '72.0.3626.52',
-        '73.0.3665.2',
-        '73.0.3664.4',
-        '73.0.3665.1',
-        '73.0.3665.0',
-        '72.0.3626.51',
-        '71.0.3578.124',
-        '72.0.3626.50',
-        '73.0.3664.3',
-        '73.0.3664.2',
-        '73.0.3664.1',
-        '73.0.3664.0',
-        '73.0.3663.2',
-        '72.0.3626.49',
-        '71.0.3578.123',
-        '73.0.3663.1',
-        '73.0.3663.0',
-        '72.0.3626.48',
-        '71.0.3578.122',
-        '73.0.3662.1',
-        '73.0.3662.0',
-        '72.0.3626.47',
-        '71.0.3578.121',
-        '73.0.3661.1',
-        '72.0.3626.46',
-        '73.0.3661.0',
-        '72.0.3626.45',
-        '71.0.3578.120',
-        '73.0.3660.2',
-        '73.0.3660.1',
-        '73.0.3660.0',
-        '72.0.3626.44',
-        '71.0.3578.119',
-        '73.0.3659.1',
-        '73.0.3659.0',
-        '72.0.3626.43',
-        '71.0.3578.118',
-        '73.0.3658.1',
-        '73.0.3658.0',
-        '72.0.3626.42',
-        '71.0.3578.117',
-        '73.0.3657.1',
-        '73.0.3657.0',
-        '72.0.3626.41',
-        '71.0.3578.116',
-        '73.0.3656.1',
-        '73.0.3656.0',
-        '72.0.3626.40',
-        '71.0.3578.115',
-        '73.0.3655.1',
-        '73.0.3655.0',
-        '72.0.3626.39',
-        '71.0.3578.114',
-        '73.0.3654.1',
-        '73.0.3654.0',
-        '72.0.3626.38',
-        '71.0.3578.113',
-        '73.0.3653.1',
-        '73.0.3653.0',
-        '72.0.3626.37',
-        '71.0.3578.112',
-        '73.0.3652.1',
-        '73.0.3652.0',
-        '72.0.3626.36',
-        '71.0.3578.111',
-        '73.0.3651.1',
-        '73.0.3651.0',
-        '72.0.3626.35',
-        '71.0.3578.110',
-        '73.0.3650.1',
-        '73.0.3650.0',
-        '72.0.3626.34',
-        '71.0.3578.109',
-        '73.0.3649.1',
-        '73.0.3649.0',
-        '72.0.3626.33',
-        '71.0.3578.108',
-        '73.0.3648.2',
-        '73.0.3648.1',
-        '73.0.3648.0',
-        '72.0.3626.32',
-        '71.0.3578.107',
-        '73.0.3647.2',
-        '73.0.3647.1',
-        '73.0.3647.0',
-        '72.0.3626.31',
-        '71.0.3578.106',
-        '73.0.3635.3',
-        '73.0.3646.2',
-        '73.0.3646.1',
-        '73.0.3646.0',
-        '72.0.3626.30',
-        '71.0.3578.105',
-        '72.0.3626.29',
-        '73.0.3645.2',
-        '73.0.3645.1',
-        '73.0.3645.0',
-        '72.0.3626.28',
-        '71.0.3578.104',
-        '72.0.3626.27',
-        '72.0.3626.26',
-        '72.0.3626.25',
-        '72.0.3626.24',
-        '73.0.3644.0',
-        '73.0.3643.2',
-        '72.0.3626.23',
-        '71.0.3578.103',
-        '73.0.3643.1',
-        '73.0.3643.0',
-        '72.0.3626.22',
-        '71.0.3578.102',
-        '73.0.3642.1',
-        '73.0.3642.0',
-        '72.0.3626.21',
-        '71.0.3578.101',
-        '73.0.3641.1',
-        '73.0.3641.0',
-        '72.0.3626.20',
-        '71.0.3578.100',
-        '72.0.3626.19',
-        '73.0.3640.1',
-        '73.0.3640.0',
-        '72.0.3626.18',
-        '73.0.3639.1',
-        '71.0.3578.99',
-        '73.0.3639.0',
-        '72.0.3626.17',
-        '73.0.3638.2',
-        '72.0.3626.16',
-        '73.0.3638.1',
-        '73.0.3638.0',
-        '72.0.3626.15',
-        '71.0.3578.98',
-        '73.0.3635.2',
-        '71.0.3578.97',
-        '73.0.3637.1',
-        '73.0.3637.0',
-        '72.0.3626.14',
-        '71.0.3578.96',
-        '71.0.3578.95',
-        '72.0.3626.13',
-        '71.0.3578.94',
-        '73.0.3636.2',
-        '71.0.3578.93',
-        '73.0.3636.1',
-        '73.0.3636.0',
-        '72.0.3626.12',
-        '71.0.3578.92',
-        '73.0.3635.1',
-        '73.0.3635.0',
-        '72.0.3626.11',
-        '71.0.3578.91',
-        '73.0.3634.2',
-        '73.0.3634.1',
-        '73.0.3634.0',
-        '72.0.3626.10',
-        '71.0.3578.90',
-        '71.0.3578.89',
-        '73.0.3633.2',
-        '73.0.3633.1',
-        '73.0.3633.0',
-        '72.0.3610.4',
-        '72.0.3626.9',
-        '71.0.3578.88',
-        '73.0.3632.5',
-        '73.0.3632.4',
-        '73.0.3632.3',
-        '73.0.3632.2',
-        '73.0.3632.1',
-        '73.0.3632.0',
-        '72.0.3626.8',
-        '71.0.3578.87',
-        '73.0.3631.2',
-        '73.0.3631.1',
-        '73.0.3631.0',
-        '72.0.3626.7',
-        '71.0.3578.86',
-        '72.0.3626.6',
-        '73.0.3630.1',
-        '73.0.3630.0',
-        '72.0.3626.5',
-        '71.0.3578.85',
-        '72.0.3626.4',
-        '73.0.3628.3',
-        '73.0.3628.2',
-        '73.0.3629.1',
-        '73.0.3629.0',
-        '72.0.3626.3',
-        '71.0.3578.84',
-        '73.0.3628.1',
-        '73.0.3628.0',
-        '71.0.3578.83',
-        '73.0.3627.1',
-        '73.0.3627.0',
-        '72.0.3626.2',
-        '71.0.3578.82',
-        '71.0.3578.81',
-        '71.0.3578.80',
-        '72.0.3626.1',
-        '72.0.3626.0',
-        '71.0.3578.79',
-        '70.0.3538.124',
-        '71.0.3578.78',
-        '72.0.3623.4',
-        '72.0.3625.2',
-        '72.0.3625.1',
-        '72.0.3625.0',
-        '71.0.3578.77',
-        '70.0.3538.123',
-        '72.0.3624.4',
-        '72.0.3624.3',
-        '72.0.3624.2',
-        '71.0.3578.76',
-        '72.0.3624.1',
-        '72.0.3624.0',
-        '72.0.3623.3',
-        '71.0.3578.75',
-        '70.0.3538.122',
-        '71.0.3578.74',
-        '72.0.3623.2',
-        '72.0.3610.3',
-        '72.0.3623.1',
-        '72.0.3623.0',
-        '72.0.3622.3',
-        '72.0.3622.2',
-        '71.0.3578.73',
-        '70.0.3538.121',
-        '72.0.3622.1',
-        '72.0.3622.0',
-        '71.0.3578.72',
-        '70.0.3538.120',
-        '72.0.3621.1',
-        '72.0.3621.0',
-        '71.0.3578.71',
-        '70.0.3538.119',
-        '72.0.3620.1',
-        '72.0.3620.0',
-        '71.0.3578.70',
-        '70.0.3538.118',
-        '71.0.3578.69',
-        '72.0.3619.1',
-        '72.0.3619.0',
-        '71.0.3578.68',
-        '70.0.3538.117',
-        '71.0.3578.67',
-        '72.0.3618.1',
-        '72.0.3618.0',
-        '71.0.3578.66',
-        '70.0.3538.116',
-        '72.0.3617.1',
-        '72.0.3617.0',
-        '71.0.3578.65',
-        '70.0.3538.115',
-        '72.0.3602.3',
-        '71.0.3578.64',
-        '72.0.3616.1',
-        '72.0.3616.0',
-        '71.0.3578.63',
-        '70.0.3538.114',
-        '71.0.3578.62',
-        '72.0.3615.1',
-        '72.0.3615.0',
-        '71.0.3578.61',
-        '70.0.3538.113',
-        '72.0.3614.1',
-        '72.0.3614.0',
-        '71.0.3578.60',
-        '70.0.3538.112',
-        '72.0.3613.1',
-        '72.0.3613.0',
-        '71.0.3578.59',
-        '70.0.3538.111',
-        '72.0.3612.2',
-        '72.0.3612.1',
-        '72.0.3612.0',
-        '70.0.3538.110',
-        '71.0.3578.58',
-        '70.0.3538.109',
-        '72.0.3611.2',
-        '72.0.3611.1',
-        '72.0.3611.0',
-        '71.0.3578.57',
-        '70.0.3538.108',
-        '72.0.3610.2',
-        '71.0.3578.56',
-        '71.0.3578.55',
-        '72.0.3610.1',
-        '72.0.3610.0',
-        '71.0.3578.54',
-        '70.0.3538.107',
-        '71.0.3578.53',
-        '72.0.3609.3',
-        '71.0.3578.52',
-        '72.0.3609.2',
-        '71.0.3578.51',
-        '72.0.3608.5',
-        '72.0.3609.1',
-        '72.0.3609.0',
-        '71.0.3578.50',
-        '70.0.3538.106',
-        '72.0.3608.4',
-        '72.0.3608.3',
-        '72.0.3608.2',
-        '71.0.3578.49',
-        '72.0.3608.1',
-        '72.0.3608.0',
-        '70.0.3538.105',
-        '71.0.3578.48',
-        '72.0.3607.1',
-        '72.0.3607.0',
-        '71.0.3578.47',
-        '70.0.3538.104',
-        '72.0.3606.2',
-        '72.0.3606.1',
-        '72.0.3606.0',
-        '71.0.3578.46',
-        '70.0.3538.103',
-        '70.0.3538.102',
-        '72.0.3605.3',
-        '72.0.3605.2',
-        '72.0.3605.1',
-        '72.0.3605.0',
-        '71.0.3578.45',
-        '70.0.3538.101',
-        '71.0.3578.44',
-        '71.0.3578.43',
-        '70.0.3538.100',
-        '70.0.3538.99',
-        '71.0.3578.42',
-        '72.0.3604.1',
-        '72.0.3604.0',
-        '71.0.3578.41',
-        '70.0.3538.98',
-        '71.0.3578.40',
-        '72.0.3603.2',
-        '72.0.3603.1',
-        '72.0.3603.0',
-        '71.0.3578.39',
-        '70.0.3538.97',
-        '72.0.3602.2',
-        '71.0.3578.38',
-        '71.0.3578.37',
-        '72.0.3602.1',
-        '72.0.3602.0',
-        '71.0.3578.36',
-        '70.0.3538.96',
-        '72.0.3601.1',
-        '72.0.3601.0',
-        '71.0.3578.35',
-        '70.0.3538.95',
-        '72.0.3600.1',
-        '72.0.3600.0',
-        '71.0.3578.34',
-        '70.0.3538.94',
-        '72.0.3599.3',
-        '72.0.3599.2',
-        '72.0.3599.1',
-        '72.0.3599.0',
-        '71.0.3578.33',
-        '70.0.3538.93',
-        '72.0.3598.1',
-        '72.0.3598.0',
-        '71.0.3578.32',
-        '70.0.3538.87',
-        '72.0.3597.1',
-        '72.0.3597.0',
-        '72.0.3596.2',
-        '71.0.3578.31',
-        '70.0.3538.86',
-        '71.0.3578.30',
-        '71.0.3578.29',
-        '72.0.3596.1',
-        '72.0.3596.0',
-        '71.0.3578.28',
-        '70.0.3538.85',
-        '72.0.3595.2',
-        '72.0.3591.3',
-        '72.0.3595.1',
-        '72.0.3595.0',
-        '71.0.3578.27',
-        '70.0.3538.84',
-        '72.0.3594.1',
-        '72.0.3594.0',
-        '71.0.3578.26',
-        '70.0.3538.83',
-        '72.0.3593.2',
-        '72.0.3593.1',
-        '72.0.3593.0',
-        '71.0.3578.25',
-        '70.0.3538.82',
-        '72.0.3589.3',
-        '72.0.3592.2',
-        '72.0.3592.1',
-        '72.0.3592.0',
-        '71.0.3578.24',
-        '72.0.3589.2',
-        '70.0.3538.81',
-        '70.0.3538.80',
-        '72.0.3591.2',
-        '72.0.3591.1',
-        '72.0.3591.0',
-        '71.0.3578.23',
-        '70.0.3538.79',
-        '71.0.3578.22',
-        '72.0.3590.1',
-        '72.0.3590.0',
-        '71.0.3578.21',
-        '70.0.3538.78',
-        '70.0.3538.77',
-        '72.0.3589.1',
-        '72.0.3589.0',
-        '71.0.3578.20',
-        '70.0.3538.76',
-        '71.0.3578.19',
-        '70.0.3538.75',
-        '72.0.3588.1',
-        '72.0.3588.0',
-        '71.0.3578.18',
-        '70.0.3538.74',
-        '72.0.3586.2',
-        '72.0.3587.0',
-        '71.0.3578.17',
-        '70.0.3538.73',
-        '72.0.3586.1',
-        '72.0.3586.0',
-        '71.0.3578.16',
-        '70.0.3538.72',
-        '72.0.3585.1',
-        '72.0.3585.0',
-        '71.0.3578.15',
-        '70.0.3538.71',
-        '71.0.3578.14',
-        '72.0.3584.1',
-        '72.0.3584.0',
-        '71.0.3578.13',
-        '70.0.3538.70',
-        '72.0.3583.2',
-        '71.0.3578.12',
-        '72.0.3583.1',
-        '72.0.3583.0',
-        '71.0.3578.11',
-        '70.0.3538.69',
-        '71.0.3578.10',
-        '72.0.3582.0',
-        '72.0.3581.4',
-        '71.0.3578.9',
-        '70.0.3538.67',
-        '72.0.3581.3',
-        '72.0.3581.2',
-        '72.0.3581.1',
-        '72.0.3581.0',
-        '71.0.3578.8',
-        '70.0.3538.66',
-        '72.0.3580.1',
-        '72.0.3580.0',
-        '71.0.3578.7',
-        '70.0.3538.65',
-        '71.0.3578.6',
-        '72.0.3579.1',
-        '72.0.3579.0',
-        '71.0.3578.5',
-        '70.0.3538.64',
-        '71.0.3578.4',
-        '71.0.3578.3',
-        '71.0.3578.2',
-        '71.0.3578.1',
-        '71.0.3578.0',
-        '70.0.3538.63',
-        '69.0.3497.128',
-        '70.0.3538.62',
-        '70.0.3538.61',
-        '70.0.3538.60',
-        '70.0.3538.59',
-        '71.0.3577.1',
-        '71.0.3577.0',
-        '70.0.3538.58',
-        '69.0.3497.127',
-        '71.0.3576.2',
-        '71.0.3576.1',
-        '71.0.3576.0',
-        '70.0.3538.57',
-        '70.0.3538.56',
-        '71.0.3575.2',
-        '70.0.3538.55',
-        '69.0.3497.126',
-        '70.0.3538.54',
-        '71.0.3575.1',
-        '71.0.3575.0',
-        '71.0.3574.1',
-        '71.0.3574.0',
-        '70.0.3538.53',
-        '69.0.3497.125',
-        '70.0.3538.52',
-        '71.0.3573.1',
-        '71.0.3573.0',
-        '70.0.3538.51',
-        '69.0.3497.124',
-        '71.0.3572.1',
-        '71.0.3572.0',
-        '70.0.3538.50',
-        '69.0.3497.123',
-        '71.0.3571.2',
-        '70.0.3538.49',
-        '69.0.3497.122',
-        '71.0.3571.1',
-        '71.0.3571.0',
-        '70.0.3538.48',
-        '69.0.3497.121',
-        '71.0.3570.1',
-        '71.0.3570.0',
-        '70.0.3538.47',
-        '69.0.3497.120',
-        '71.0.3568.2',
-        '71.0.3569.1',
-        '71.0.3569.0',
-        '70.0.3538.46',
-        '69.0.3497.119',
-        '70.0.3538.45',
-        '71.0.3568.1',
-        '71.0.3568.0',
-        '70.0.3538.44',
-        '69.0.3497.118',
-        '70.0.3538.43',
-        '70.0.3538.42',
-        '71.0.3567.1',
-        '71.0.3567.0',
-        '70.0.3538.41',
-        '69.0.3497.117',
-        '71.0.3566.1',
-        '71.0.3566.0',
-        '70.0.3538.40',
-        '69.0.3497.116',
-        '71.0.3565.1',
-        '71.0.3565.0',
-        '70.0.3538.39',
-        '69.0.3497.115',
-        '71.0.3564.1',
-        '71.0.3564.0',
-        '70.0.3538.38',
-        '69.0.3497.114',
-        '71.0.3563.0',
-        '71.0.3562.2',
-        '70.0.3538.37',
-        '69.0.3497.113',
-        '70.0.3538.36',
-        '70.0.3538.35',
-        '71.0.3562.1',
-        '71.0.3562.0',
-        '70.0.3538.34',
-        '69.0.3497.112',
-        '70.0.3538.33',
-        '71.0.3561.1',
-        '71.0.3561.0',
-        '70.0.3538.32',
-        '69.0.3497.111',
-        '71.0.3559.6',
-        '71.0.3560.1',
-        '71.0.3560.0',
-        '71.0.3559.5',
-        '71.0.3559.4',
-        '70.0.3538.31',
-        '69.0.3497.110',
-        '71.0.3559.3',
-        '70.0.3538.30',
-        '69.0.3497.109',
-        '71.0.3559.2',
-        '71.0.3559.1',
-        '71.0.3559.0',
-        '70.0.3538.29',
-        '69.0.3497.108',
-        '71.0.3558.2',
-        '71.0.3558.1',
-        '71.0.3558.0',
-        '70.0.3538.28',
-        '69.0.3497.107',
-        '71.0.3557.2',
-        '71.0.3557.1',
-        '71.0.3557.0',
-        '70.0.3538.27',
-        '69.0.3497.106',
-        '71.0.3554.4',
-        '70.0.3538.26',
-        '71.0.3556.1',
-        '71.0.3556.0',
-        '70.0.3538.25',
-        '71.0.3554.3',
-        '69.0.3497.105',
-        '71.0.3554.2',
-        '70.0.3538.24',
-        '69.0.3497.104',
-        '71.0.3555.2',
-        '70.0.3538.23',
-        '71.0.3555.1',
-        '71.0.3555.0',
-        '70.0.3538.22',
-        '69.0.3497.103',
-        '71.0.3554.1',
-        '71.0.3554.0',
-        '70.0.3538.21',
-        '69.0.3497.102',
-        '71.0.3553.3',
-        '70.0.3538.20',
-        '69.0.3497.101',
-        '71.0.3553.2',
-        '69.0.3497.100',
-        '71.0.3553.1',
-        '71.0.3553.0',
-        '70.0.3538.19',
-        '69.0.3497.99',
-        '69.0.3497.98',
-        '69.0.3497.97',
-        '71.0.3552.6',
-        '71.0.3552.5',
-        '71.0.3552.4',
-        '71.0.3552.3',
-        '71.0.3552.2',
-        '71.0.3552.1',
-        '71.0.3552.0',
-        '70.0.3538.18',
-        '69.0.3497.96',
-        '71.0.3551.3',
-        '71.0.3551.2',
-        '71.0.3551.1',
-        '71.0.3551.0',
-        '70.0.3538.17',
-        '69.0.3497.95',
-        '71.0.3550.3',
-        '71.0.3550.2',
-        '71.0.3550.1',
-        '71.0.3550.0',
-        '70.0.3538.16',
-        '69.0.3497.94',
-        '71.0.3549.1',
-        '71.0.3549.0',
-        '70.0.3538.15',
-        '69.0.3497.93',
-        '69.0.3497.92',
-        '71.0.3548.1',
-        '71.0.3548.0',
-        '70.0.3538.14',
-        '69.0.3497.91',
-        '71.0.3547.1',
-        '71.0.3547.0',
-        '70.0.3538.13',
-        '69.0.3497.90',
-        '71.0.3546.2',
-        '69.0.3497.89',
-        '71.0.3546.1',
-        '71.0.3546.0',
-        '70.0.3538.12',
-        '69.0.3497.88',
-        '71.0.3545.4',
-        '71.0.3545.3',
-        '71.0.3545.2',
-        '71.0.3545.1',
-        '71.0.3545.0',
-        '70.0.3538.11',
-        '69.0.3497.87',
-        '71.0.3544.5',
-        '71.0.3544.4',
-        '71.0.3544.3',
-        '71.0.3544.2',
-        '71.0.3544.1',
-        '71.0.3544.0',
-        '69.0.3497.86',
-        '70.0.3538.10',
-        '69.0.3497.85',
-        '70.0.3538.9',
-        '69.0.3497.84',
-        '71.0.3543.4',
-        '70.0.3538.8',
-        '71.0.3543.3',
-        '71.0.3543.2',
-        '71.0.3543.1',
-        '71.0.3543.0',
-        '70.0.3538.7',
-        '69.0.3497.83',
-        '71.0.3542.2',
-        '71.0.3542.1',
-        '71.0.3542.0',
-        '70.0.3538.6',
-        '69.0.3497.82',
-        '69.0.3497.81',
-        '71.0.3541.1',
-        '71.0.3541.0',
-        '70.0.3538.5',
-        '69.0.3497.80',
-        '71.0.3540.1',
-        '71.0.3540.0',
-        '70.0.3538.4',
-        '69.0.3497.79',
-        '70.0.3538.3',
-        '71.0.3539.1',
-        '71.0.3539.0',
-        '69.0.3497.78',
-        '68.0.3440.134',
-        '69.0.3497.77',
-        '70.0.3538.2',
-        '70.0.3538.1',
-        '70.0.3538.0',
-        '69.0.3497.76',
-        '68.0.3440.133',
-        '69.0.3497.75',
-        '70.0.3537.2',
-        '70.0.3537.1',
-        '70.0.3537.0',
-        '69.0.3497.74',
-        '68.0.3440.132',
-        '70.0.3536.0',
-        '70.0.3535.5',
-        '70.0.3535.4',
-        '70.0.3535.3',
-        '69.0.3497.73',
-        '68.0.3440.131',
-        '70.0.3532.8',
-        '70.0.3532.7',
-        '69.0.3497.72',
-        '69.0.3497.71',
-        '70.0.3535.2',
-        '70.0.3535.1',
-        '70.0.3535.0',
-        '69.0.3497.70',
-        '68.0.3440.130',
-        '69.0.3497.69',
-        '68.0.3440.129',
-        '70.0.3534.4',
-        '70.0.3534.3',
-        '70.0.3534.2',
-        '70.0.3534.1',
-        '70.0.3534.0',
-        '69.0.3497.68',
-        '68.0.3440.128',
-        '70.0.3533.2',
-        '70.0.3533.1',
-        '70.0.3533.0',
-        '69.0.3497.67',
-        '68.0.3440.127',
-        '70.0.3532.6',
-        '70.0.3532.5',
-        '70.0.3532.4',
-        '69.0.3497.66',
-        '68.0.3440.126',
-        '70.0.3532.3',
-        '70.0.3532.2',
-        '70.0.3532.1',
-        '69.0.3497.60',
-        '69.0.3497.65',
-        '69.0.3497.64',
-        '70.0.3532.0',
-        '70.0.3531.0',
-        '70.0.3530.4',
-        '70.0.3530.3',
-        '70.0.3530.2',
-        '69.0.3497.58',
-        '68.0.3440.125',
-        '69.0.3497.57',
-        '69.0.3497.56',
-        '69.0.3497.55',
-        '69.0.3497.54',
-        '70.0.3530.1',
-        '70.0.3530.0',
-        '69.0.3497.53',
-        '68.0.3440.124',
-        '69.0.3497.52',
-        '70.0.3529.3',
-        '70.0.3529.2',
-        '70.0.3529.1',
-        '70.0.3529.0',
-        '69.0.3497.51',
-        '70.0.3528.4',
-        '68.0.3440.123',
-        '70.0.3528.3',
-        '70.0.3528.2',
-        '70.0.3528.1',
-        '70.0.3528.0',
-        '69.0.3497.50',
-        '68.0.3440.122',
-        '70.0.3527.1',
-        '70.0.3527.0',
-        '69.0.3497.49',
-        '68.0.3440.121',
-        '70.0.3526.1',
-        '70.0.3526.0',
-        '68.0.3440.120',
-        '69.0.3497.48',
-        '69.0.3497.47',
-        '68.0.3440.119',
-        '68.0.3440.118',
-        '70.0.3525.5',
-        '70.0.3525.4',
-        '70.0.3525.3',
-        '68.0.3440.117',
-        '69.0.3497.46',
-        '70.0.3525.2',
-        '70.0.3525.1',
-        '70.0.3525.0',
-        '69.0.3497.45',
-        '68.0.3440.116',
-        '70.0.3524.4',
-        '70.0.3524.3',
-        '69.0.3497.44',
-        '70.0.3524.2',
-        '70.0.3524.1',
-        '70.0.3524.0',
-        '70.0.3523.2',
-        '69.0.3497.43',
-        '68.0.3440.115',
-        '70.0.3505.9',
-        '69.0.3497.42',
-        '70.0.3505.8',
-        '70.0.3523.1',
-        '70.0.3523.0',
-        '69.0.3497.41',
-        '68.0.3440.114',
-        '70.0.3505.7',
-        '69.0.3497.40',
-        '70.0.3522.1',
-        '70.0.3522.0',
-        '70.0.3521.2',
-        '69.0.3497.39',
-        '68.0.3440.113',
-        '70.0.3505.6',
-        '70.0.3521.1',
-        '70.0.3521.0',
-        '69.0.3497.38',
-        '68.0.3440.112',
-        '70.0.3520.1',
-        '70.0.3520.0',
-        '69.0.3497.37',
-        '68.0.3440.111',
-        '70.0.3519.3',
-        '70.0.3519.2',
-        '70.0.3519.1',
-        '70.0.3519.0',
-        '69.0.3497.36',
-        '68.0.3440.110',
-        '70.0.3518.1',
-        '70.0.3518.0',
-        '69.0.3497.35',
-        '69.0.3497.34',
-        '68.0.3440.109',
-        '70.0.3517.1',
-        '70.0.3517.0',
-        '69.0.3497.33',
-        '68.0.3440.108',
-        '69.0.3497.32',
-        '70.0.3516.3',
-        '70.0.3516.2',
-        '70.0.3516.1',
-        '70.0.3516.0',
-        '69.0.3497.31',
-        '68.0.3440.107',
-        '70.0.3515.4',
-        '68.0.3440.106',
-        '70.0.3515.3',
-        '70.0.3515.2',
-        '70.0.3515.1',
-        '70.0.3515.0',
-        '69.0.3497.30',
-        '68.0.3440.105',
-        '68.0.3440.104',
-        '70.0.3514.2',
-        '70.0.3514.1',
-        '70.0.3514.0',
-        '69.0.3497.29',
-        '68.0.3440.103',
-        '70.0.3513.1',
-        '70.0.3513.0',
-        '69.0.3497.28',
+        '90.0.4430.212',
+        '90.0.4430.24',
+        '90.0.4430.70',
+        '90.0.4430.72',
+        '90.0.4430.85',
+        '90.0.4430.93',
+        '91.0.4472.101',
+        '91.0.4472.106',
+        '91.0.4472.114',
+        '91.0.4472.124',
+        '91.0.4472.164',
+        '91.0.4472.19',
+        '91.0.4472.77',
+        '92.0.4515.107',
+        '92.0.4515.115',
+        '92.0.4515.131',
+        '92.0.4515.159',
+        '92.0.4515.43',
+        '93.0.4556.0',
+        '93.0.4577.15',
+        '93.0.4577.63',
+        '93.0.4577.82',
+        '94.0.4606.41',
+        '94.0.4606.54',
+        '94.0.4606.61',
+        '94.0.4606.71',
+        '94.0.4606.81',
+        '94.0.4606.85',
+        '95.0.4638.17',
+        '95.0.4638.50',
+        '95.0.4638.54',
+        '95.0.4638.69',
+        '95.0.4638.74',
+        '96.0.4664.18',
+        '96.0.4664.45',
+        '96.0.4664.55',
+        '96.0.4664.93',
+        '97.0.4692.20',
     )
     return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
 
 
+SUPPORTED_ENCODINGS = [
+    'gzip', 'deflate'
+]
+if brotli:
+    SUPPORTED_ENCODINGS.append('br')
+
 std_headers = {
     'User-Agent': random_user_agent(),
-    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-    'Accept-Encoding': 'gzip, deflate',
     'Accept-Language': 'en-us,en;q=0.5',
+    'Sec-Fetch-Mode': 'navigate',
 }
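
Note: the Accept-Encoding header is no longer hard-coded in std_headers; the supported
encodings now depend on whether the optional brotli module is importable. A minimal
sketch of how the header can be assembled from SUPPORTED_ENCODINGS (illustrative only;
the actual call site is elsewhere in yt-dlp):

    # Hypothetical usage sketch, not part of this diff
    from yt_dlp.utils import SUPPORTED_ENCODINGS, std_headers

    headers = {**std_headers, 'Accept-Encoding': ', '.join(SUPPORTED_ENCODINGS)}
    # -> 'gzip, deflate', plus ', br' when brotli is available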
 
 
@@ -1748,6 +202,7 @@ def random_user_agent():
     '%Y/%m/%d %H:%M:%S',
     '%Y%m%d%H%M',
     '%Y%m%d%H%M%S',
+    '%Y%m%d',
     '%Y-%m-%d %H:%M',
     '%Y-%m-%d %H:%M:%S',
     '%Y-%m-%d %H:%M:%S.%f',
@@ -1789,7 +244,10 @@ def random_user_agent():
 PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
 JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
 
+NUMBER_RE = r'\d+(?:\.\d+)?'
+
 
+@functools.cache
 def preferredencoding():
     """Get preferred encoding.
 
@@ -1808,77 +266,34 @@ def preferredencoding():
 def write_json_file(obj, fn):
     """ Encode obj as JSON and write it to fn, atomically if possible """
 
-    fn = encodeFilename(fn)
-    if sys.version_info < (3, 0) and sys.platform != 'win32':
-        encoding = get_filesystem_encoding()
-        # os.path.basename returns a bytes object, but NamedTemporaryFile
-        # will fail if the filename contains non ascii characters unless we
-        # use a unicode object
-        path_basename = lambda f: os.path.basename(fn).decode(encoding)
-        # the same for os.path.dirname
-        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
-    else:
-        path_basename = os.path.basename
-        path_dirname = os.path.dirname
-
-    args = {
-        'suffix': '.tmp',
-        'prefix': path_basename(fn) + '.',
-        'dir': path_dirname(fn),
-        'delete': False,
-    }
-
-    # In Python 2.x, json.dump expects a bytestream.
-    # In Python 3.x, it writes to a character stream
-    if sys.version_info < (3, 0):
-        args['mode'] = 'wb'
-    else:
-        args.update({
-            'mode': 'w',
-            'encoding': 'utf-8',
-        })
-
-    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
+    tf = tempfile.NamedTemporaryFile(
+        prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn),
+        suffix='.tmp', delete=False, mode='w', encoding='utf-8')
 
     try:
         with tf:
-            json.dump(obj, tf)
+            json.dump(obj, tf, ensure_ascii=False)
         if sys.platform == 'win32':
             # Need to remove existing file on Windows, else os.rename raises
             # WindowsError or FileExistsError.
-            try:
+            with contextlib.suppress(OSError):
                 os.unlink(fn)
-            except OSError:
-                pass
-        try:
+        with contextlib.suppress(OSError):
             mask = os.umask(0)
             os.umask(mask)
             os.chmod(tf.name, 0o666 & ~mask)
-        except OSError:
-            pass
         os.rename(tf.name, fn)
     except Exception:
-        try:
+        with contextlib.suppress(OSError):
             os.remove(tf.name)
-        except OSError:
-            pass
         raise
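
Note: with the Python 2 branches removed, write_json_file always writes UTF-8 text and
keeps non-ASCII characters verbatim (ensure_ascii=False). A minimal usage sketch
(hypothetical filename):

    from yt_dlp.utils import write_json_file

    # The object is dumped to a '<fn>.<random>.tmp' file next to the target and then
    # renamed over it, so readers never see a half-written file.
    write_json_file({'id': 'abc', 'title': 'año nuevo'}, 'info.json')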
 
 
-if sys.version_info >= (2, 7):
-    def find_xpath_attr(node, xpath, key, val=None):
-        """ Find the xpath xpath[@key=val] """
-        assert re.match(r'^[a-zA-Z_-]+$', key)
-        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
-        return node.find(expr)
-else:
-    def find_xpath_attr(node, xpath, key, val=None):
-        for f in node.findall(compat_xpath(xpath)):
-            if key not in f.attrib:
-                continue
-            if val is None or f.attrib.get(key) == val:
-                return f
-        return None
+def find_xpath_attr(node, xpath, key, val=None):
+    """ Find the xpath xpath[@key=val] """
+    assert re.match(r'^[a-zA-Z_-]+$', key)
+    expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']")
+    return node.find(expr)
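
Note: find_xpath_attr now has a single implementation that builds an attribute
predicate and delegates to ElementTree. A small sketch (hypothetical XML):

    import xml.etree.ElementTree as ET
    from yt_dlp.utils import find_xpath_attr

    doc = ET.fromstring('<root><media lang="en" url="a.mp4"/><media lang="de" url="b.mp4"/></root>')
    # Equivalent to doc.find(".//media[@lang='de']")
    print(find_xpath_attr(doc, './/media', 'lang', 'de').get('url'))  # b.mp4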
 
 # On python2.6 the xml.etree.ElementTree.Element methods don't support
 # the namespace parameter
@@ -1898,7 +313,7 @@ def xpath_with_ns(path, ns_map):
 
 def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
     def _find_xpath(xpath):
-        return node.find(compat_xpath(xpath))
+        return node.find(xpath)
 
     if isinstance(xpath, (str, compat_str)):
         n = _find_xpath(xpath)
@@ -1940,16 +355,21 @@ def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
         if default is not NO_DEFAULT:
             return default
         elif fatal:
-            name = '%s[@%s]' % (xpath, key) if name is None else name
+            name = f'{xpath}[@{key}]' if name is None else name
             raise ExtractorError('Could not find XML attribute %s' % name)
         else:
             return None
     return n.attrib[key]
 
 
-def get_element_by_id(id, html):
+def get_element_by_id(id, html, **kwargs):
     """Return the content of the tag with the specified ID in the passed HTML document"""
     """Return the content of the tag with the specified ID in the passed HTML document"""
-    return get_element_by_attribute('id', id, html)
+    return get_element_by_attribute('id', id, html, **kwargs)
+
+
+def get_element_html_by_id(id, html, **kwargs):
+    """Return the html of the tag with the specified ID in the passed HTML document"""
+    return get_element_html_by_attribute('id', id, html, **kwargs)
 
 
 def get_element_by_class(class_name, html):
@@ -1958,41 +378,146 @@ def get_element_by_class(class_name, html):
     return retval[0] if retval else None
 
 
-def get_element_by_attribute(attribute, value, html, escape_value=True):
-    retval = get_elements_by_attribute(attribute, value, html, escape_value)
+def get_element_html_by_class(class_name, html):
+    """Return the html of the first tag with the specified class in the passed HTML document"""
+    retval = get_elements_html_by_class(class_name, html)
+    return retval[0] if retval else None
+
+
+def get_element_by_attribute(attribute, value, html, **kwargs):
+    retval = get_elements_by_attribute(attribute, value, html, **kwargs)
+    return retval[0] if retval else None
+
+
+def get_element_html_by_attribute(attribute, value, html, **kargs):
+    retval = get_elements_html_by_attribute(attribute, value, html, **kargs)
     return retval[0] if retval else None
 
 
-def get_elements_by_class(class_name, html):
+def get_elements_by_class(class_name, html, **kargs):
     """Return the content of all tags with the specified class in the passed HTML document as a list"""
     return get_elements_by_attribute(
         'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
         html, escape_value=False)
 
 
     """Return the content of all tags with the specified class in the passed HTML document as a list"""
     return get_elements_by_attribute(
         'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
         html, escape_value=False)
 
 
-def get_elements_by_attribute(attribute, value, html, escape_value=True):
+def get_elements_html_by_class(class_name, html):
+    """Return the html of all tags with the specified class in the passed HTML document as a list"""
+    return get_elements_html_by_attribute(
+        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
+        html, escape_value=False)
+
+
+def get_elements_by_attribute(*args, **kwargs):
     """Return the content of the tag with the specified attribute in the passed HTML document"""
     """Return the content of the tag with the specified attribute in the passed HTML document"""
+    return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
+
+
+def get_elements_html_by_attribute(*args, **kwargs):
+    """Return the html of the tag with the specified attribute in the passed HTML document"""
+    return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
+
+
+def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
+    """
+    Return the text (content) and the html (whole) of the tag with the specified
+    attribute in the passed HTML document
+    """
+
+    quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'
 
     value = re.escape(value) if escape_value else value
 
-    retlist = []
-    for m in re.finditer(r'''(?xs)
-        <([a-zA-Z0-9:._-]+)
-         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
-         \s+%s=['"]?%s['"]?
-         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
-        \s*>
-        (?P<content>.*?)
-        </\1>
-    ''' % (re.escape(attribute), value), html):
-        res = m.group('content')
+    partial_element_re = rf'''(?x)
+        <(?P<tag>[a-zA-Z0-9:._-]+)
+         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
+         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
+        '''
+
+    for m in re.finditer(partial_element_re, html):
+        content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])
+
+        yield (
+            unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
+            whole
+        )
+
+
+class HTMLBreakOnClosingTagParser(compat_HTMLParser):
+    """
+    HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
+    closing tag for the first opening tag it has encountered, and can be used
+    as a context manager
+    """
+
+    class HTMLBreakOnClosingTagException(Exception):
+        pass
+
+    def __init__(self):
+        self.tagstack = collections.deque()
+        compat_HTMLParser.__init__(self)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *_):
+        self.close()
+
+    def close(self):
+        # handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
+        # so data remains buffered; we no longer have any interest in it, thus
+        # override this method to discard it
+        pass
 
-        if res.startswith('"') or res.startswith("'"):
-            res = res[1:-1]
+    def handle_starttag(self, tag, _):
+        self.tagstack.append(tag)
 
-        retlist.append(unescapeHTML(res))
+    def handle_endtag(self, tag):
+        if not self.tagstack:
+            raise compat_HTMLParseError('no tags in the stack')
+        while self.tagstack:
+            inner_tag = self.tagstack.pop()
+            if inner_tag == tag:
+                break
+        else:
+            raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
+        if not self.tagstack:
+            raise self.HTMLBreakOnClosingTagException()
 
-    return retlist
+
+def get_element_text_and_html_by_tag(tag, html):
+    """
+    For the first element with the specified tag in the passed HTML document
+    return its content (text) and the whole element (html)
+    """
+    def find_or_raise(haystack, needle, exc):
+        try:
+            return haystack.index(needle)
+        except ValueError:
+            raise exc
+    closing_tag = f'</{tag}>'
+    whole_start = find_or_raise(
+        html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
+    content_start = find_or_raise(
+        html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
+    content_start += whole_start + 1
+    with HTMLBreakOnClosingTagParser() as parser:
+        parser.feed(html[whole_start:content_start])
+        if not parser.tagstack or parser.tagstack[0] != tag:
+            raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
+        offset = content_start
+        while offset < len(html):
+            next_closing_tag_start = find_or_raise(
+                html[offset:], closing_tag,
+                compat_HTMLParseError(f'closing {tag} tag not found'))
+            next_closing_tag_end = next_closing_tag_start + len(closing_tag)
+            try:
+                parser.feed(html[offset:offset + next_closing_tag_end])
+                offset += next_closing_tag_end
+            except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
+                return html[content_start:offset + next_closing_tag_start], \
+                    html[whole_start:offset + next_closing_tag_end]
+        raise compat_HTMLParseError('unexpected end of html')
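# A usage sketch (not part of the patch): nested tags with the same name are balanced by the parser above.
text, whole = get_element_text_and_html_by_tag('div', '<div id="x">a<div>b</div>c</div>')
assert text == 'a<div>b</div>c'
assert whole == '<div id="x">a<div>b</div>c</div>'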
 
 
 class HTMLAttributeParser(compat_HTMLParser):
 
 
 class HTMLAttributeParser(compat_HTMLParser):
@@ -2006,6 +531,23 @@ def handle_starttag(self, tag, attrs):
         self.attrs = dict(attrs)
 
 
         self.attrs = dict(attrs)
 
 
+class HTMLListAttrsParser(compat_HTMLParser):
+    """HTML parser to gather the attributes for the elements of a list"""
+
+    def __init__(self):
+        compat_HTMLParser.__init__(self)
+        self.items = []
+        self._level = 0
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'li' and self._level == 0:
+            self.items.append(dict(attrs))
+        self._level += 1
+
+    def handle_endtag(self, tag):
+        self._level -= 1
+
+
 def extract_attributes(html_element):
     """Given a string for an HTML element such as
     <el
 def extract_attributes(html_element):
     """Given a string for an HTML element such as
     <el
@@ -2019,29 +561,32 @@ def extract_attributes(html_element):
         'empty': '', 'noval': None, 'entity': '&',
         'sq': '"', 'dq': '\''
     }.
         'empty': '', 'noval': None, 'entity': '&',
         'sq': '"', 'dq': '\''
     }.
-    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
-    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
     """
     parser = HTMLAttributeParser()
     """
     parser = HTMLAttributeParser()
-    try:
+    with contextlib.suppress(compat_HTMLParseError):
         parser.feed(html_element)
         parser.close()
         parser.feed(html_element)
         parser.close()
-    # Older Python may throw HTMLParseError in case of malformed HTML
-    except compat_HTMLParseError:
-        pass
     return parser.attrs
 
 
     return parser.attrs
 
 
+def parse_list(webpage):
+    """Given a string for an series of HTML <li> elements,
+    return a dictionary of their attributes"""
+    parser = HTMLListAttrsParser()
+    parser.feed(webpage)
+    parser.close()
+    return parser.items
+
+
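# A usage sketch (not part of the patch): only top-level <li> elements contribute an attribute dict.
example_items = parse_list('<li data-id="1">a<span>x</span></li><li data-id="2">b</li>')
assert example_items == [{'data-id': '1'}, {'data-id': '2'}]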
 def clean_html(html):
     """Clean an HTML snippet into a readable string"""
 
     if html is None:  # Convenience for sanitizing descriptions etc.
         return html
 
 def clean_html(html):
     """Clean an HTML snippet into a readable string"""
 
     if html is None:  # Convenience for sanitizing descriptions etc.
         return html
 
-    # Newline vs <br />
-    html = html.replace('\n', ' ')
-    html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
-    html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
+    html = re.sub(r'\s+', ' ', html)
+    html = re.sub(r'(?u)\s?<\s?br\s?/?\s?>\s?', '\n', html)
+    html = re.sub(r'(?u)<\s?/\s?p\s?>\s?<\s?p[^>]*>', '\n', html)
     # Strip html tags
     html = re.sub('<.*?>', '', html)
     # Replace html entities
     # Strip html tags
     html = re.sub('<.*?>', '', html)
     # Replace html entities
@@ -2059,26 +604,30 @@ def sanitize_open(filename, open_mode):
 
     It returns the tuple (stream, definitive_file_name).
     """
 
     It returns the tuple (stream, definitive_file_name).
     """
-    try:
-        if filename == '-':
-            if sys.platform == 'win32':
-                import msvcrt
-                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
-            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
-        stream = open(encodeFilename(filename), open_mode)
-        return (stream, filename)
-    except (IOError, OSError) as err:
-        if err.errno in (errno.EACCES,):
-            raise
+    if filename == '-':
+        if sys.platform == 'win32':
+            import msvcrt
+            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+        return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
 
 
-        # In case of error, try to remove win32 forbidden chars
-        alt_filename = sanitize_path(filename)
-        if alt_filename == filename:
-            raise
-        else:
-            # An exception here should be caught in the caller
-            stream = open(encodeFilename(alt_filename), open_mode)
-            return (stream, alt_filename)
+    for attempt in range(2):
+        try:
+            try:
+                if sys.platform == 'win32':
+                    # FIXME: An exclusive lock also locks the file from being read.
+                    # Since windows locks are mandatory, don't lock the file on windows (for now).
+                    # Ref: https://github.com/yt-dlp/yt-dlp/issues/3124
+                    raise LockingUnsupportedError()
+                stream = locked_file(filename, open_mode, block=False).__enter__()
+            except OSError:
+                stream = open(filename, open_mode)
+            return stream, filename
+        except OSError as err:
+            if attempt or err.errno in (errno.EACCES,):
+                raise
+            old_filename, filename = filename, sanitize_path(filename)
+            if old_filename == filename:
+                raise
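# A usage sketch (not part of the patch): the returned name may differ from the
# requested one if the OS rejects it (e.g. ':' on Windows) and sanitize_path() had to be applied.
stream, used_name = sanitize_open('report: part 1.txt', 'wb')
stream.write(b'example data')
stream.close()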
 
 
 def timeconvert(timestr):
 
 
 def timeconvert(timestr):
@@ -2090,36 +639,40 @@ def timeconvert(timestr):
     return timestamp
 
 
     return timestamp
 
 
-def sanitize_filename(s, restricted=False, is_id=False):
+def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
     """Sanitizes a string so it could be used as part of a filename.
     """Sanitizes a string so it could be used as part of a filename.
-    If restricted is set, use a stricter subset of allowed characters.
-    Set is_id if this is not an arbitrary string, but an ID that should be kept
-    if possible.
+    @param restricted   Use a stricter subset of allowed characters
+    @param is_id        Whether this is an ID that should be kept unchanged if possible.
+                        If unset, yt-dlp's new sanitization rules are in effect
     """
     """
+    if s == '':
+        return ''
+
     def replace_insane(char):
         if restricted and char in ACCENT_CHARS:
             return ACCENT_CHARS[char]
         elif not restricted and char == '\n':
     def replace_insane(char):
         if restricted and char in ACCENT_CHARS:
             return ACCENT_CHARS[char]
         elif not restricted and char == '\n':
-            return ' '
+            return '\0 '
         elif char == '?' or ord(char) < 32 or ord(char) == 127:
             return ''
         elif char == '"':
             return '' if restricted else '\''
         elif char == ':':
         elif char == '?' or ord(char) < 32 or ord(char) == 127:
             return ''
         elif char == '"':
             return '' if restricted else '\''
         elif char == ':':
-            return '_-' if restricted else ' -'
+            return '\0_\0-' if restricted else '\0 \0-'
         elif char in '\\/|*<>':
         elif char in '\\/|*<>':
-            return '_'
-        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
-            return '_'
-        if restricted and ord(char) > 127:
-            return '_'
+            return '\0_'
+        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
+            return '\0_'
         return char
 
         return char
 
-    if s == '':
-        return ''
-    # Handle timestamps
-    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
+    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
     result = ''.join(map(replace_insane, s))
     result = ''.join(map(replace_insane, s))
+    if is_id is NO_DEFAULT:
+        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
+        STRIP_RE = '(?:\0.|[ _-])*'
+        result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
+    result = result.replace('\0', '') or '_'
+
     if not is_id:
         while '__' in result:
             result = result.replace('__', '_')
     if not is_id:
         while '__' in result:
             result = result.replace('__', '_')
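# Hedged examples of the new default rules (is_id left unset): the NUL-marked
# substitutions above are deduplicated and stripped before the markers are removed.
assert sanitize_filename('yt-dlp: a/b video') == 'yt-dlp - a_b video'
assert sanitize_filename('yt-dlp: a/b video', restricted=True) == 'yt-dlp_-_a_b_video'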
@@ -2140,8 +693,6 @@ def sanitize_path(s, force=False):
     if sys.platform == 'win32':
         force = False
         drive_or_unc, _ = os.path.splitdrive(s)
     if sys.platform == 'win32':
         force = False
         drive_or_unc, _ = os.path.splitdrive(s)
-        if sys.version_info < (2, 7) and not drive_or_unc:
-            drive_or_unc, _ = os.path.splitunc(s)
     elif force:
         drive_or_unc = ''
     else:
     elif force:
         drive_or_unc = ''
     else:
@@ -2155,7 +706,7 @@ def sanitize_path(s, force=False):
         for path_part in norm_path]
     if drive_or_unc:
         sanitized_path.insert(0, drive_or_unc + os.path.sep)
         for path_part in norm_path]
     if drive_or_unc:
         sanitized_path.insert(0, drive_or_unc + os.path.sep)
-    elif force and s[0] == os.path.sep:
+    elif force and s and s[0] == os.path.sep:
         sanitized_path.insert(0, os.path.sep)
     return os.path.join(*sanitized_path)
 
         sanitized_path.insert(0, os.path.sep)
     return os.path.join(*sanitized_path)
 
@@ -2163,7 +714,9 @@ def sanitize_path(s, force=False):
 def sanitize_url(url):
     # Prepend protocol-less URLs with `http:` scheme in order to mitigate
     # the number of unwanted failures due to missing protocol
 def sanitize_url(url):
     # Prepend protocol-less URLs with `http:` scheme in order to mitigate
     # the number of unwanted failures due to missing protocol
-    if url.startswith('//'):
+    if url is None:
+        return
+    elif url.startswith('//'):
         return 'http:%s' % url
     # Fix some common typos seen so far
     COMMON_TYPOS = (
         return 'http:%s' % url
     # Fix some common typos seen so far
     COMMON_TYPOS = (
@@ -2186,8 +739,8 @@ def extract_basic_auth(url):
         parts.hostname if parts.port is None
         else '%s:%d' % (parts.hostname, parts.port))))
     auth_payload = base64.b64encode(
         parts.hostname if parts.port is None
         else '%s:%d' % (parts.hostname, parts.port))))
     auth_payload = base64.b64encode(
-        ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
-    return url, 'Basic ' + auth_payload.decode('utf-8')
+        ('%s:%s' % (parts.username, parts.password or '')).encode())
+    return url, f'Basic {auth_payload.decode()}'
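# A hedged usage sketch: credentials embedded in the URL are stripped from the
# netloc and returned as a ready-made Authorization header value.
clean_url, auth_header = extract_basic_auth('http://user:pass@example.com/path')
assert auth_header == 'Basic dXNlcjpwYXNz'  # base64('user:pass')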
 
 
 def sanitized_Request(url, *args, **kwargs):
 
 
 def sanitized_Request(url, *args, **kwargs):
@@ -2234,10 +787,8 @@ def _htmlentity_transform(entity_with_semicolon):
         else:
             base = 10
         # See https://github.com/ytdl-org/youtube-dl/issues/7518
         else:
             base = 10
         # See https://github.com/ytdl-org/youtube-dl/issues/7518
-        try:
+        with contextlib.suppress(ValueError):
             return compat_chr(int(numstr, base))
             return compat_chr(int(numstr, base))
-        except ValueError:
-            pass
 
     # Unknown entity in name, return its literal representation
     return '&%s;' % entity
 
     # Unknown entity in name, return its literal representation
     return '&%s;' % entity
@@ -2246,7 +797,7 @@ def _htmlentity_transform(entity_with_semicolon):
 def unescapeHTML(s):
     if s is None:
         return None
 def unescapeHTML(s):
     if s is None:
         return None
-    assert type(s) == compat_str
+    assert isinstance(s, str)
 
     return re.sub(
         r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
 
     return re.sub(
         r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
@@ -2264,12 +815,28 @@ def escapeHTML(text):
 
 
 def process_communicate_or_kill(p, *args, **kwargs):
 
 
 def process_communicate_or_kill(p, *args, **kwargs):
-    try:
-        return p.communicate(*args, **kwargs)
-    except BaseException:  # Including KeyboardInterrupt
-        p.kill()
-        p.wait()
-        raise
+    write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated '
+                 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead')
+    return Popen.communicate_or_kill(p, *args, **kwargs)
+
+
+class Popen(subprocess.Popen):
+    if sys.platform == 'win32':
+        _startupinfo = subprocess.STARTUPINFO()
+        _startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+    else:
+        _startupinfo = None
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs, startupinfo=self._startupinfo)
+
+    def communicate_or_kill(self, *args, **kwargs):
+        try:
+            return self.communicate(*args, **kwargs)
+        except BaseException:  # Including KeyboardInterrupt
+            self.kill()
+            self.wait()
+            raise
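# A usage sketch (assumes `ffprobe` is on PATH; any argv works the same way):
proc = Popen(['ffprobe', '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate_or_kill(timeout=10)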
 
 
 def get_subprocess_encoding():
 
 
 def get_subprocess_encoding():
@@ -2285,51 +852,23 @@ def get_subprocess_encoding():
 
 
 def encodeFilename(s, for_subprocess=False):
 
 
 def encodeFilename(s, for_subprocess=False):
-    """
-    @param s The name of the file
-    """
-
-    assert type(s) == compat_str
-
-    # Python 3 has a Unicode API
-    if sys.version_info >= (3, 0):
-        return s
-
-    # Pass '' directly to use Unicode APIs on Windows 2000 and up
-    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
-    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
-    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
-        return s
-
-    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
-    if sys.platform.startswith('java'):
-        return s
-
-    return s.encode(get_subprocess_encoding(), 'ignore')
+    assert isinstance(s, str)
+    return s
 
 
 def decodeFilename(b, for_subprocess=False):
 
 
 def decodeFilename(b, for_subprocess=False):
-
-    if sys.version_info >= (3, 0):
-        return b
-
-    if not isinstance(b, bytes):
-        return b
-
-    return b.decode(get_subprocess_encoding(), 'ignore')
+    return b
 
 
 def encodeArgument(s):
 
 
 def encodeArgument(s):
-    if not isinstance(s, compat_str):
-        # Legacy code that uses byte strings
-        # Uncomment the following line after fixing all post processors
-        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
-        s = s.decode('ascii')
-    return encodeFilename(s, True)
+    # Legacy code that uses byte strings
+    # Uncomment the following line after fixing all post processors
+    # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
+    return s if isinstance(s, str) else s.decode('ascii')
 
 
 def decodeArgument(b):
 
 
 def decodeArgument(b):
-    return decodeFilename(b, True)
+    return b
 
 
 def decodeOption(optval):
 
 
 def decodeOption(optval):
@@ -2342,49 +881,87 @@ def decodeOption(optval):
     return optval
 
 
     return optval
 
 
+_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
+
+
+def timetuple_from_msec(msec):
+    secs, msec = divmod(msec, 1000)
+    mins, secs = divmod(secs, 60)
+    hrs, mins = divmod(mins, 60)
+    return _timetuple(hrs, mins, secs, msec)
+
+
 def formatSeconds(secs, delim=':', msec=False):
 def formatSeconds(secs, delim=':', msec=False):
-    if secs > 3600:
-        ret = '%d%s%02d%s%02d' % (secs // 3600, delim, (secs % 3600) // 60, delim, secs % 60)
-    elif secs > 60:
-        ret = '%d%s%02d' % (secs // 60, delim, secs % 60)
+    time = timetuple_from_msec(secs * 1000)
+    if time.hours:
+        ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
+    elif time.minutes:
+        ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
     else:
     else:
-        ret = '%d' % secs
-    return '%s.%03d' % (ret, secs % 1) if msec else ret
+        ret = '%d' % time.seconds
+    return '%s.%03d' % (ret, time.milliseconds) if msec else ret
+
+
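# Quick sanity checks for the new time helpers:
assert timetuple_from_msec(3723500) == (1, 2, 3, 500)  # 1h 2m 3.5s
assert formatSeconds(3723) == '1:02:03'
assert formatSeconds(45, msec=True) == '45.000'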
+def _ssl_load_windows_store_certs(ssl_context, storename):
+    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
+    try:
+        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
+                 if encoding == 'x509_asn' and (
+                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
+    except PermissionError:
+        return
+    for cert in certs:
+        with contextlib.suppress(ssl.SSLError):
+            ssl_context.load_verify_locations(cadata=cert)
 
 
 def make_HTTPS_handler(params, **kwargs):
 
 
 def make_HTTPS_handler(params, **kwargs):
-    opts_no_check_certificate = params.get('nocheckcertificate', False)
-    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
-        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
-        if opts_no_check_certificate:
-            context.check_hostname = False
-            context.verify_mode = ssl.CERT_NONE
+    opts_check_certificate = not params.get('nocheckcertificate')
+    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+    context.check_hostname = opts_check_certificate
+    if params.get('legacyserverconnect'):
+        context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
+        # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
+        context.set_ciphers('DEFAULT')
+
+    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
+    if opts_check_certificate:
+        if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
+            context.load_verify_locations(cafile=certifi.where())
         try:
         try:
-            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
-        except TypeError:
-            # Python 2.7.8
-            # (create_default_context present but HTTPSHandler has no context=)
-            pass
+            context.load_default_certs()
+        # Work around the issue in load_default_certs when there are bad certificates. See:
+        # https://github.com/yt-dlp/yt-dlp/issues/1060,
+        # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
+        except ssl.SSLError:
+            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
+            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
+                for storename in ('CA', 'ROOT'):
+                    _ssl_load_windows_store_certs(context, storename)
+            context.set_default_verify_paths()
+
+    client_certfile = params.get('client_certificate')
+    if client_certfile:
+        try:
+            context.load_cert_chain(
+                client_certfile, keyfile=params.get('client_certificate_key'),
+                password=params.get('client_certificate_password'))
+        except ssl.SSLError:
+            raise YoutubeDLError('Unable to load client certificate')
 
 
-    if sys.version_info < (3, 2):
-        return YoutubeDLHTTPSHandler(params, **kwargs)
-    else:  # Python < 3.4
-        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
-        context.verify_mode = (ssl.CERT_NONE
-                               if opts_no_check_certificate
-                               else ssl.CERT_REQUIRED)
-        context.set_default_verify_paths()
-        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
+    # Some servers may reject requests if ALPN extension is not sent. See:
+    # https://github.com/python/cpython/issues/85140
+    # https://github.com/yt-dlp/yt-dlp/issues/3878
+    with contextlib.suppress(NotImplementedError):
+        context.set_alpn_protocols(['http/1.1'])
+
+    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
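# A minimal wiring sketch (roughly how YoutubeDL builds its opener internally);
# the dict keys shown are the params consulted above:
https_handler = make_HTTPS_handler({'nocheckcertificate': False, 'legacyserverconnect': False})
opener = compat_urllib_request.build_opener(https_handler)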
 
 
 def bug_reports_message(before=';'):
 
 
 def bug_reports_message(before=';'):
-    if ytdl_is_updateable():
-        update_cmd = 'type  yt-dlp -U  to update'
-    else:
-        update_cmd = 'see  https://github.com/yt-dlp/yt-dlp  on how to update'
-    msg = 'please report this issue on  https://github.com/yt-dlp/yt-dlp .'
-    msg += ' Make sure you are using the latest version; %s.' % update_cmd
-    msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'
+    msg = ('please report this issue on  https://github.com/yt-dlp/yt-dlp/issues?q= , '
+           'filling out the appropriate issue template. '
+           'Confirm you are on the latest version using  yt-dlp -U')
 
     before = before.rstrip()
     if not before or before.endswith(('.', '!', '?')):
 
     before = before.rstrip()
     if not before or before.endswith(('.', '!', '?')):
@@ -2395,7 +972,14 @@ def bug_reports_message(before=';'):
 
 class YoutubeDLError(Exception):
     """Base exception for YoutubeDL errors."""
 
 class YoutubeDLError(Exception):
     """Base exception for YoutubeDL errors."""
-    pass
+    msg = None
+
+    def __init__(self, msg=None):
+        if msg is not None:
+            self.msg = msg
+        elif self.msg is None:
+            self.msg = type(self).__name__
+        super().__init__(self.msg)
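# A sketch of the new default-message behaviour (ExampleError is hypothetical):
class ExampleError(YoutubeDLError):
    msg = 'Example failure'

assert str(ExampleError()) == 'Example failure'
assert str(YoutubeDLError()) == 'YoutubeDLError'
assert str(YoutubeDLError('something broke')) == 'something broke'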
 
 
 network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
 
 
 network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
@@ -2414,7 +998,7 @@ def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=N
         if sys.exc_info()[0] in network_exceptions:
             expected = True
 
         if sys.exc_info()[0] in network_exceptions:
             expected = True
 
-        self.msg = str(msg)
+        self.orig_msg = str(msg)
         self.traceback = tb
         self.expected = expected
         self.cause = cause
         self.traceback = tb
         self.expected = expected
         self.cause = cause
@@ -2422,22 +1006,23 @@ def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=N
         self.ie = ie
         self.exc_info = sys.exc_info()  # preserve original exception
 
         self.ie = ie
         self.exc_info = sys.exc_info()  # preserve original exception
 
-        super(ExtractorError, self).__init__(''.join((
+        super().__init__(''.join((
             format_field(ie, template='[%s] '),
             format_field(video_id, template='%s: '),
             format_field(ie, template='[%s] '),
             format_field(video_id, template='%s: '),
-            self.msg,
+            msg,
             format_field(cause, template=' (caused by %r)'),
             '' if expected else bug_reports_message())))
 
     def format_traceback(self):
             format_field(cause, template=' (caused by %r)'),
             '' if expected else bug_reports_message())))
 
     def format_traceback(self):
-        if self.traceback is None:
-            return None
-        return ''.join(traceback.format_tb(self.traceback))
+        return join_nonempty(
+            self.traceback and ''.join(traceback.format_tb(self.traceback)),
+            self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
+            delim='\n') or None
 
 
 class UnsupportedError(ExtractorError):
     def __init__(self, url):
 
 
 class UnsupportedError(ExtractorError):
     def __init__(self, url):
-        super(UnsupportedError, self).__init__(
+        super().__init__(
             'Unsupported URL: %s' % url, expected=True)
         self.url = url
 
             'Unsupported URL: %s' % url, expected=True)
         self.url = url
 
@@ -2454,9 +1039,9 @@ class GeoRestrictedError(ExtractorError):
     geographic location due to geographic restrictions imposed by a website.
     """
 
     geographic location due to geographic restrictions imposed by a website.
     """
 
-    def __init__(self, msg, countries=None):
-        super(GeoRestrictedError, self).__init__(msg, expected=True)
-        self.msg = msg
+    def __init__(self, msg, countries=None, **kwargs):
+        kwargs['expected'] = True
+        super().__init__(msg, **kwargs)
         self.countries = countries
 
 
         self.countries = countries
 
 
@@ -2470,7 +1055,7 @@ class DownloadError(YoutubeDLError):
 
     def __init__(self, msg, exc_info=None):
         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
 
     def __init__(self, msg, exc_info=None):
         """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
-        super(DownloadError, self).__init__(msg)
+        super().__init__(msg)
         self.exc_info = exc_info
 
 
         self.exc_info = exc_info
 
 
@@ -2480,7 +1065,7 @@ class EntryNotInPlaylist(YoutubeDLError):
     This exception will be thrown by YoutubeDL when a requested entry
     is not found in the playlist info_dict
     """
     This exception will be thrown by YoutubeDL when a requested entry
     is not found in the playlist info_dict
     """
-    pass
+    msg = 'Entry not found in info'
 
 
 class SameFileError(YoutubeDLError):
 
 
 class SameFileError(YoutubeDLError):
@@ -2489,7 +1074,12 @@ class SameFileError(YoutubeDLError):
     This exception will be thrown by FileDownloader objects if they detect
     multiple files would have to be downloaded to the same file on disk.
     """
     This exception will be thrown by FileDownloader objects if they detect
     multiple files would have to be downloaded to the same file on disk.
     """
-    pass
+    msg = 'Fixed output name but more than one file to download'
+
+    def __init__(self, filename=None):
+        if filename is not None:
+            self.msg += f': {filename}'
+        super().__init__(self.msg)
 
 
 class PostProcessingError(YoutubeDLError):
 
 
 class PostProcessingError(YoutubeDLError):
@@ -2499,29 +1089,41 @@ class PostProcessingError(YoutubeDLError):
     indicate an error in the postprocessing task.
     """
 
     indicate an error in the postprocessing task.
     """
 
-    def __init__(self, msg):
-        super(PostProcessingError, self).__init__(msg)
-        self.msg = msg
 
 
+class DownloadCancelled(YoutubeDLError):
+    """ Exception raised when the download queue should be interrupted """
+    msg = 'The download was cancelled'
 
 
-class ExistingVideoReached(YoutubeDLError):
-    """ --max-downloads limit has been reached. """
-    pass
+
+class ExistingVideoReached(DownloadCancelled):
+    """ --break-on-existing triggered """
+    msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
 
 
 
 
-class RejectedVideoReached(YoutubeDLError):
+class RejectedVideoReached(DownloadCancelled):
+    """ --break-on-reject triggered """
+    msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'
+
+
+class MaxDownloadsReached(DownloadCancelled):
     """ --max-downloads limit has been reached. """
     """ --max-downloads limit has been reached. """
-    pass
+    msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
 
 
 
 
-class ThrottledDownload(YoutubeDLError):
-    """ Download speed below --throttled-rate. """
-    pass
+class ReExtractInfo(YoutubeDLError):
+    """ Video info needs to be re-extracted. """
 
 
+    def __init__(self, msg, expected=False):
+        super().__init__(msg)
+        self.expected = expected
 
 
-class MaxDownloadsReached(YoutubeDLError):
-    """ --max-downloads limit has been reached. """
-    pass
+
+class ThrottledDownload(ReExtractInfo):
+    """ Download speed below --throttled-rate. """
+    msg = 'The download speed is below throttle limit'
+
+    def __init__(self):
+        super().__init__(self.msg, expected=False)
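# The cancellation/re-extraction subclasses above mostly just override `msg`, e.g.:
assert isinstance(MaxDownloadsReached(), DownloadCancelled)
assert str(MaxDownloadsReached()) == 'Maximum number of downloads reached, stopping due to --max-downloads'
assert isinstance(ThrottledDownload(), ReExtractInfo)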
 
 
 class UnavailableVideoError(YoutubeDLError):
 
 
 class UnavailableVideoError(YoutubeDLError):
@@ -2530,7 +1132,12 @@ class UnavailableVideoError(YoutubeDLError):
     This exception will be thrown when a video is requested
     in a format that is not available for that video.
     """
     This exception will be thrown when a video is requested
     in a format that is not available for that video.
     """
-    pass
+    msg = 'Unable to download video'
+
+    def __init__(self, err=None):
+        if err is not None:
+            self.msg += f': {err}'
+        super().__init__(self.msg)
 
 
 class ContentTooShortError(YoutubeDLError):
 
 
 class ContentTooShortError(YoutubeDLError):
@@ -2542,9 +1149,7 @@ class ContentTooShortError(YoutubeDLError):
     """
 
     def __init__(self, downloaded, expected):
     """
 
     def __init__(self, downloaded, expected):
-        super(ContentTooShortError, self).__init__(
-            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
-        )
+        super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes')
         # Both in bytes
         self.downloaded = downloaded
         self.expected = expected
         # Both in bytes
         self.downloaded = downloaded
         self.expected = expected
@@ -2552,7 +1157,7 @@ def __init__(self, downloaded, expected):
 
 class XAttrMetadataError(YoutubeDLError):
     def __init__(self, code=None, msg='Unknown error'):
 
 class XAttrMetadataError(YoutubeDLError):
     def __init__(self, code=None, msg='Unknown error'):
-        super(XAttrMetadataError, self).__init__(msg)
+        super().__init__(msg)
         self.code = code
         self.msg = msg
 
         self.code = code
         self.msg = msg
 
@@ -2571,12 +1176,7 @@ class XAttrUnavailableError(YoutubeDLError):
 
 
 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
 
 
 def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
-    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
-    # expected HTTP responses to meet HTTP/1.0 or later (see also
-    # https://github.com/ytdl-org/youtube-dl/issues/6727)
-    if sys.version_info < (3, 0):
-        kwargs['strict'] = True
-    hc = http_class(*args, **compat_kwargs(kwargs))
+    hc = http_class(*args, **kwargs)
     source_address = ydl_handler._params.get('source_address')
 
     if source_address is not None:
     source_address = ydl_handler._params.get('source_address')
 
     if source_address is not None:
@@ -2593,7 +1193,7 @@ def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_a
             ip_addrs = [addr for addr in addrs if addr[0] == af]
             if addrs and not ip_addrs:
                 ip_version = 'v4' if af == socket.AF_INET else 'v6'
             ip_addrs = [addr for addr in addrs if addr[0] == af]
             if addrs and not ip_addrs:
                 ip_version = 'v4' if af == socket.AF_INET else 'v6'
-                raise socket.error(
+                raise OSError(
                     "No remote IP%s addresses available for connect, can't use '%s' as source address"
                     % (ip_version, source_address[0]))
             for res in ip_addrs:
                     "No remote IP%s addresses available for connect, can't use '%s' as source address"
                     % (ip_version, source_address[0]))
             for res in ip_addrs:
@@ -2607,30 +1207,17 @@ def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_a
                     sock.connect(sa)
                     err = None  # Explicitly break reference cycle
                     return sock
                     sock.connect(sa)
                     err = None  # Explicitly break reference cycle
                     return sock
-                except socket.error as _:
+                except OSError as _:
                     err = _
                     if sock is not None:
                         sock.close()
             if err is not None:
                 raise err
             else:
                     err = _
                     if sock is not None:
                         sock.close()
             if err is not None:
                 raise err
             else:
-                raise socket.error('getaddrinfo returns an empty list')
+                raise OSError('getaddrinfo returns an empty list')
         if hasattr(hc, '_create_connection'):
             hc._create_connection = _create_connection
         if hasattr(hc, '_create_connection'):
             hc._create_connection = _create_connection
-        sa = (source_address, 0)
-        if hasattr(hc, 'source_address'):  # Python 2.7+
-            hc.source_address = sa
-        else:  # Python 2.6
-            def _hc_connect(self, *args, **kwargs):
-                sock = _create_connection(
-                    (self.host, self.port), self.timeout, sa)
-                if is_https:
-                    self.sock = ssl.wrap_socket(
-                        sock, self.key_file, self.cert_file,
-                        ssl_version=ssl.PROTOCOL_TLSv1)
-                else:
-                    self.sock = sock
-            hc.connect = functools.partial(_hc_connect, hc)
+        hc.source_address = (source_address, 0)
 
     return hc
 
 
     return hc
 
@@ -2639,7 +1226,7 @@ def handle_youtubedl_headers(headers):
     filtered_headers = headers
 
     if 'Youtubedl-no-compression' in filtered_headers:
     filtered_headers = headers
 
     if 'Youtubedl-no-compression' in filtered_headers:
-        filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
+        filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'}
         del filtered_headers['Youtubedl-no-compression']
 
     return filtered_headers
         del filtered_headers['Youtubedl-no-compression']
 
     return filtered_headers
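# The internal 'Youtubedl-no-compression' pseudo-header is consumed here and
# removes any Accept-Encoding header, e.g.:
assert handle_youtubedl_headers(
    {'User-Agent': 'UA', 'Accept-Encoding': 'gzip', 'Youtubedl-no-compression': 'True'}) == {'User-Agent': 'UA'}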
@@ -2688,6 +1275,12 @@ def deflate(data):
         except zlib.error:
             return zlib.decompress(data)
 
         except zlib.error:
             return zlib.decompress(data)
 
+    @staticmethod
+    def brotli(data):
+        if not data:
+            return data
+        return brotli.decompress(data)
+
     def http_request(self, req):
         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
         # always respected by websites, some tend to give out URLs with non percent-encoded
     def http_request(self, req):
         # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
         # always respected by websites, some tend to give out URLs with non percent-encoded
@@ -2704,18 +1297,16 @@ def http_request(self, req):
         if url != url_escaped:
             req = update_Request(req, url=url_escaped)
 
         if url != url_escaped:
             req = update_Request(req, url=url_escaped)
 
-        for h, v in std_headers.items():
+        for h, v in self._params.get('http_headers', std_headers).items():
             # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
             # The dict keys are capitalized because of this bug by urllib
             if h.capitalize() not in req.headers:
                 req.add_header(h, v)
 
             # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
             # The dict keys are capitalized because of this bug by urllib
             if h.capitalize() not in req.headers:
                 req.add_header(h, v)
 
-        req.headers = handle_youtubedl_headers(req.headers)
+        if 'Accept-encoding' not in req.headers:
+            req.add_header('Accept-encoding', ', '.join(SUPPORTED_ENCODINGS))
 
 
-        if sys.version_info < (2, 7) and '#' in req.get_full_url():
-            # Python 2.6 is brain-dead when it comes to fragments
-            req._Request__original = req._Request__original.partition('#')[0]
-            req._Request__r_type = req._Request__r_type.partition('#')[0]
+        req.headers = handle_youtubedl_headers(req.headers)
 
         return req
 
 
         return req
 
@@ -2727,14 +1318,14 @@ def http_response(self, req, resp):
             gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
             try:
                 uncompressed = io.BytesIO(gz.read())
             gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
             try:
                 uncompressed = io.BytesIO(gz.read())
-            except IOError as original_ioerror:
+            except OSError as original_ioerror:
                 # There may be junk at the end of the file
                 # See http://stackoverflow.com/q/4928560/35070 for details
                 for i in range(1, 1024):
                     try:
                         gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                         uncompressed = io.BytesIO(gz.read())
                 # There may be junk at the end of the file
                 # See http://stackoverflow.com/q/4928560/35070 for details
                 for i in range(1, 1024):
                     try:
                         gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                         uncompressed = io.BytesIO(gz.read())
-                    except IOError:
+                    except OSError:
                         continue
                     break
                 else:
                         continue
                     break
                 else:
@@ -2748,21 +1339,22 @@ def http_response(self, req, resp):
             resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
             resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
+        # brotli
+        if resp.headers.get('Content-encoding', '') == 'br':
+            resp = compat_urllib_request.addinfourl(
+                io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
+            resp.msg = old_resp.msg
+            del resp.headers['Content-encoding']
         # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
         # https://github.com/ytdl-org/youtube-dl/issues/6457).
         if 300 <= resp.code < 400:
             location = resp.headers.get('Location')
             if location:
                 # As of RFC 2616, the default charset is iso-8859-1, which is respected by Python 3
         # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
         # https://github.com/ytdl-org/youtube-dl/issues/6457).
         if 300 <= resp.code < 400:
             location = resp.headers.get('Location')
             if location:
                 # As of RFC 2616, the default charset is iso-8859-1, which is respected by Python 3
-                if sys.version_info >= (3, 0):
-                    location = location.encode('iso-8859-1').decode('utf-8')
-                else:
-                    location = location.decode('utf-8')
+                location = location.encode('iso-8859-1').decode()
                 location_escaped = escape_url(location)
                 if location != location_escaped:
                     del resp.headers['Location']
                 location_escaped = escape_url(location)
                 if location != location_escaped:
                     del resp.headers['Location']
-                    if sys.version_info < (3, 0):
-                        location_escaped = location_escaped.encode('utf-8')
                     resp.headers['Location'] = location_escaped
         return resp
 
                     resp.headers['Location'] = location_escaped
         return resp
 
@@ -2799,7 +1391,7 @@ class SocksConnection(base_class):
         def connect(self):
             self.sock = sockssocket()
             self.sock.setproxy(*proxy_args)
         def connect(self):
             self.sock = sockssocket()
             self.sock.setproxy(*proxy_args)
-            if type(self.timeout) in (int, float):
+            if isinstance(self.timeout, (int, float)):
                 self.sock.settimeout(self.timeout)
             self.sock.connect((self.host, self.port))
 
                 self.sock.settimeout(self.timeout)
             self.sock.connect((self.host, self.port))
 
@@ -2833,9 +1425,14 @@ def https_open(self, req):
             conn_class = make_socks_conn_class(conn_class, socks_proxy)
             del req.headers['Ytdl-socks-proxy']
 
             conn_class = make_socks_conn_class(conn_class, socks_proxy)
             del req.headers['Ytdl-socks-proxy']
 
-        return self.do_open(functools.partial(
-            _create_http_connection, self, conn_class, True),
-            req, **kwargs)
+        try:
+            return self.do_open(
+                functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs)
+        except urllib.error.URLError as e:
+            if (isinstance(e.reason, ssl.SSLError)
+                    and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'):
+                raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect')
+            raise
 
 
 class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
 
 
 class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
@@ -2854,57 +1451,71 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
         'CookieFileEntry',
         ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
 
         'CookieFileEntry',
         ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
 
-    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+    def __init__(self, filename=None, *args, **kwargs):
+        super().__init__(None, *args, **kwargs)
+        if self.is_path(filename):
+            filename = os.fspath(filename)
+        self.filename = filename
+
+    @staticmethod
+    def _true_or_false(cndn):
+        return 'TRUE' if cndn else 'FALSE'
+
+    @staticmethod
+    def is_path(file):
+        return isinstance(file, (str, bytes, os.PathLike))
+
+    @contextlib.contextmanager
+    def open(self, file, *, write=False):
+        if self.is_path(file):
+            with open(file, 'w' if write else 'r', encoding='utf-8') as f:
+                yield f
+        else:
+            if write:
+                file.truncate(0)
+            yield file
+
+    def _really_save(self, f, ignore_discard=False, ignore_expires=False):
+        now = time.time()
+        for cookie in self:
+            if (not ignore_discard and cookie.discard
+                    or not ignore_expires and cookie.is_expired(now)):
+                continue
+            name, value = cookie.name, cookie.value
+            if value is None:
+                # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                # with no name, whereas http.cookiejar regards it as a
+                # cookie with no value.
+                name, value = '', name
+            f.write('%s\n' % '\t'.join((
+                cookie.domain,
+                self._true_or_false(cookie.domain.startswith('.')),
+                cookie.path,
+                self._true_or_false(cookie.secure),
+                str_or_none(cookie.expires, default=''),
+                name, value
+            )))
+
+    def save(self, filename=None, *args, **kwargs):
         """
         Save cookies to a file.
         """
         Save cookies to a file.
+        Code is taken from CPython 3.6
+        https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """
 
 
-        Most of the code is taken from CPython 3.8 and slightly adapted
-        to support cookie files with UTF-8 in both python 2 and 3.
-        """
         if filename is None:
             if self.filename is not None:
                 filename = self.filename
             else:
                 raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
 
         if filename is None:
             if self.filename is not None:
                 filename = self.filename
             else:
                 raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
 
-        # Store session cookies with `expires` set to 0 instead of an empty
-        # string
+        # Store session cookies with `expires` set to 0 instead of an empty string
         for cookie in self:
             if cookie.expires is None:
                 cookie.expires = 0
 
         for cookie in self:
             if cookie.expires is None:
                 cookie.expires = 0
 
-        with io.open(filename, 'w', encoding='utf-8') as f:
+        with self.open(filename, write=True) as f:
             f.write(self._HEADER)
             f.write(self._HEADER)
-            now = time.time()
-            for cookie in self:
-                if not ignore_discard and cookie.discard:
-                    continue
-                if not ignore_expires and cookie.is_expired(now):
-                    continue
-                if cookie.secure:
-                    secure = 'TRUE'
-                else:
-                    secure = 'FALSE'
-                if cookie.domain.startswith('.'):
-                    initial_dot = 'TRUE'
-                else:
-                    initial_dot = 'FALSE'
-                if cookie.expires is not None:
-                    expires = compat_str(cookie.expires)
-                else:
-                    expires = ''
-                if cookie.value is None:
-                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
-                    # with no name, whereas http.cookiejar regards it as a
-                    # cookie with no value.
-                    name = ''
-                    value = cookie.name
-                else:
-                    name = cookie.name
-                    value = cookie.value
-                f.write(
-                    '\t'.join([cookie.domain, initial_dot, cookie.path,
-                               secure, expires, name, value]) + '\n')
+            self._really_save(f, *args, **kwargs)
 
     def load(self, filename=None, ignore_discard=False, ignore_expires=False):
         """Load cookies from a file."""
 
     def load(self, filename=None, ignore_discard=False, ignore_expires=False):
         """Load cookies from a file."""
@@ -2929,14 +1540,16 @@ def prepare_line(line):
             return line
 
         cf = io.StringIO()
             return line
 
         cf = io.StringIO()
-        with io.open(filename, encoding='utf-8') as f:
+        with self.open(filename) as f:
             for line in f:
                 try:
                     cf.write(prepare_line(line))
                 except compat_cookiejar.LoadError as e:
             for line in f:
                 try:
                     cf.write(prepare_line(line))
                 except compat_cookiejar.LoadError as e:
-                    write_string(
-                        'WARNING: skipping cookie file entry due to %s: %r\n'
-                        % (e, line), sys.stderr)
+                    if f'{line.strip()} '[0] in '[{"':
+                        raise compat_cookiejar.LoadError(
+                            'Cookies file must be Netscape formatted, not JSON. See  '
+                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
+                    write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
                     continue
         cf.seek(0)
         self._really_load(cf, filename, ignore_discard, ignore_expires)
                     continue
         cf.seek(0)
         self._really_load(cf, filename, ignore_discard, ignore_expires)
@@ -2961,19 +1574,6 @@ def __init__(self, cookiejar=None):
         compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
 
     def http_response(self, request, response):
         compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
 
     def http_response(self, request, response):
-        # Python 2 will choke on next HTTP request in row if there are non-ASCII
-        # characters in Set-Cookie HTTP header of last response (see
-        # https://github.com/ytdl-org/youtube-dl/issues/6769).
-        # In order to at least prevent crashing we will percent encode Set-Cookie
-        # header before HTTPCookieProcessor starts processing it.
-        # if sys.version_info < (3, 0) and response.headers:
-        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
-        #         set_cookie = response.headers.get(set_cookie_header)
-        #         if set_cookie:
-        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
-        #             if set_cookie != set_cookie_escaped:
-        #                 del response.headers[set_cookie_header]
-        #                 response.headers[set_cookie_header] = set_cookie_escaped
         return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
 
     https_request = compat_urllib_request.HTTPCookieProcessor.http_request
         return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
 
     https_request = compat_urllib_request.HTTPCookieProcessor.http_request
@@ -3017,12 +1617,6 @@ def redirect_request(self, req, fp, code, msg, headers, newurl):
         # essentially all clients do redirect in this case, so we do
         # the same.
 
         # essentially all clients do redirect in this case, so we do
         # the same.
 
-        # On python 2 urlh.geturl() may sometimes return redirect URL
-        # as byte string instead of unicode. This workaround allows
-        # to force it always return unicode.
-        if sys.version_info[0] < 3:
-            newurl = compat_str(newurl)
-
         # Be conciliant with URIs containing a space.  This is mainly
         # redundant with the more complete encoding done in http_error_302(),
         # but it is kept for compatibility with other callers.
         # Be conciliant with URIs containing a space.  This is mainly
         # redundant with the more complete encoding done in http_error_302(),
         # but it is kept for compatibility with other callers.
@@ -3030,11 +1624,22 @@ def redirect_request(self, req, fp, code, msg, headers, newurl):
 
         CONTENT_HEADERS = ("content-length", "content-type")
         # NB: don't use dict comprehension for python 2.6 compatibility
 
         CONTENT_HEADERS = ("content-length", "content-type")
         # NB: don't use dict comprehension for python 2.6 compatibility
-        newheaders = dict((k, v) for k, v in req.headers.items()
-                          if k.lower() not in CONTENT_HEADERS)
+        newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS}
+
+        # A 303 must either use GET or HEAD for subsequent request
+        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4
+        if code == 303 and m != 'HEAD':
+            m = 'GET'
+        # 301 and 302 redirects are commonly turned into a GET from a POST
+        # for subsequent requests by browsers, so we'll do the same.
+        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2
+        # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3
+        if code in (301, 302) and m == 'POST':
+            m = 'GET'
+
         return compat_urllib_request.Request(
             newurl, headers=newheaders, origin_req_host=req.origin_req_host,
         return compat_urllib_request.Request(
             newurl, headers=newheaders, origin_req_host=req.origin_req_host,
-            unverifiable=True)
+            unverifiable=True, method=m)
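# A sketch of the new method mapping (YoutubeDLRedirectHandler is the class this
# method belongs to): a POST redirected via 301/302/303 is re-issued as a GET.
handler = YoutubeDLRedirectHandler()
post_req = compat_urllib_request.Request('https://example.com/submit', data=b'a=1', method='POST')
new_req = handler.redirect_request(post_req, None, 302, 'Found', {}, 'https://example.com/elsewhere')
assert new_req.get_method() == 'GET'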
 
 
 def extract_timezone(date_str):
 
 
 def extract_timezone(date_str):
@@ -3074,12 +1679,10 @@ def parse_iso8601(date_str, delimiter='T', timezone=None):
     if timezone is None:
         timezone, date_str = extract_timezone(date_str)
 
     if timezone is None:
         timezone, date_str = extract_timezone(date_str)
 
-    try:
-        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
+    with contextlib.suppress(ValueError):
+        date_format = f'%Y-%m-%d{delimiter}%H:%M:%S'
         dt = datetime.datetime.strptime(date_str, date_format) - timezone
         return calendar.timegm(dt.timetuple())
         dt = datetime.datetime.strptime(date_str, date_format) - timezone
         return calendar.timegm(dt.timetuple())
-    except ValueError:
-        pass
 
 
 def date_formats(day_first=True):
 
 
 def date_formats(day_first=True):
@@ -3099,17 +1702,13 @@ def unified_strdate(date_str, day_first=True):
     _, date_str = extract_timezone(date_str)
 
     for expression in date_formats(day_first):
     _, date_str = extract_timezone(date_str)
 
     for expression in date_formats(day_first):
-        try:
+        with contextlib.suppress(ValueError):
             upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
             upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
-        except ValueError:
-            pass
     if upload_date is None:
         timetuple = email.utils.parsedate_tz(date_str)
         if timetuple:
     if upload_date is None:
         timetuple = email.utils.parsedate_tz(date_str)
         if timetuple:
-            try:
+            with contextlib.suppress(ValueError):
                 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
                 upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
-            except ValueError:
-                pass
     if upload_date is not None:
         return compat_str(upload_date)
 
     if upload_date is not None:
         return compat_str(upload_date)
 
@@ -3137,11 +1736,9 @@ def unified_timestamp(date_str, day_first=True):
         date_str = m.group(1)
 
     for expression in date_formats(day_first):
         date_str = m.group(1)
 
     for expression in date_formats(day_first):
-        try:
+        with contextlib.suppress(ValueError):
             dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
             return calendar.timegm(dt.timetuple())
             dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
             return calendar.timegm(dt.timetuple())
-        except ValueError:
-            pass
     timetuple = email.utils.parsedate_tz(date_str)
     if timetuple:
         return calendar.timegm(timetuple) + pm_delta * 3600
     timetuple = email.utils.parsedate_tz(date_str)
     if timetuple:
         return calendar.timegm(timetuple) + pm_delta * 3600
@@ -3165,26 +1762,26 @@ def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
 
 
 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
 
 
 def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
-    """
-    Return a datetime object from a string in the format YYYYMMDD or
-    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
-
-    format: string date format used to return datetime object from
-    precision: round the time portion of a datetime object.
-                auto|microsecond|second|minute|hour|day.
-                auto: round to the unit provided in date_str (if applicable).
+    R"""
+    Return a datetime object from a string.
+    Supported format:
+        (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)?
+
+    @param format       strftime format of DATE
+    @param precision    Round the datetime object: auto|microsecond|second|minute|hour|day
+                        auto: round to the unit provided in date_str (if applicable).
     """
     auto_precision = False
     if precision == 'auto':
         auto_precision = True
         precision = 'microsecond'
     """
     auto_precision = False
     if precision == 'auto':
         auto_precision = True
         precision = 'microsecond'
-    today = datetime_round(datetime.datetime.now(), precision)
+    today = datetime_round(datetime.datetime.utcnow(), precision)
     if date_str in ('now', 'today'):
         return today
     if date_str == 'yesterday':
         return today - datetime.timedelta(days=1)
     match = re.match(
     if date_str in ('now', 'today'):
         return today
     if date_str == 'yesterday':
         return today - datetime.timedelta(days=1)
     match = re.match(
-        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
+        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?',
         date_str)
     if match is not None:
         start_time = datetime_from_str(match.group('start'), precision, format)
         date_str)
     if match is not None:
         start_time = datetime_from_str(match.group('start'), precision, format)
@@ -3206,13 +1803,15 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
     return datetime_round(datetime.datetime.strptime(date_str, format), precision)
 
 
     return datetime_round(datetime.datetime.strptime(date_str, format), precision)
 
 
-def date_from_str(date_str, format='%Y%m%d'):
-    """
-    Return a datetime object from a string in the format YYYYMMDD or
-    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
+def date_from_str(date_str, format='%Y%m%d', strict=False):
+    R"""
+    Return a date object from a string using datetime_from_str
 
 
-    format: string date format used to return datetime object from
+    @param strict  Restrict allowed patterns to "YYYYMMDD" and
+                   (now|today|yesterday)(-\d+(day|week|month|year)s?)?
     """
     """
+    if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str):
+        raise ValueError(f'Invalid date format "{date_str}"')
     return datetime_from_str(date_str, precision='microsecond', format=format).date()
 
 
     return datetime_from_str(date_str, precision='microsecond', format=format).date()
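A minimal usage sketch of the relative-date syntax and the new strict mode (dates shown are illustrative):

    from yt_dlp.utils import date_from_str, datetime_from_str

    date_from_str('20220315')                    # datetime.date(2022, 3, 15)
    date_from_str('today-1week')                 # the date one week ago (UTC-based)
    datetime_from_str('now-3hours', precision='second')   # 3 hours ago, rounded to seconds
    date_from_str('now-1day', strict=True)       # accepted by the strict pattern
    # date_from_str('now+5hours', strict=True) raises ValueError: '+' offsets and
    # sub-day units are rejected when strict=True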
 
 
@@ -3253,17 +1852,17 @@ def hyphenate_date(date_str):
         return date_str
 
 
         return date_str
 
 
-class DateRange(object):
+class DateRange:
     """Represents a time interval between two dates"""
 
     def __init__(self, start=None, end=None):
         """start and end must be strings in the format accepted by date"""
         if start is not None:
     """Represents a time interval between two dates"""
 
     def __init__(self, start=None, end=None):
         """start and end must be strings in the format accepted by date"""
         if start is not None:
-            self.start = date_from_str(start)
+            self.start = date_from_str(start, strict=True)
         else:
             self.start = datetime.datetime.min.date()
         if end is not None:
         else:
             self.start = datetime.datetime.min.date()
         if end is not None:
-            self.end = date_from_str(end)
+            self.end = date_from_str(end, strict=True)
         else:
             self.end = datetime.datetime.max.date()
         if self.start > self.end:
         else:
             self.end = datetime.datetime.max.date()
         if self.start > self.end:
@@ -3281,7 +1880,7 @@ def __contains__(self, date):
         return self.start <= date <= self.end
 
     def __str__(self):
         return self.start <= date <= self.end
 
     def __str__(self):
-        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
+        return f'{self.start.isoformat()} - {self.end.isoformat()}'
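An illustrative DateRange check; since start/end now go through the strict parser, only YYYYMMDD dates and simple day/week/month/year offsets are accepted:

    import datetime
    from yt_dlp.utils import DateRange

    wanted = DateRange(start='20220101', end='today')
    datetime.date(2022, 3, 15) in wanted      # True (when run after 2022-03-15)
    datetime.date(2021, 12, 31) in wanted     # False
    # DateRange(start='now-5minutes') now raises ValueError instead of being parsed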
 
 
 def platform_name():
 
 
 def platform_name():
@@ -3294,108 +1893,30 @@ def platform_name():
     return res
 
 
     return res
 
 
+@functools.cache
 def get_windows_version():
 def get_windows_version():
-    ''' Get Windows version. None if it's not running on Windows '''
+    ''' Get Windows version. Returns () if it's not running on Windows '''
     if compat_os_name == 'nt':
         return version_tuple(platform.win32_ver()[1])
     else:
     if compat_os_name == 'nt':
         return version_tuple(platform.win32_ver()[1])
     else:
-        return None
-
-
-def _windows_write_string(s, out):
-    """ Returns True if the string was written using special methods,
-    False if it has yet to be written out."""
-    # Adapted from http://stackoverflow.com/a/3259271/35070
-
-    import ctypes
-    import ctypes.wintypes
-
-    WIN_OUTPUT_IDS = {
-        1: -11,
-        2: -12,
-    }
-
-    try:
-        fileno = out.fileno()
-    except AttributeError:
-        # If the output stream doesn't have a fileno, it's virtual
-        return False
-    except io.UnsupportedOperation:
-        # Some strange Windows pseudo files?
-        return False
-    if fileno not in WIN_OUTPUT_IDS:
-        return False
-
-    GetStdHandle = compat_ctypes_WINFUNCTYPE(
-        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
-        ('GetStdHandle', ctypes.windll.kernel32))
-    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
-
-    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
-        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
-        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
-        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
-    written = ctypes.wintypes.DWORD(0)
-
-    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
-    FILE_TYPE_CHAR = 0x0002
-    FILE_TYPE_REMOTE = 0x8000
-    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
-        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
-        ctypes.POINTER(ctypes.wintypes.DWORD))(
-        ('GetConsoleMode', ctypes.windll.kernel32))
-    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
-
-    def not_a_console(handle):
-        if handle == INVALID_HANDLE_VALUE or handle is None:
-            return True
-        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
-                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
-
-    if not_a_console(h):
-        return False
-
-    def next_nonbmp_pos(s):
-        try:
-            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
-        except StopIteration:
-            return len(s)
-
-    while s:
-        count = min(next_nonbmp_pos(s), 1024)
-
-        ret = WriteConsoleW(
-            h, s, count if count else 2, ctypes.byref(written), None)
-        if ret == 0:
-            raise OSError('Failed to write string')
-        if not count:  # We just wrote a non-BMP character
-            assert written.value == 2
-            s = s[1:]
-        else:
-            assert written.value > 0
-            s = s[written.value:]
-    return True
+        return ()
 
 
 def write_string(s, out=None, encoding=None):
 
 
 def write_string(s, out=None, encoding=None):
-    if out is None:
-        out = sys.stderr
-    assert type(s) == compat_str
+    assert isinstance(s, str)
+    out = out or sys.stderr
 
 
-    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
-        if _windows_write_string(s, out):
-            return
+    if compat_os_name == 'nt' and supports_terminal_sequences(out):
+        s = re.sub(r'([\r\n]+)', r' \1', s)
 
 
-    if ('b' in getattr(out, 'mode', '')
-            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
-        byt = s.encode(encoding or preferredencoding(), 'ignore')
-        out.write(byt)
+    enc, buffer = None, out
+    if 'b' in getattr(out, 'mode', ''):
+        enc = encoding or preferredencoding()
     elif hasattr(out, 'buffer'):
     elif hasattr(out, 'buffer'):
+        buffer = out.buffer
         enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
         enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
-        byt = s.encode(enc, 'ignore')
-        out.buffer.write(byt)
-    else:
-        out.write(s)
+
+    buffer.write(s.encode(enc, 'ignore') if enc else s)
     out.flush()
 
 
     out.flush()
 
 
@@ -3414,6 +1935,13 @@ def intlist_to_bytes(xs):
     return compat_struct_pack('%dB' % len(xs), *xs)
 
 
     return compat_struct_pack('%dB' % len(xs), *xs)
 
 
+class LockingUnsupportedError(OSError):
+    msg = 'File locking is not supported'
+
+    def __init__(self):
+        super().__init__(self.msg)
+
+
 # Cross-platform file locking
 if sys.platform == 'win32':
     import ctypes.wintypes
 # Cross-platform file locking
 if sys.platform == 'win32':
     import ctypes.wintypes
@@ -3451,75 +1979,120 @@ class OVERLAPPED(ctypes.Structure):
     whole_low = 0xffffffff
     whole_high = 0x7fffffff
 
     whole_low = 0xffffffff
     whole_high = 0x7fffffff
 
-    def _lock_file(f, exclusive):
+    def _lock_file(f, exclusive, block):
         overlapped = OVERLAPPED()
         overlapped.Offset = 0
         overlapped.OffsetHigh = 0
         overlapped.hEvent = 0
         f._lock_file_overlapped_p = ctypes.pointer(overlapped)
         overlapped = OVERLAPPED()
         overlapped.Offset = 0
         overlapped.OffsetHigh = 0
         overlapped.hEvent = 0
         f._lock_file_overlapped_p = ctypes.pointer(overlapped)
-        handle = msvcrt.get_osfhandle(f.fileno())
-        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
-                          whole_low, whole_high, f._lock_file_overlapped_p):
-            raise OSError('Locking file failed: %r' % ctypes.FormatError())
+
+        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
+                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
+                          0, whole_low, whole_high, f._lock_file_overlapped_p):
+            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())
 
     def _unlock_file(f):
         assert f._lock_file_overlapped_p
         handle = msvcrt.get_osfhandle(f.fileno())
 
     def _unlock_file(f):
         assert f._lock_file_overlapped_p
         handle = msvcrt.get_osfhandle(f.fileno())
-        if not UnlockFileEx(handle, 0,
-                            whole_low, whole_high, f._lock_file_overlapped_p):
+        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
             raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
 
 else:
             raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
 
 else:
-    # Some platforms, such as Jython, is missing fcntl
     try:
         import fcntl
 
     try:
         import fcntl
 
-        def _lock_file(f, exclusive):
-            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
+        def _lock_file(f, exclusive, block):
+            flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
+            if not block:
+                flags |= fcntl.LOCK_NB
+            try:
+                fcntl.flock(f, flags)
+            except BlockingIOError:
+                raise
+            except OSError:  # AOSP does not have flock()
+                fcntl.lockf(f, flags)
 
         def _unlock_file(f):
 
         def _unlock_file(f):
-            fcntl.flock(f, fcntl.LOCK_UN)
+            try:
+                fcntl.flock(f, fcntl.LOCK_UN)
+            except OSError:
+                fcntl.lockf(f, fcntl.LOCK_UN)
+
     except ImportError:
     except ImportError:
-        UNSUPPORTED_MSG = 'file locking is not supported on this platform'
 
 
-        def _lock_file(f, exclusive):
-            raise IOError(UNSUPPORTED_MSG)
+        def _lock_file(f, exclusive, block):
+            raise LockingUnsupportedError()
 
         def _unlock_file(f):
 
         def _unlock_file(f):
-            raise IOError(UNSUPPORTED_MSG)
+            raise LockingUnsupportedError()
+
+
+class locked_file:
+    locked = False
 
 
+    def __init__(self, filename, mode, block=True, encoding=None):
+        if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}:
+            raise NotImplementedError(mode)
+        self.mode, self.block = mode, block
 
 
-class locked_file(object):
-    def __init__(self, filename, mode, encoding=None):
-        assert mode in ['r', 'a', 'w']
-        self.f = io.open(filename, mode, encoding=encoding)
-        self.mode = mode
+        writable = any(f in mode for f in 'wax+')
+        readable = any(f in mode for f in 'r+')
+        flags = functools.reduce(operator.ior, (
+            getattr(os, 'O_CLOEXEC', 0),  # UNIX only
+            getattr(os, 'O_BINARY', 0),  # Windows only
+            getattr(os, 'O_NOINHERIT', 0),  # Windows only
+            os.O_CREAT if writable else 0,  # O_TRUNC only after locking
+            os.O_APPEND if 'a' in mode else 0,
+            os.O_EXCL if 'x' in mode else 0,
+            os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY,
+        ))
+
+        self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding)
 
     def __enter__(self):
 
     def __enter__(self):
-        exclusive = self.mode != 'r'
+        exclusive = 'r' not in self.mode
         try:
         try:
-            _lock_file(self.f, exclusive)
-        except IOError:
+            _lock_file(self.f, exclusive, self.block)
+            self.locked = True
+        except OSError:
             self.f.close()
             raise
             self.f.close()
             raise
+        if 'w' in self.mode:
+            try:
+                self.f.truncate()
+            except OSError as e:
+                if e.errno not in (
+                    errno.ESPIPE,  # Illegal seek - expected for FIFO
+                    errno.EINVAL,  # Invalid argument - expected for /dev/null
+                ):
+                    raise
         return self
 
         return self
 
-    def __exit__(self, etype, value, traceback):
+    def unlock(self):
+        if not self.locked:
+            return
         try:
             _unlock_file(self.f)
         try:
             _unlock_file(self.f)
+        finally:
+            self.locked = False
+
+    def __exit__(self, *_):
+        try:
+            self.unlock()
         finally:
             self.f.close()
 
         finally:
             self.f.close()
 
-    def __iter__(self):
-        return iter(self.f)
+    open = __enter__
+    close = __exit__
 
 
-    def write(self, *args):
-        return self.f.write(*args)
+    def __getattr__(self, attr):
+        return getattr(self.f, attr)
 
 
-    def read(self, *args):
-        return self.f.read(*args)
+    def __iter__(self):
+        return iter(self.f)
 
 
 
 
+@functools.cache
 def get_filesystem_encoding():
     encoding = sys.getfilesystemencoding()
     return encoding if encoding is not None else 'utf-8'
 def get_filesystem_encoding():
     encoding = sys.getfilesystemencoding()
     return encoding if encoding is not None else 'utf-8'
@@ -3555,18 +2128,22 @@ def unsmuggle_url(smug_url, default=None):
     return url, data
 
 
     return url, data
 
 
+def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
+    """ Formats numbers with decimal sufixes like K, M, etc """
+    num, factor = float_or_none(num), float(factor)
+    if num is None or num < 0:
+        return None
+    POSSIBLE_SUFFIXES = 'kMGTPEZY'
+    exponent = 0 if num == 0 else min(int(math.log(num, factor)), len(POSSIBLE_SUFFIXES))
+    suffix = ['', *POSSIBLE_SUFFIXES][exponent]
+    if factor == 1024:
+        suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
+    converted = num / (factor ** exponent)
+    return fmt % (converted, suffix)
+
+
 def format_bytes(bytes):
 def format_bytes(bytes):
-    if bytes is None:
-        return 'N/A'
-    if type(bytes) is str:
-        bytes = float(bytes)
-    if bytes == 0.0:
-        exponent = 0
-    else:
-        exponent = int(math.log(bytes, 1024.0))
-    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
-    converted = float(bytes) / float(1024 ** exponent)
-    return '%.2f%s' % (converted, suffix)
+    return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
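A small sketch of the new generic helper and the simplified format_bytes built on top of it:

    from yt_dlp.utils import format_bytes, format_decimal_suffix

    format_decimal_suffix(12500, '%.1f%s')               # '12.5k' (decimal, factor=1000)
    format_decimal_suffix(1536, '%.2f%sB', factor=1024)  # '1.50KiB' (binary suffixes)
    format_bytes(1536)                                    # '1.50KiB'
    format_bytes(None)                                    # 'N/A'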
 
 
 def lookup_unit_table(unit_table, s):
 
 
 def lookup_unit_table(unit_table, s):
@@ -3655,7 +2232,7 @@ def parse_count(s):
     if s is None:
         return None
 
     if s is None:
         return None
 
-    s = s.strip()
+    s = re.sub(r'^[^\d]+\s', '', s).strip()
 
     if re.match(r'^[\d,.]+$', s):
         return str_to_int(s)
 
     if re.match(r'^[\d,.]+$', s):
         return str_to_int(s)
@@ -3667,23 +2244,34 @@ def parse_count(s):
         'M': 1000 ** 2,
         'kk': 1000 ** 2,
         'KK': 1000 ** 2,
         'M': 1000 ** 2,
         'kk': 1000 ** 2,
         'KK': 1000 ** 2,
+        'b': 1000 ** 3,
+        'B': 1000 ** 3,
     }
 
     }
 
-    return lookup_unit_table(_UNIT_TABLE, s)
+    ret = lookup_unit_table(_UNIT_TABLE, s)
+    if ret is not None:
+        return ret
+
+    mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
+    if mobj:
+        return str_to_int(mobj.group(1))
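With the extra fallbacks above, parse_count can now also pull a count out of longer strings (inputs are illustrative):

    from yt_dlp.utils import parse_count

    parse_count('1.5M')            # 1500000
    parse_count('1,000 views')     # 1000 -- trailing text handled by the new fallback
    parse_count('Views: 2.1k')     # 2100 -- leading label stripped by the new re.sub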
 
 
 
 
-def parse_resolution(s):
+def parse_resolution(s, *, lenient=False):
     if s is None:
         return {}
 
     if s is None:
         return {}
 
-    mobj = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
+    if lenient:
+        mobj = re.search(r'(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)', s)
+    else:
+        mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
     if mobj:
         return {
             'width': int(mobj.group('w')),
             'height': int(mobj.group('h')),
         }
 
     if mobj:
         return {
             'width': int(mobj.group('w')),
             'height': int(mobj.group('h')),
         }
 
-    mobj = re.search(r'\b(\d+)[pPiI]\b', s)
+    mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
     if mobj:
         return {'height': int(mobj.group(1))}
 
     if mobj:
         return {'height': int(mobj.group(1))}
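How the tightened word boundaries and the new lenient mode differ (examples are illustrative):

    from yt_dlp.utils import parse_resolution

    parse_resolution('1920x1080')                 # {'width': 1920, 'height': 1080}
    parse_resolution('720p')                      # {'height': 720}
    parse_resolution('hd1080x720')                # {} -- digits glued to letters no longer match
    parse_resolution('hd1080x720', lenient=True)  # {'width': 1080, 'height': 720}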
 
@@ -3748,7 +2336,7 @@ def setproctitle(title):
         # a bytestring, but since unicode_literals turns
         # every string into a unicode string, it fails.
         return
         # a bytestring, but since unicode_literals turns
         # every string into a unicode string, it fails.
         return
-    title_bytes = title.encode('utf-8')
+    title_bytes = title.encode()
     buf = ctypes.create_string_buffer(len(title_bytes))
     buf.value = title_bytes
     try:
     buf = ctypes.create_string_buffer(len(title_bytes))
     buf.value = title_bytes
     try:
@@ -3790,13 +2378,13 @@ def base_url(url):
 
 def urljoin(base, path):
     if isinstance(path, bytes):
 
 def urljoin(base, path):
     if isinstance(path, bytes):
-        path = path.decode('utf-8')
+        path = path.decode()
     if not isinstance(path, compat_str) or not path:
         return None
     if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
         return path
     if isinstance(base, bytes):
     if not isinstance(path, compat_str) or not path:
         return None
     if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
         return path
     if isinstance(base, bytes):
-        base = base.decode('utf-8')
+        base = base.decode()
     if not isinstance(base, compat_str) or not re.match(
             r'^(?:https?:)?//', base):
         return None
     if not isinstance(base, compat_str) or not re.match(
             r'^(?:https?:)?//', base):
         return None
@@ -3814,16 +2402,11 @@ def get_method(self):
 
 
 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
 
 
 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
-    if get_attr:
-        if v is not None:
-            v = getattr(v, get_attr, None)
-    if v == '':
-        v = None
-    if v is None:
-        return default
+    if get_attr and v is not None:
+        v = getattr(v, get_attr, None)
     try:
         return int(v) * invscale // scale
     try:
         return int(v) * invscale // scale
-    except (ValueError, TypeError):
+    except (ValueError, TypeError, OverflowError):
         return default
 
 
         return default
 
 
@@ -3833,7 +2416,7 @@ def str_or_none(v, default=None):
 
 def str_to_int(int_str):
     """ A more relaxed version of int_or_none """
 
 def str_to_int(int_str):
     """ A more relaxed version of int_or_none """
-    if isinstance(int_str, compat_integer_types):
+    if isinstance(int_str, int):
         return int_str
     elif isinstance(int_str, compat_str):
         int_str = re.sub(r'[,\.\+]', '', int_str)
         return int_str
     elif isinstance(int_str, compat_str):
         int_str = re.sub(r'[,\.\+]', '', int_str)
@@ -3864,10 +2447,17 @@ def url_or_none(url):
     return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
 
 
     return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
 
 
+def request_to_url(req):
+    if isinstance(req, compat_urllib_request.Request):
+        return req.get_full_url()
+    else:
+        return req
+
+
 def strftime_or_none(timestamp, date_format, default=None):
     datetime_object = None
     try:
 def strftime_or_none(timestamp, date_format, default=None):
     datetime_object = None
     try:
-        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
+        if isinstance(timestamp, (int, float)):  # unix timestamp
             datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
         elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
             datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
             datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
         elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
             datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
@@ -3877,36 +2467,42 @@ def strftime_or_none(timestamp, date_format, default=None):
 
 
 def parse_duration(s):
 
 
 def parse_duration(s):
-    if not isinstance(s, compat_basestring):
+    if not isinstance(s, str):
         return None
         return None
-
     s = s.strip()
     s = s.strip()
+    if not s:
+        return None
 
     days, hours, mins, secs, ms = [None] * 5
 
     days, hours, mins, secs, ms = [None] * 5
-    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
+    m = re.match(r'''(?x)
+            (?P<before_secs>
+                (?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?
+            (?P<secs>(?(before_secs)[0-9]{1,2}|[0-9]+))
+            (?P<ms>[.:][0-9]+)?Z?$
+        ''', s)
     if m:
     if m:
-        days, hours, mins, secs, ms = m.groups()
+        days, hours, mins, secs, ms = m.group('days', 'hours', 'mins', 'secs', 'ms')
     else:
         m = re.match(
             r'''(?ix)(?:P?
                 (?:
     else:
         m = re.match(
             r'''(?ix)(?:P?
                 (?:
-                    [0-9]+\s*y(?:ears?)?\s*
+                    [0-9]+\s*y(?:ears?)?,?\s*
                 )?
                 (?:
                 )?
                 (?:
-                    [0-9]+\s*m(?:onths?)?\s*
+                    [0-9]+\s*m(?:onths?)?,?\s*
                 )?
                 (?:
                 )?
                 (?:
-                    [0-9]+\s*w(?:eeks?)?\s*
+                    [0-9]+\s*w(?:eeks?)?,?\s*
                 )?
                 (?:
                 )?
                 (?:
-                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
+                    (?P<days>[0-9]+)\s*d(?:ays?)?,?\s*
                 )?
                 T)?
                 (?:
                 )?
                 T)?
                 (?:
-                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
+                    (?P<hours>[0-9]+)\s*h(?:ours?)?,?\s*
                 )?
                 (?:
                 )?
                 (?:
-                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
+                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?,?\s*
                 )?
                 (?:
                     (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                 )?
                 (?:
                     (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
@@ -3920,31 +2516,23 @@ def parse_duration(s):
             else:
                 return None
 
             else:
                 return None
 
-    duration = 0
-    if secs:
-        duration += float(secs)
-    if mins:
-        duration += float(mins) * 60
-    if hours:
-        duration += float(hours) * 60 * 60
-    if days:
-        duration += float(days) * 24 * 60 * 60
     if ms:
     if ms:
-        duration += float(ms)
-    return duration
+        ms = ms.replace(':', '.')
+    return sum(float(part or 0) * mult for part, mult in (
+        (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1)))
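A few inputs the reworked patterns accept (results are floats, in seconds):

    from yt_dlp.utils import parse_duration

    parse_duration('1:02:03')      # 3723.0
    parse_duration('1:30.5')       # 90.5
    parse_duration('3 min 5 sec')  # 185.0
    parse_duration('P1DT2H')       # 93600.0 (ISO-8601 style)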
 
 
 def prepend_extension(filename, ext, expected_real_ext=None):
     name, real_ext = os.path.splitext(filename)
     return (
 
 
 def prepend_extension(filename, ext, expected_real_ext=None):
     name, real_ext = os.path.splitext(filename)
     return (
-        '{0}.{1}{2}'.format(name, ext, real_ext)
+        f'{name}.{ext}{real_ext}'
         if not expected_real_ext or real_ext[1:] == expected_real_ext
         if not expected_real_ext or real_ext[1:] == expected_real_ext
-        else '{0}.{1}'.format(filename, ext))
+        else f'{filename}.{ext}')
 
 
 def replace_extension(filename, ext, expected_real_ext=None):
     name, real_ext = os.path.splitext(filename)
 
 
 def replace_extension(filename, ext, expected_real_ext=None):
     name, real_ext = os.path.splitext(filename)
-    return '{0}.{1}'.format(
+    return '{}.{}'.format(
         name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
         ext)
 
         name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
         ext)
 
@@ -3953,30 +2541,27 @@ def check_executable(exe, args=[]):
     """ Checks if the given binary is installed somewhere in PATH, and returns its name.
     args can be a list of arguments for a short output (like -version) """
     try:
     """ Checks if the given binary is installed somewhere in PATH, and returns its name.
     args can be a list of arguments for a short output (like -version) """
     try:
-        process_communicate_or_kill(subprocess.Popen(
-            [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
+        Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
     except OSError:
         return False
     return exe
 
 
     except OSError:
         return False
     return exe
 
 
-def get_exe_version(exe, args=['--version'],
-                    version_re=None, unrecognized='present'):
-    """ Returns the version of the specified executable,
-    or False if the executable is not present """
+def _get_exe_version_output(exe, args, *, to_screen=None):
+    if to_screen:
+        to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
     try:
         # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
         # SIGTTOU if yt-dlp is run in the background.
         # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
     try:
         # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
         # SIGTTOU if yt-dlp is run in the background.
         # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
-        out, _ = process_communicate_or_kill(subprocess.Popen(
-            [encodeArgument(exe)] + args,
-            stdin=subprocess.PIPE,
-            stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+        out, _ = Popen(
+            [encodeArgument(exe)] + args, stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
     except OSError:
         return False
     if isinstance(out, bytes):  # Python 2.x
         out = out.decode('ascii', 'ignore')
     except OSError:
         return False
     if isinstance(out, bytes):  # Python 2.x
         out = out.decode('ascii', 'ignore')
-    return detect_exe_version(out, version_re, unrecognized)
+    return out
 
 
 def detect_exe_version(output, version_re=None, unrecognized='present'):
 
 
 def detect_exe_version(output, version_re=None, unrecognized='present'):
@@ -3990,48 +2575,57 @@ def detect_exe_version(output, version_re=None, unrecognized='present'):
         return unrecognized
 
 
         return unrecognized
 
 
+def get_exe_version(exe, args=['--version'],
+                    version_re=None, unrecognized='present'):
+    """ Returns the version of the specified executable,
+    or False if the executable is not present """
+    out = _get_exe_version_output(exe, args)
+    return detect_exe_version(out, version_re, unrecognized) if out else False
+
+
 class LazyList(collections.abc.Sequence):
 class LazyList(collections.abc.Sequence):
-    ''' Lazy immutable list from an iterable
-    Note that slices of a LazyList are lists and not LazyList'''
+    """Lazy immutable list from an iterable
+    Note that slices of a LazyList are lists and not LazyList"""
 
     class IndexError(IndexError):
         pass
 
 
     class IndexError(IndexError):
         pass
 
-    def __init__(self, iterable):
-        self.__iterable = iter(iterable)
-        self.__cache = []
-        self.__reversed = False
+    def __init__(self, iterable, *, reverse=False, _cache=None):
+        self._iterable = iter(iterable)
+        self._cache = [] if _cache is None else _cache
+        self._reversed = reverse
 
     def __iter__(self):
 
     def __iter__(self):
-        if self.__reversed:
+        if self._reversed:
             # We need to consume the entire iterable to iterate in reverse
             yield from self.exhaust()
             return
             # We need to consume the entire iterable to iterate in reverse
             yield from self.exhaust()
             return
-        yield from self.__cache
-        for item in self.__iterable:
-            self.__cache.append(item)
+        yield from self._cache
+        for item in self._iterable:
+            self._cache.append(item)
             yield item
 
             yield item
 
-    def __exhaust(self):
-        self.__cache.extend(self.__iterable)
-        return self.__cache
+    def _exhaust(self):
+        self._cache.extend(self._iterable)
+        self._iterable = []  # Discard the emptied iterable to make it pickle-able
+        return self._cache
 
     def exhaust(self):
 
     def exhaust(self):
-        ''' Evaluate the entire iterable '''
-        return self.__exhaust()[::-1 if self.__reversed else 1]
+        """Evaluate the entire iterable"""
+        return self._exhaust()[::-1 if self._reversed else 1]
 
     @staticmethod
 
     @staticmethod
-    def __reverse_index(x):
+    def _reverse_index(x):
         return None if x is None else -(x + 1)
 
     def __getitem__(self, idx):
         if isinstance(idx, slice):
         return None if x is None else -(x + 1)
 
     def __getitem__(self, idx):
         if isinstance(idx, slice):
-            if self.__reversed:
-                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
+            if self._reversed:
+                idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1))
             start, stop, step = idx.start, idx.stop, idx.step or 1
         elif isinstance(idx, int):
             start, stop, step = idx.start, idx.stop, idx.step or 1
         elif isinstance(idx, int):
-            if self.__reversed:
-                idx = self.__reverse_index(idx)
+            if self._reversed:
+                idx = self._reverse_index(idx)
             start, stop, step = idx, idx, 0
         else:
             raise TypeError('indices must be integers or slices')
             start, stop, step = idx, idx, 0
         else:
             raise TypeError('indices must be integers or slices')
@@ -4040,33 +2634,35 @@ def __getitem__(self, idx):
                 or (stop is None and step > 0)):
             # We need to consume the entire iterable to be able to slice from the end
             # Obviously, never use this with infinite iterables
                 or (stop is None and step > 0)):
             # We need to consume the entire iterable to be able to slice from the end
             # Obviously, never use this with infinite iterables
-            self.__exhaust()
+            self._exhaust()
             try:
             try:
-                return self.__cache[idx]
+                return self._cache[idx]
             except IndexError as e:
                 raise self.IndexError(e) from e
             except IndexError as e:
                 raise self.IndexError(e) from e
-        n = max(start or 0, stop or 0) - len(self.__cache) + 1
+        n = max(start or 0, stop or 0) - len(self._cache) + 1
         if n > 0:
         if n > 0:
-            self.__cache.extend(itertools.islice(self.__iterable, n))
+            self._cache.extend(itertools.islice(self._iterable, n))
         try:
         try:
-            return self.__cache[idx]
+            return self._cache[idx]
         except IndexError as e:
             raise self.IndexError(e) from e
 
     def __bool__(self):
         try:
         except IndexError as e:
             raise self.IndexError(e) from e
 
     def __bool__(self):
         try:
-            self[-1] if self.__reversed else self[0]
+            self[-1] if self._reversed else self[0]
         except self.IndexError:
             return False
         return True
 
     def __len__(self):
         except self.IndexError:
             return False
         return True
 
     def __len__(self):
-        self.__exhaust()
-        return len(self.__cache)
+        self._exhaust()
+        return len(self._cache)
 
 
-    def reverse(self):
-        self.__reversed = not self.__reversed
-        return self
+    def __reversed__(self):
+        return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache)
+
+    def __copy__(self):
+        return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache)
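A sketch of the lazy evaluation together with the new __reversed__/__copy__ behaviour:

    from yt_dlp.utils import LazyList

    def slow_numbers():
        for n in range(5):
            print(f'generating {n}')
            yield n

    items = LazyList(slow_numbers())
    items[1]               # generates items 0 and 1 only; later accesses reuse the cache
    rev = reversed(items)  # a new LazyList sharing the same cache
    list(rev)              # [4, 3, 2, 1, 0] -- forces full evaluation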
 
     def __repr__(self):
         # repr and str should mimic a list. So we exhaust the iterable
 
     def __repr__(self):
         # repr and str should mimic a list. So we exhaust the iterable
@@ -4077,6 +2673,10 @@ def __str__(self):
 
 
 class PagedList:
 
 
 class PagedList:
+
+    class IndexError(IndexError):
+        pass
+
     def __len__(self):
         # This is only useful for tests
         return len(self.getslice())
     def __len__(self):
         # This is only useful for tests
         return len(self.getslice())
@@ -4084,11 +2684,14 @@ def __len__(self):
     def __init__(self, pagefunc, pagesize, use_cache=True):
         self._pagefunc = pagefunc
         self._pagesize = pagesize
     def __init__(self, pagefunc, pagesize, use_cache=True):
         self._pagefunc = pagefunc
         self._pagesize = pagesize
+        self._pagecount = float('inf')
         self._use_cache = use_cache
         self._cache = {}
 
     def getpage(self, pagenum):
         self._use_cache = use_cache
         self._cache = {}
 
     def getpage(self, pagenum):
-        page_results = self._cache.get(pagenum) or list(self._pagefunc(pagenum))
+        page_results = self._cache.get(pagenum)
+        if page_results is None:
+            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
         if self._use_cache:
             self._cache[pagenum] = page_results
         return page_results
         if self._use_cache:
             self._cache[pagenum] = page_results
         return page_results
@@ -4100,14 +2703,18 @@ def _getslice(self, start, end):
         raise NotImplementedError('This method must be implemented by subclasses')
 
     def __getitem__(self, idx):
         raise NotImplementedError('This method must be implemented by subclasses')
 
     def __getitem__(self, idx):
-        # NOTE: cache must be enabled if this is used
+        assert self._use_cache, 'Indexing PagedList requires cache'
         if not isinstance(idx, int) or idx < 0:
             raise TypeError('indices must be non-negative integers')
         entries = self.getslice(idx, idx + 1)
         if not isinstance(idx, int) or idx < 0:
             raise TypeError('indices must be non-negative integers')
         entries = self.getslice(idx, idx + 1)
-        return entries[0] if entries else None
+        if not entries:
+            raise self.IndexError()
+        return entries[0]
 
 
 class OnDemandPagedList(PagedList):
 
 
 class OnDemandPagedList(PagedList):
+    """Download pages until a page with less than maximum results"""
+
     def _getslice(self, start, end):
         for pagenum in itertools.count(start // self._pagesize):
             firstid = pagenum * self._pagesize
     def _getslice(self, start, end):
         for pagenum in itertools.count(start // self._pagesize):
             firstid = pagenum * self._pagesize
@@ -4124,7 +2731,11 @@ def _getslice(self, start, end):
                 if (end is not None and firstid <= end <= nextfirstid)
                 else None)
 
                 if (end is not None and firstid <= end <= nextfirstid)
                 else None)
 
-            page_results = self.getpage(pagenum)
+            try:
+                page_results = self.getpage(pagenum)
+            except Exception:
+                self._pagecount = pagenum - 1
+                raise
             if startv != 0 or endv is not None:
                 page_results = page_results[startv:endv]
             yield from page_results
             if startv != 0 or endv is not None:
                 page_results = page_results[startv:endv]
             yield from page_results
@@ -4143,14 +2754,15 @@ def _getslice(self, start, end):
 
 
 class InAdvancePagedList(PagedList):
 
 
 class InAdvancePagedList(PagedList):
+    """PagedList with total number of pages known in advance"""
+
     def __init__(self, pagefunc, pagecount, pagesize):
     def __init__(self, pagefunc, pagecount, pagesize):
-        self._pagecount = pagecount
         PagedList.__init__(self, pagefunc, pagesize, True)
         PagedList.__init__(self, pagefunc, pagesize, True)
+        self._pagecount = pagecount
 
     def _getslice(self, start, end):
         start_page = start // self._pagesize
 
     def _getslice(self, start, end):
         start_page = start // self._pagesize
-        end_page = (
-            self._pagecount if end is None else (end // self._pagesize + 1))
+        end_page = self._pagecount if end is None else min(self._pagecount, end // self._pagesize + 1)
         skip_elems = start - start_page * self._pagesize
         only_more = None if end is None else end - start
         for pagenum in range(start_page, end_page):
         skip_elems = start - start_page * self._pagesize
         only_more = None if end is None else end - start
         for pagenum in range(start_page, end_page):
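A toy example of the paged lists (the page function and sizes are made up):

    from yt_dlp.utils import OnDemandPagedList

    def fetch_page(pagenum):
        # pretend this queries a paginated API: 10 entries in total, 3 per page
        first = pagenum * 3
        return list(range(first, min(first + 3, 10)))

    entries = OnDemandPagedList(fetch_page, 3)
    entries[4]   # 4 -- only page 1 is fetched, and cached for later lookups
    try:
        entries[99]
    except entries.IndexError:
        pass     # indexing past the end now raises instead of returning None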
@@ -4185,9 +2797,7 @@ def lowercase_escape(s):
 
 def escape_rfc3986(s):
     """Escape non-ASCII characters as suggested by RFC 3986"""
 
 def escape_rfc3986(s):
     """Escape non-ASCII characters as suggested by RFC 3986"""
-    if sys.version_info < (3, 0) and isinstance(s, compat_str):
-        s = s.encode('utf-8')
-    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
+    return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
 
 
 def escape_url(url):
 
 
 def escape_url(url):
@@ -4266,9 +2876,9 @@ def _multipart_encode_impl(data, boundary):
     for k, v in data.items():
         out += b'--' + boundary.encode('ascii') + b'\r\n'
         if isinstance(k, compat_str):
     for k, v in data.items():
         out += b'--' + boundary.encode('ascii') + b'\r\n'
         if isinstance(k, compat_str):
-            k = k.encode('utf-8')
+            k = k.encode()
         if isinstance(v, compat_str):
         if isinstance(v, compat_str):
-            v = v.encode('utf-8')
+            v = v.encode()
         # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
         # suggests sending UTF-8 directly. Firefox sends UTF-8, too
         content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
         # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
         # suggests sending UTF-8 directly. Firefox sends UTF-8, too
         content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
@@ -4312,36 +2922,37 @@ def multipart_encode(data, boundary=None):
 
 
 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
 
 
 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
-    if isinstance(key_or_keys, (list, tuple)):
-        for key in key_or_keys:
-            if key not in d or d[key] is None or skip_false_values and not d[key]:
-                continue
-            return d[key]
-        return default
-    return d.get(key_or_keys, default)
+    for val in map(d.get, variadic(key_or_keys)):
+        if val is not None and (val or not skip_false_values):
+            return val
+    return default
 
 
 
 
-def try_get(src, getter, expected_type=None):
-    for get in variadic(getter):
+def try_call(*funcs, expected_type=None, args=[], kwargs={}):
+    for f in funcs:
         try:
         try:
-            v = get(src)
-        except (AttributeError, KeyError, TypeError, IndexError):
+            val = f(*args, **kwargs)
+        except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
             pass
         else:
             pass
         else:
-            if expected_type is None or isinstance(v, expected_type):
-                return v
+            if expected_type is None or isinstance(val, expected_type):
+                return val
+
+
+def try_get(src, getter, expected_type=None):
+    return try_call(*variadic(getter), args=(src,), expected_type=expected_type)
+
+
+def filter_dict(dct, cndn=lambda _, v: v is not None):
+    return {k: v for k, v in dct.items() if cndn(k, v)}
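Small sketches of the reworked helpers (the metadata dict is made up):

    from yt_dlp.utils import filter_dict, try_call, try_get

    meta = {'view_count': '1,024 views', 'like_count': None}

    try_get(meta, lambda x: x['uploader']['name'])    # None -- KeyError is swallowed
    try_call(lambda: meta['duration'], lambda: 0)     # 0 -- falls through to the next callable
    try_call(lambda: 1 // 0)                          # None -- ZeroDivisionError is now tolerated
    filter_dict(meta)                                 # {'view_count': '1,024 views'}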
 
 
 def merge_dicts(*dicts):
     merged = {}
     for a_dict in dicts:
         for k, v in a_dict.items():
 
 
 def merge_dicts(*dicts):
     merged = {}
     for a_dict in dicts:
         for k, v in a_dict.items():
-            if v is None:
-                continue
-            if (k not in merged
-                    or (isinstance(v, compat_str) and v
-                        and isinstance(merged[k], compat_str)
-                        and not merged[k])):
+            if (v is not None and k not in merged
+                    or isinstance(v, str) and merged[k] == ''):
                 merged[k] = v
     return merged
 
                 merged[k] = v
     return merged
 
@@ -4370,9 +2981,10 @@ def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
 
 
 def parse_age_limit(s):
 
 
 def parse_age_limit(s):
-    if type(s) == int:
+    # isinstance(False, int) is True. So type() must be used instead
+    if type(s) is int:  # noqa: E721
         return s if 0 <= s <= 21 else None
         return s if 0 <= s <= 21 else None
-    if not isinstance(s, compat_basestring):
+    elif not isinstance(s, str):
         return None
     m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
     if m:
         return None
     m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
     if m:
@@ -4399,10 +3011,10 @@ def strip_jsonp(code):
 def js_to_json(code, vars={}):
     # vars is a dict of var, val pairs to substitute
     COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
 def js_to_json(code, vars={}):
     # vars is a dict of var, val pairs to substitute
     COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
-    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
+    SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
     INTEGER_TABLE = (
     INTEGER_TABLE = (
-        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
-        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
+        (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16),
+        (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
     )
 
     def fix_kv(m):
     )
 
     def fix_kv(m):
@@ -4433,6 +3045,8 @@ def fix_kv(m):
 
         return '"%s"' % v
 
 
         return '"%s"' % v
 
+    code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
+
     return re.sub(r'''(?sx)
         "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
         '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
     return re.sub(r'''(?sx)
         "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
         '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
@@ -4454,6 +3068,9 @@ def q(qid):
     return q
 
 
     return q
 
 
+POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist')
+
+
 DEFAULT_OUTTMPL = {
     'default': '%(title)s [%(id)s].%(ext)s',
     'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
 DEFAULT_OUTTMPL = {
     'default': '%(title)s [%(id)s].%(ext)s',
     'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
@@ -4465,6 +3082,8 @@ def q(qid):
     'description': 'description',
     'annotation': 'annotations.xml',
     'infojson': 'info.json',
     'description': 'description',
     'annotation': 'annotations.xml',
     'infojson': 'info.json',
+    'link': None,
+    'pl_video': None,
     'pl_thumbnail': None,
     'pl_description': 'description',
     'pl_infojson': 'info.json',
     'pl_thumbnail': None,
     'pl_description': 'description',
     'pl_infojson': 'info.json',
@@ -4515,11 +3134,10 @@ def is_outdated_version(version, limit, assume_new=True):
 
 def ytdl_is_updateable():
     """ Returns if yt-dlp can be updated with -U """
 
 def ytdl_is_updateable():
     """ Returns if yt-dlp can be updated with -U """
-    return False
 
 
-    from zipimport import zipimporter
+    from .update import is_non_updateable
 
 
-    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
+    return not is_non_updateable()
 
 
 def args_to_str(args):
 
 
 def args_to_str(args):
@@ -4528,32 +3146,35 @@ def args_to_str(args):
 
 
 def error_to_compat_str(err):
 
 
 def error_to_compat_str(err):
-    err_str = str(err)
-    # On python 2 error byte string must be decoded with proper
-    # encoding rather than ascii
-    if sys.version_info[0] < 3:
-        err_str = err_str.decode(preferredencoding())
-    return err_str
+    return str(err)
+
+
+def error_to_str(err):
+    return f'{type(err).__name__}: {err}'
 
 
 def mimetype2ext(mt):
     if mt is None:
         return None
 
 
 
 def mimetype2ext(mt):
     if mt is None:
         return None
 
-    ext = {
+    mt, _, params = mt.partition(';')
+    mt = mt.strip()
+
+    FULL_MAP = {
         'audio/mp4': 'm4a',
         # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
         # it's the most popular one
         'audio/mpeg': 'mp3',
         'audio/x-wav': 'wav',
         'audio/mp4': 'm4a',
         # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
         # it's the most popular one
         'audio/mpeg': 'mp3',
         'audio/x-wav': 'wav',
-    }.get(mt)
+        'audio/wav': 'wav',
+        'audio/wave': 'wav',
+    }
+
+    ext = FULL_MAP.get(mt)
     if ext is not None:
         return ext
 
     if ext is not None:
         return ext
 
-    _, _, res = mt.rpartition('/')
-    res = res.split(';')[0].strip().lower()
-
-    return {
+    SUBTYPE_MAP = {
         '3gpp': '3gp',
         'smptett+xml': 'tt',
         'ttaf+xml': 'dfxp',
         '3gpp': '3gp',
         'smptett+xml': 'tt',
         'ttaf+xml': 'dfxp',
@@ -4572,7 +3193,36 @@ def mimetype2ext(mt):
         'quicktime': 'mov',
         'mp2t': 'ts',
         'x-wav': 'wav',
         'quicktime': 'mov',
         'mp2t': 'ts',
         'x-wav': 'wav',
-    }.get(res, res)
+        'filmstrip+json': 'fs',
+        'svg+xml': 'svg',
+    }
+
+    _, _, subtype = mt.rpartition('/')
+    ext = SUBTYPE_MAP.get(subtype.lower())
+    if ext is not None:
+        return ext
+
+    SUFFIX_MAP = {
+        'json': 'json',
+        'xml': 'xml',
+        'zip': 'zip',
+        'gzip': 'gz',
+    }
+
+    _, _, suffix = subtype.partition('+')
+    ext = SUFFIX_MAP.get(suffix)
+    if ext is not None:
+        return ext
+
+    return subtype.replace('+', '.')
+
+
+def ext2mimetype(ext_or_url):
+    if not ext_or_url:
+        return None
+    if '.' not in ext_or_url:
+        ext_or_url = f'file.{ext_or_url}'
+    return mimetypes.guess_type(ext_or_url)[0]
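Roughly how the extended mapping resolves a few MIME types (the last result depends on the platform's mimetypes tables):

    from yt_dlp.utils import ext2mimetype, mimetype2ext

    mimetype2ext('audio/mp4; codecs="mp4a.40.2"')  # 'm4a' -- parameters are stripped first
    mimetype2ext('image/svg+xml')                  # 'svg' (subtype map)
    mimetype2ext('application/ld+json')            # 'json' (suffix fallback)
    ext2mimetype('mp4')                            # usually 'video/mp4'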
 
 
 def parse_codecs(codecs_str):
 
 
 def parse_codecs(codecs_str):
@@ -4581,27 +3231,39 @@ def parse_codecs(codecs_str):
         return {}
     split_codecs = list(filter(None, map(
         str.strip, codecs_str.strip().strip(',').split(','))))
         return {}
     split_codecs = list(filter(None, map(
         str.strip, codecs_str.strip().strip(',').split(','))))
-    vcodec, acodec = None, None
+    vcodec, acodec, scodec, hdr = None, None, None, None
     for full_codec in split_codecs:
     for full_codec in split_codecs:
-        codec = full_codec.split('.')[0]
-        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
+        parts = full_codec.split('.')
+        codec = parts[0].replace('0', '')
+        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
+                     'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
             if not vcodec:
             if not vcodec:
-                vcodec = full_codec
-        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
+                vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
+                if codec in ('dvh1', 'dvhe'):
+                    hdr = 'DV'
+                elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
+                    hdr = 'HDR10'
+                elif full_codec.replace('0', '').startswith('vp9.2'):
+                    hdr = 'HDR10'
+        elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
             if not acodec:
                 acodec = full_codec
             if not acodec:
                 acodec = full_codec
+        elif codec in ('stpp', 'wvtt',):
+            if not scodec:
+                scodec = full_codec
         else:
         else:
-            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
-    if not vcodec and not acodec:
-        if len(split_codecs) == 2:
-            return {
-                'vcodec': split_codecs[0],
-                'acodec': split_codecs[1],
-            }
-    else:
+            write_string(f'WARNING: Unknown codec {full_codec}\n')
+    if vcodec or acodec or scodec:
         return {
             'vcodec': vcodec or 'none',
             'acodec': acodec or 'none',
         return {
             'vcodec': vcodec or 'none',
             'acodec': acodec or 'none',
+            'dynamic_range': hdr,
+            **({'scodec': scodec} if scodec is not None else {}),
+        }
+    elif len(split_codecs) == 2:
+        return {
+            'vcodec': split_codecs[0],
+            'acodec': split_codecs[1],
         }
     return {}
 
         }
     return {}
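Example results from the extended codec parsing (codec strings are illustrative):

    from yt_dlp.utils import parse_codecs

    parse_codecs('avc1.64001f, mp4a.40.2')
    # {'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', 'dynamic_range': None}

    parse_codecs('dvh1.05.06')
    # {'vcodec': 'dvh1.05.06', 'acodec': 'none', 'dynamic_range': 'DV'}

    parse_codecs('av01.0.12M.10')
    # {'vcodec': 'av01.0.12M.10', 'acodec': 'none', 'dynamic_range': 'HDR10'}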
 
@@ -4644,14 +3306,13 @@ def is_html(first_bytes):
         (b'\xff\xfe', 'utf-16-le'),
         (b'\xfe\xff', 'utf-16-be'),
     ]
         (b'\xff\xfe', 'utf-16-le'),
         (b'\xfe\xff', 'utf-16-be'),
     ]
+
+    encoding = 'utf-8'
     for bom, enc in BOMS:
     for bom, enc in BOMS:
-        if first_bytes.startswith(bom):
-            s = first_bytes[len(bom):].decode(enc, 'replace')
-            break
-    else:
-        s = first_bytes.decode('utf-8', 'replace')
+        while first_bytes.startswith(bom):
+            encoding, first_bytes = enc, first_bytes[len(bom):]
 
 
-    return re.match(r'^\s*<', s)
+    return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace'))
 
 
 def determine_protocol(info_dict):
 
 
 def determine_protocol(info_dict):
@@ -4659,7 +3320,7 @@ def determine_protocol(info_dict):
     if protocol is not None:
         return protocol
 
     if protocol is not None:
         return protocol
 
-    url = info_dict['url']
+    url = sanitize_url(info_dict['url'])
     if url.startswith('rtmp'):
         return 'rtmp'
     elif url.startswith('mms'):
     if url.startswith('rtmp'):
         return 'rtmp'
     elif url.startswith('mms'):
@@ -4676,26 +3337,36 @@ def determine_protocol(info_dict):
     return compat_urllib_parse_urlparse(url).scheme
 
 
     return compat_urllib_parse_urlparse(url).scheme
 
 
-def render_table(header_row, data, delim=False, extraGap=0, hideEmpty=False):
-    """ Render a list of rows, each as a list of values """
+def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
+    """ Render a list of rows, each as a list of values.
+    Text after a \t will be right aligned """
+    def width(string):
+        return len(remove_terminal_sequences(string).replace('\t', ''))
 
     def get_max_lens(table):
 
     def get_max_lens(table):
-        return [max(len(compat_str(v)) for v in col) for col in zip(*table)]
+        return [max(width(str(v)) for v in col) for col in zip(*table)]
 
     def filter_using_list(row, filterArray):
 
     def filter_using_list(row, filterArray):
-        return [col for (take, col) in zip(filterArray, row) if take]
+        return [col for take, col in itertools.zip_longest(filterArray, row, fillvalue=True) if take]
 
 
-    if hideEmpty:
-        max_lens = get_max_lens(data)
-        header_row = filter_using_list(header_row, max_lens)
-        data = [filter_using_list(row, max_lens) for row in data]
+    max_lens = get_max_lens(data) if hide_empty else []
+    header_row = filter_using_list(header_row, max_lens)
+    data = [filter_using_list(row, max_lens) for row in data]
 
     table = [header_row] + data
     max_lens = get_max_lens(table)
 
     table = [header_row] + data
     max_lens = get_max_lens(table)
+    extra_gap += 1
     if delim:
     if delim:
-        table = [header_row] + [['-' * ml for ml in max_lens]] + data
-    format_str = ' '.join('%-' + compat_str(ml + extraGap) + 's' for ml in max_lens[:-1]) + ' %s'
-    return '\n'.join(format_str % tuple(row) for row in table)
+        table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
+        table[1][-1] = table[1][-1][:-extra_gap * len(delim)]  # Remove extra_gap from end of delimiter
+    for row in table:
+        for pos, text in enumerate(map(str, row)):
+            if '\t' in text:
+                row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
+            else:
+                row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
+    ret = '\n'.join(''.join(row).rstrip() for row in table)
+    return ret
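A quick sketch of the renamed rendering options (exact spacing depends on column widths):

    from yt_dlp.utils import render_table

    print(render_table(
        ['ID', 'EXT', 'NOTE'],
        [['137', 'mp4', '1080p'], ['251', 'webm', 'audio only']],
        delim='-', extra_gap=1, hide_empty=True))
    # Columns are padded to their widest cell, text after a '\t' is right-aligned,
    # and a '-' delimiter row is inserted below the header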
 
 
 def _match_one(filter_part, dct, incomplete):
 
 
 def _match_one(filter_part, dct, incomplete):
@@ -4715,11 +3386,15 @@ def _match_one(filter_part, dct, incomplete):
         '=': operator.eq,
     }
 
         '=': operator.eq,
     }
 
+    if isinstance(incomplete, bool):
+        is_incomplete = lambda _: incomplete
+    else:
+        is_incomplete = lambda k: k in incomplete
+
     operator_rex = re.compile(r'''(?x)\s*
         (?P<key>[a-z_]+)
         \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
         (?:
     operator_rex = re.compile(r'''(?x)\s*
         (?P<key>[a-z_]+)
         \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
         (?:
-            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
             (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
             (?P<strval>.+?)
         )
@@ -4727,40 +3402,35 @@ def _match_one(filter_part, dct, incomplete):
         ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
     m = operator_rex.search(filter_part)
     if m:
-        unnegated_op = COMPARISON_OPERATORS[m.group('op')]
-        if m.group('negation'):
+        m = m.groupdict()
+        unnegated_op = COMPARISON_OPERATORS[m['op']]
+        if m['negation']:
             op = lambda attr, value: not unnegated_op(attr, value)
         else:
             op = unnegated_op
-        actual_value = dct.get(m.group('key'))
-        if (m.group('quotedstrval') is not None
-            or m.group('strval') is not None
+        comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
+        if m['quote']:
+            comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
+        actual_value = dct.get(m['key'])
+        numeric_comparison = None
+        if isinstance(actual_value, (int, float)):
             # If the original field is a string and matching comparisonvalue is
             # a number we should respect the origin of the original field
             # and process comparison value as a string (see
-            # https://github.com/ytdl-org/youtube-dl/issues/11082).
-            or actual_value is not None and m.group('intval') is not None
-                and isinstance(actual_value, compat_str)):
-            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
-            quote = m.group('quote')
-            if quote is not None:
-                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
-        else:
-            if m.group('op') in STRING_OPERATORS:
-                raise ValueError('Operator %s only supports string values!' % m.group('op'))
+            # https://github.com/ytdl-org/youtube-dl/issues/11082)
             try:
-                comparison_value = int(m.group('intval'))
+                numeric_comparison = int(comparison_value)
             except ValueError:
-                comparison_value = parse_filesize(m.group('intval'))
-                if comparison_value is None:
-                    comparison_value = parse_filesize(m.group('intval') + 'B')
-                if comparison_value is None:
-                    raise ValueError(
-                        'Invalid integer value %r in filter part %r' % (
-                            m.group('intval'), filter_part))
+                numeric_comparison = parse_filesize(comparison_value)
+                if numeric_comparison is None:
+                    numeric_comparison = parse_filesize(f'{comparison_value}B')
+                if numeric_comparison is None:
+                    numeric_comparison = parse_duration(comparison_value)
+        if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
+            raise ValueError('Operator %s only supports string values!' % m['op'])
         if actual_value is None:
-            return incomplete or m.group('none_inclusive')
-        return op(actual_value, comparison_value)
+            return is_incomplete(m['key']) or m['none_inclusive']
+        return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
 
     UNARY_OPERATORS = {
         '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
@@ -4774,7 +3444,7 @@ def _match_one(filter_part, dct, incomplete):
     if m:
         op = UNARY_OPERATORS[m.group('op')]
         actual_value = dct.get(m.group('key'))
-        if incomplete and actual_value is None:
+        if is_incomplete(m.group('key')) and actual_value is None:
             return True
         return op(actual_value)
 
@@ -4782,21 +3452,33 @@ def _match_one(filter_part, dct, incomplete):
 
 
 def match_str(filter_str, dct, incomplete=False):
-    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
-        When incomplete, all conditions passes on missing fields
+    """ Filter a dictionary with a simple string syntax.
+    @returns           Whether the filter passes
+    @param incomplete  Set of keys that is expected to be missing from dct.
+                       Can be True/False to indicate all/none of the keys may be missing.
+                       All conditions on incomplete keys pass if the key is missing
     """
     return all(
         _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
         for filter_part in re.split(r'(?<!\\)&', filter_str))
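# Editor's illustrative sketch of the filter syntax (not part of the patch):
info = {'like_count': 190, 'dislike_count': 10, 'title': 'abc'}
assert match_str('like_count > 100 & dislike_count <? 50', info)
assert not match_str('like_count > 100 & title = xyz', info)
assert match_str('view_count >? 100', {})  # '?' lets the condition pass when the field is missing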
 
 
     """
     return all(
         _match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
         for filter_part in re.split(r'(?<!\\)&', filter_str))
 
 
-def match_filter_func(filter_str):
-    def _match_func(info_dict, *args, **kwargs):
-        if match_str(filter_str, info_dict, *args, **kwargs):
-            return None
+def match_filter_func(filters):
+    if not filters:
+        return None
+    filters = set(variadic(filters))
+
+    interactive = '-' in filters
+    if interactive:
+        filters.remove('-')
+
+    def _match_func(info_dict, incomplete=False):
+        if not filters or any(match_str(f, info_dict, incomplete) for f in filters):
+            return NO_DEFAULT if interactive and not incomplete else None
         else:
-            video_title = info_dict.get('title', info_dict.get('id', 'video'))
-            return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
+            video_title = info_dict.get('title') or info_dict.get('id') or 'video'
+            filter_str = ') | ('.join(map(str.strip, filters))
+            return f'{video_title} does not pass filter ({filter_str}), skipping ..'
     return _match_func
 
 
@@ -4804,7 +3486,7 @@ def parse_dfxp_time_expr(time_expr):
     if not time_expr:
         return
 
-    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
+    mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr)
     if mobj:
         return float(mobj.group('time_offset'))
 
@@ -4814,7 +3496,12 @@ def parse_dfxp_time_expr(time_expr):
 
 
 def srt_subtitles_timecode(seconds):
-    return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
+    return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
+
+
+def ass_subtitles_timecode(seconds):
+    time = timetuple_from_msec(seconds * 1000)
+    return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
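# Quick sanity check of the two timecode formats (editor's illustration, not part of the patch):
assert srt_subtitles_timecode(60.5) == '00:01:00,500'
assert ass_subtitles_timecode(60.5) == '0:01:00.50'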
 
 
 def dfxp2srt(dfxp_data):
@@ -4851,7 +3538,7 @@ def dfxp2srt(dfxp_data):
     styles = {}
     default_style = {}
 
-    class TTMLPElementParser(object):
+    class TTMLPElementParser:
         _out = ''
         _unclosed_elements = []
         _applied_styles = []
@@ -4981,26 +3668,21 @@ def parse_node(node):
     return ''.join(out)
 
 
-def cli_option(params, command_option, param):
+def cli_option(params, command_option, param, separator=None):
     param = params.get(param)
-    if param:
-        param = compat_str(param)
-    return [command_option, param] if param is not None else []
+    return ([] if param is None
+            else [command_option, str(param)] if separator is None
+            else [f'{command_option}{separator}{param}'])
 
 
 def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
     param = params.get(param)
-    if param is None:
-        return []
-    assert isinstance(param, bool)
-    if separator:
-        return [command_option + separator + (true_value if param else false_value)]
-    return [command_option, true_value if param else false_value]
+    assert param in (True, False, None)
+    return cli_option({True: true_value, False: false_value}, command_option, param, separator)
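# Editor's illustrative sketch (not part of the patch) of how the rewritten helpers build arguments:
assert cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy') == ['--proxy', '127.0.0.1:3128']
assert cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy', separator='=') == ['--proxy=127.0.0.1:3128']
assert cli_bool_option({'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate') == ['--no-check-certificate', 'true']
assert cli_bool_option({'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate', separator='=') == ['--no-check-certificate=false']
assert cli_bool_option({}, '--no-check-certificate', 'nocheckcertificate') == []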
 
 
 def cli_valueless_option(params, command_option, param, expected_value=True):
-    param = params.get(param)
-    return [command_option] if param == expected_value else []
+    return [command_option] if params.get(param) == expected_value else []
 
 
 def cli_configuration_args(argdict, keys, default=[], use_compat=True):
@@ -5036,7 +3718,7 @@ def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compa
     return cli_configuration_args(argdict, keys, default, use_compat)
 
 
-class ISO639Utils(object):
+class ISO639Utils:
     # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
     _lang_map = {
         'aa': 'aar',
@@ -5241,7 +3923,7 @@ def long2short(cls, code):
                 return short_name
 
 
-class ISO3166Utils(object):
+class ISO3166Utils:
     # From http://data.okfn.org/data/core/country-list
     _country_map = {
         'AF': 'Afghanistan',
@@ -5493,6 +4175,9 @@ class ISO3166Utils(object):
         'YE': 'Yemen',
         'ZM': 'Zambia',
         'ZW': 'Zimbabwe',
+        # Not ISO 3166 codes, but used for IP blocks
+        'AP': 'Asia/Pacific Region',
+        'EU': 'Europe',
     }
 
     @classmethod
@@ -5501,7 +4186,7 @@ def short2full(cls, code):
         return cls._country_map.get(code.upper())
 
 
-class GeoUtils(object):
+class GeoUtils:
     # Major IPv4 address blocks per country
     _country_ip_map = {
         'AD': '46.172.224.0/19',
@@ -5938,7 +4623,7 @@ def decode_png(png_data):
     header = png_data[8:]
 
     if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
-        raise IOError('Not a valid PNG file.')
+        raise OSError('Not a valid PNG file.')
 
     int_map = {1: '>B', 2: '>H', 4: '>I'}
     unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
@@ -5975,7 +4660,7 @@ def decode_png(png_data):
             idat += chunk['data']
 
     if not idat:
-        raise IOError('Unable to read PNG data.')
+        raise OSError('Unable to read PNG data.')
 
     decompressed_data = bytearray(zlib.decompress(idat))
 
@@ -6039,87 +4724,56 @@ def _get_pixel(idx):
 
 
 def write_xattr(path, key, value):
-    # This mess below finds the best xattr tool for the job
-    try:
-        # try the pyxattr module...
-        import xattr
-
-        if hasattr(xattr, 'set'):  # pyxattr
-            # Unicode arguments are not supported in python-pyxattr until
-            # version 0.5.0
-            # See https://github.com/ytdl-org/youtube-dl/issues/5498
-            pyxattr_required_version = '0.5.0'
-            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
-                # TODO: fallback to CLI tools
-                raise XAttrUnavailableError(
-                    'python-pyxattr is detected but is too old. '
-                    'yt-dlp requires %s or above while your version is %s. '
-                    'Falling back to other xattr implementations' % (
-                        pyxattr_required_version, xattr.__version__))
-
-            setxattr = xattr.set
-        else:  # xattr
-            setxattr = xattr.setxattr
+    # Windows: Write xattrs to NTFS Alternate Data Streams:
+    # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
+    if compat_os_name == 'nt':
+        assert ':' not in key
+        assert os.path.exists(path)
 
         try:
-            setxattr(path, key, value)
-        except EnvironmentError as e:
+            with open(f'{path}:{key}', 'wb') as f:
+                f.write(value)
+        except OSError as e:
             raise XAttrMetadataError(e.errno, e.strerror)
+        return
 
-    except ImportError:
-        if compat_os_name == 'nt':
-            # Write xattrs to NTFS Alternate Data Streams:
-            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
-            assert ':' not in key
-            assert os.path.exists(path)
-
-            ads_fn = path + ':' + key
-            try:
-                with open(ads_fn, 'wb') as f:
-                    f.write(value)
-            except EnvironmentError as e:
-                raise XAttrMetadataError(e.errno, e.strerror)
-        else:
-            user_has_setfattr = check_executable('setfattr', ['--version'])
-            user_has_xattr = check_executable('xattr', ['-h'])
-
-            if user_has_setfattr or user_has_xattr:
+    # UNIX Method 1. Use xattrs/pyxattrs modules
+    from .dependencies import xattr
 
-                value = value.decode('utf-8')
-                if user_has_setfattr:
-                    executable = 'setfattr'
-                    opts = ['-n', key, '-v', value]
-                elif user_has_xattr:
-                    executable = 'xattr'
-                    opts = ['-w', key, value]
+    setxattr = None
+    if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
+        # Unicode arguments are not supported in pyxattr until version 0.5.0
+        # See https://github.com/ytdl-org/youtube-dl/issues/5498
+        if version_tuple(xattr.__version__) >= (0, 5, 0):
+            setxattr = xattr.set
+    elif xattr:
+        setxattr = xattr.setxattr
 
-                cmd = ([encodeFilename(executable, True)]
-                       + [encodeArgument(o) for o in opts]
-                       + [encodeFilename(path, True)])
+    if setxattr:
+        try:
+            setxattr(path, key, value)
+        except OSError as e:
+            raise XAttrMetadataError(e.errno, e.strerror)
+        return
 
-                try:
-                    p = subprocess.Popen(
-                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
-                except EnvironmentError as e:
-                    raise XAttrMetadataError(e.errno, e.strerror)
-                stdout, stderr = process_communicate_or_kill(p)
-                stderr = stderr.decode('utf-8', 'replace')
-                if p.returncode != 0:
-                    raise XAttrMetadataError(p.returncode, stderr)
+    # UNIX Method 2. Use setfattr/xattr executables
+    exe = ('setfattr' if check_executable('setfattr', ['--version'])
+           else 'xattr' if check_executable('xattr', ['-h']) else None)
+    if not exe:
+        raise XAttrUnavailableError(
+            'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the '
+            + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)'))
 
-            else:
-                # On Unix, and can't find pyxattr, setfattr, or xattr.
-                if sys.platform.startswith('linux'):
-                    raise XAttrUnavailableError(
-                        "Couldn't find a tool to set the xattrs. "
-                        "Install either the python 'pyxattr' or 'xattr' "
-                        "modules, or the GNU 'attr' package "
-                        "(which contains the 'setfattr' tool).")
-                else:
-                    raise XAttrUnavailableError(
-                        "Couldn't find a tool to set the xattrs. "
-                        "Install either the python 'xattr' module, "
-                        "or the 'xattr' binary.")
+    value = value.decode()
+    try:
+        p = Popen(
+            [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path],
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
+    except OSError as e:
+        raise XAttrMetadataError(e.errno, e.strerror)
+    stderr = p.communicate_or_kill()[1].decode('utf-8', 'replace')
+    if p.returncode:
+        raise XAttrMetadataError(p.returncode, stderr)
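# Editor's note (not part of the patch): a typical call looks like the hypothetical example below.
# On Windows it writes an NTFS Alternate Data Stream; elsewhere it needs the python xattr/pyxattr
# module or the setfattr/xattr executable, and the target file must already exist.
#
#   write_xattr('video.mp4', 'user.xdg.referrer.url', b'https://example.com/watch')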
 
 
 def random_birthday(year_field, month_field, day_field):
@@ -6135,12 +4789,12 @@ def random_birthday(year_field, month_field, day_field):
 
 
 # Templates for internet shortcut files, which are plain text files.
-DOT_URL_LINK_TEMPLATE = '''
+DOT_URL_LINK_TEMPLATE = '''\
 [InternetShortcut]
 URL=%(url)s
-'''.lstrip()
+'''
 
-DOT_WEBLOC_LINK_TEMPLATE = '''
+DOT_WEBLOC_LINK_TEMPLATE = '''\
 <?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
 <plist version="1.0">
@@ -6149,16 +4803,22 @@ def random_birthday(year_field, month_field, day_field):
 \t<string>%(url)s</string>
 </dict>
 </plist>
-'''.lstrip()
+'''
 
-DOT_DESKTOP_LINK_TEMPLATE = '''
+DOT_DESKTOP_LINK_TEMPLATE = '''\
 [Desktop Entry]
 Encoding=UTF-8
 Name=%(filename)s
 Type=Link
 URL=%(url)s
 Icon=text-html
-'''.lstrip()
+'''
+
+LINK_TEMPLATES = {
+    'url': DOT_URL_LINK_TEMPLATE,
+    'desktop': DOT_DESKTOP_LINK_TEMPLATE,
+    'webloc': DOT_WEBLOC_LINK_TEMPLATE,
+}
 
 
 def iri_to_uri(iri):
@@ -6178,29 +4838,29 @@ def iri_to_uri(iri):
 
     net_location = ''
     if iri_parts.username:
-        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
+        net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~")
         if iri_parts.password is not None:
-            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
+            net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~")
         net_location += '@'
 
-    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
+    net_location += iri_parts.hostname.encode('idna').decode()  # Punycode for Unicode hostnames.
     # The 'idna' encoding produces ASCII text.
     if iri_parts.port is not None and iri_parts.port != 80:
         net_location += ':' + str(iri_parts.port)
 
-    return compat_urllib_parse_urlunparse(
+    return urllib.parse.urlunparse(
         (iri_parts.scheme,
             net_location,
 
-            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
+            urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
 
             # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
-            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
+            urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
 
             # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
-            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
+            urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
 
-            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
+            urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
 
     # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
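# Editor's illustrative sketch (not part of the patch): non-ASCII path/query characters are
# percent-encoded as UTF-8 while already-ASCII parts of the IRI are left intact.
assert iri_to_uri('http://example.com/caf\u00e9?q=\u00e9') == 'http://example.com/caf%C3%A9?q=%C3%A9'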
 
@@ -6208,19 +4868,16 @@ def iri_to_uri(iri):
 def to_high_limit_path(path):
     if sys.platform in ['win32', 'cygwin']:
         # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
-        return r'\\?\ '.rstrip() + os.path.abspath(path)
+        return '\\\\?\\' + os.path.abspath(path)
 
     return path
 
 
 def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
-    if field is None:
-        val = obj if obj is not None else default
-    else:
-        val = obj.get(field, default)
-    if func and val not in ignore:
-        val = func(val)
-    return template % val if val not in ignore else default
+    val = traverse_obj(obj, *variadic(field))
+    if val in ignore:
+        return default
+    return template % (func(val) if func else val)
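# Editor's illustrative sketch (not part of the patch) of the traverse_obj-backed lookup:
assert format_field({'height': 720}, 'height', '%dp') == '720p'
assert format_field({'height': None}, 'height', '%dp', default='unknown') == 'unknown'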
 
 
 def clean_podcast_url(url):
@@ -6253,43 +4910,33 @@ def make_dir(path, to_screen=None):
         if dn and not os.path.exists(dn):
             os.makedirs(dn)
         return True
-    except (OSError, IOError) as err:
+    except OSError as err:
         if callable(to_screen) is not None:
             to_screen('unable to create directory ' + error_to_compat_str(err))
         return False
 
 
 def get_executable_path():
-    from zipimport import zipimporter
-    if hasattr(sys, 'frozen'):  # Running from PyInstaller
-        path = os.path.dirname(sys.executable)
-    elif isinstance(globals().get('__loader__'), zipimporter):  # Running from ZIP
-        path = os.path.join(os.path.dirname(__file__), '../..')
-    else:
-        path = os.path.join(os.path.dirname(__file__), '..')
-    return os.path.abspath(path)
+    from .update import _get_variant_and_executable_path
+
+    return os.path.dirname(os.path.abspath(_get_variant_and_executable_path()[1]))
 
 
 def load_plugins(name, suffix, namespace):
-    plugin_info = [None]
-    classes = []
-    try:
-        plugin_info = imp.find_module(
-            name, [os.path.join(get_executable_path(), 'ytdlp_plugins')])
-        plugins = imp.load_module(name, *plugin_info)
+    classes = {}
+    with contextlib.suppress(FileNotFoundError):
+        plugins_spec = importlib.util.spec_from_file_location(
+            name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
+        plugins = importlib.util.module_from_spec(plugins_spec)
+        sys.modules[plugins_spec.name] = plugins
+        plugins_spec.loader.exec_module(plugins)
         for name in dir(plugins):
             if name in namespace:
                 continue
             if not name.endswith(suffix):
                 continue
             klass = getattr(plugins, name)
-            classes.append(klass)
-            namespace[name] = klass
-    except ImportError:
-        pass
-    finally:
-        if plugin_info[0] is not None:
-            plugin_info[0].close()
+            classes[name] = namespace[name] = klass
     return classes
 
 
@@ -6298,10 +4945,14 @@ def traverse_obj(
         casesense=True, is_user_input=False, traverse_string=False):
     ''' Traverse nested list/dict/tuple
     @param path_list        A list of paths which are checked one by one.
-                            Each path is a list of keys where each key is a string,
-                            a tuple of strings or "...". When a tuple is given,
-                            all the keys given in the tuple are traversed, and
-                            "..." traverses all the keys in the object
+                            Each path is a list of keys where each key is a:
+                              - None:     Do nothing
+                              - string:   A dictionary key
+                              - int:      An index into a list
+                              - tuple:    A list of keys all of which will be traversed
+                              - Ellipsis: Fetch all values in the object
+                              - Function: Takes the key and value as arguments
+                                          and returns whether the key matches or not
     @param default          Default value to return
     @param expected_type    Only accept final value of this type (Can also be any callable)
     @param get_all          Return all the values obtained from a path or only the first one
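# Editor's illustrative sketch of the path syntax described above (not part of the patch);
# the sample data is hypothetical and get_all is left at its default (True).
data = {'formats': [{'url': 'https://a.example'}, {'url': 'https://b.example', 'height': 720}]}
assert traverse_obj(data, ('formats', 0, 'url')) == 'https://a.example'
assert traverse_obj(data, ('formats', ..., 'height')) == [720]
assert traverse_obj(data, ('formats', lambda _, v: v.get('height'), 'url')) == ['https://b.example']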
@@ -6318,10 +4969,10 @@ def traverse_obj(
 
     def _traverse_obj(obj, path, _current_depth=0):
         nonlocal depth
-        if obj is None:
-            return None
         path = tuple(variadic(path))
         for i, key in enumerate(path):
+            if None in (key, obj):
+                return obj
             if isinstance(key, (list, tuple)):
                 obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                 key = ...
@@ -6332,6 +4983,18 @@ def _traverse_obj(obj, path, _current_depth=0):
                 _current_depth += 1
                 depth = max(depth, _current_depth)
                 return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
+            elif callable(key):
+                if isinstance(obj, (list, tuple, LazyList)):
+                    obj = enumerate(obj)
+                elif isinstance(obj, dict):
+                    obj = obj.items()
+                else:
+                    if not traverse_string:
+                        return None
+                    obj = str(obj)
+                _current_depth += 1
+                depth = max(depth, _current_depth)
+                return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
             elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                 obj = (obj.get(key) if casesense or (key in obj)
                        else next((v for k, v in obj.items() if _lower(k) == key), None))
@@ -6378,15 +5041,35 @@ def _traverse_obj(obj, path, _current_depth=0):
 
 
 def traverse_dict(dictn, keys, casesense=True):
-    ''' For backward compatibility. Do not use '''
-    return traverse_obj(dictn, keys, casesense=casesense,
-                        is_user_input=True, traverse_string=True)
+    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
+                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
+    return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
+
+
+def get_first(obj, keys, **kwargs):
+    return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
 
 
-def variadic(x, allowed_types=(str, bytes)):
+def variadic(x, allowed_types=(str, bytes, dict)):
     return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
 
 
+def decode_base(value, digits):
+    # This will convert given base-x string to scalar (long or int)
+    table = {char: index for index, char in enumerate(digits)}
+    result = 0
+    base = len(digits)
+    for chr in value:
+        result *= base
+        result += table[chr]
+    return result
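# Tiny sanity check (editor's illustration, not part of the patch): base-16 with lowercase hex digits.
assert decode_base('ff', '0123456789abcdef') == 255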
+
+
+def time_seconds(**kwargs):
+    t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
+    return t.timestamp()
+
+
 # create a JSON Web Signature (jws) with HS256 algorithm
 # the resulting format is in JWS Compact Serialization
 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
 # create a JSON Web Signature (jws) with HS256 algorithm
 # the resulting format is in JWS Compact Serialization
 # implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
@@ -6398,9 +5081,309 @@ def jwt_encode_hs256(payload_data, key, headers={}):
     }
     if headers:
         header_data.update(headers)
     }
     if headers:
         header_data.update(headers)
-    header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
-    payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
-    h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
+    header_b64 = base64.b64encode(json.dumps(header_data).encode())
+    payload_b64 = base64.b64encode(json.dumps(payload_data).encode())
+    h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256)
     signature_b64 = base64.b64encode(h.digest())
     token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
     return token
+
+
+# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
+def jwt_decode_hs256(jwt):
+    header_b64, payload_b64, signature_b64 = jwt.split('.')
+    payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
+    return payload_data
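# Editor's illustrative round trip (not part of the patch): the payload of an HS256 token can be read back.
token = jwt_encode_hs256({'uid': 123}, 'secret').decode()
assert jwt_decode_hs256(token)['uid'] == 123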
+
+
+WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None
+
+
+@functools.cache
+def supports_terminal_sequences(stream):
+    if compat_os_name == 'nt':
+        if not WINDOWS_VT_MODE:
+            return False
+    elif not os.getenv('TERM'):
+        return False
+    try:
+        return stream.isatty()
+    except BaseException:
+        return False
+
+
+def windows_enable_vt_mode():  # TODO: Do this the proper way https://bugs.python.org/issue30075
+    if get_windows_version() < (10, 0, 10586):
+        return
+    global WINDOWS_VT_MODE
+    startupinfo = subprocess.STARTUPINFO()
+    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+    try:
+        subprocess.Popen('', shell=True, startupinfo=startupinfo).wait()
+    except Exception:
+        return
+
+    WINDOWS_VT_MODE = True
+    supports_terminal_sequences.cache_clear()
+
+
+_terminal_sequences_re = re.compile('\033\\[[^m]+m')
+
+
+def remove_terminal_sequences(string):
+    return _terminal_sequences_re.sub('', string)
+
+
+def number_of_digits(number):
+    return len('%d' % number)
+
+
+def join_nonempty(*values, delim='-', from_dict=None):
+    if from_dict is not None:
+        values = map(from_dict.get, values)
+    return delim.join(map(str, filter(None, values)))
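# Editor's illustrative sketch (not part of the patch): falsy values are dropped before joining.
assert join_nonempty('720p', None, '', 'dash') == '720p-dash'
assert join_nonempty('id', 'ext', from_dict={'id': 'abc', 'ext': 'mp4'}) == 'abc-mp4'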
+
+
+def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re):
+    """
+    Find the largest format dimensions in terms of video width and, for each thumbnail:
+    * Modify the URL: Match the width with the provided regex and replace with the former width
+    * Update dimensions
+
+    This function is useful with video services that scale the provided thumbnails on demand
+    """
+    _keys = ('width', 'height')
+    max_dimensions = max(
+        (tuple(format.get(k) or 0 for k in _keys) for format in formats),
+        default=(0, 0))
+    if not max_dimensions[0]:
+        return thumbnails
+    return [
+        merge_dicts(
+            {'url': re.sub(url_width_re, str(max_dimensions[0]), thumbnail['url'])},
+            dict(zip(_keys, max_dimensions)), thumbnail)
+        for thumbnail in thumbnails
+    ]
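# Editor's illustrative sketch (not part of the patch); the URLs and the width regex are hypothetical.
formats = [{'width': 1280, 'height': 720}, {'width': 640, 'height': 360}]
thumbs = [{'url': 'https://cdn.example/thumb_320.jpg', 'width': 320, 'height': 180}]
thumbs = scale_thumbnails_to_max_format_width(formats, thumbs, r'(?<=thumb_)\d+')
assert thumbs[0]['url'] == 'https://cdn.example/thumb_1280.jpg'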
+
+
+def parse_http_range(range):
+    """ Parse value of "Range" or "Content-Range" HTTP header into tuple. """
+    if not range:
+        return None, None, None
+    crg = re.search(r'bytes[ =](\d+)-(\d+)?(?:/(\d+))?', range)
+    if not crg:
+        return None, None, None
+    return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3))
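# Editor's illustrative sketch (not part of the patch): start, end and total length when present.
assert parse_http_range('bytes 0-499/1234') == (0, 499, 1234)
assert parse_http_range('bytes=500-') == (500, None, None)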
+
+
+def read_stdin(what):
+    eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D'
+    write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n')
+    return sys.stdin
+
+
+class Config:
+    own_args = None
+    parsed_args = None
+    filename = None
+    __initialized = False
+
+    def __init__(self, parser, label=None):
+        self.parser, self.label = parser, label
+        self._loaded_paths, self.configs = set(), []
+
+    def init(self, args=None, filename=None):
+        assert not self.__initialized
+        directory = ''
+        if filename:
+            location = os.path.realpath(filename)
+            directory = os.path.dirname(location)
+            if location in self._loaded_paths:
+                return False
+            self._loaded_paths.add(location)
+
+        self.own_args, self.__initialized = args, True
+        opts, _ = self.parser.parse_known_args(args)
+        self.parsed_args, self.filename = args, filename
+
+        for location in opts.config_locations or []:
+            if location == '-':
+                self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
+                continue
+            location = os.path.join(directory, expand_path(location))
+            if os.path.isdir(location):
+                location = os.path.join(location, 'yt-dlp.conf')
+            if not os.path.exists(location):
+                self.parser.error(f'config location {location} does not exist')
+            self.append_config(self.read_file(location), location)
+        return True
+
+    def __str__(self):
+        label = join_nonempty(
+            self.label, 'config', f'"{self.filename}"' if self.filename else '',
+            delim=' ')
+        return join_nonempty(
+            self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
+            *(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
+            delim='\n')
+
+    @staticmethod
+    def read_file(filename, default=[]):
+        try:
+            optionf = open(filename)
+        except OSError:
+            return default  # silently skip if file is not present
+        try:
+            # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
+            contents = optionf.read()
+            res = shlex.split(contents, comments=True)
+        finally:
+            optionf.close()
+        return res
+
+    @staticmethod
+    def hide_login_info(opts):
+        PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'}
+        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
+
+        def _scrub_eq(o):
+            m = eqre.match(o)
+            if m:
+                return m.group('key') + '=PRIVATE'
+            else:
+                return o
+
+        opts = list(map(_scrub_eq, opts))
+        for idx, opt in enumerate(opts):
+            if opt in PRIVATE_OPTS and idx + 1 < len(opts):
+                opts[idx + 1] = 'PRIVATE'
+        return opts
+
+    def append_config(self, *args, label=None):
+        config = type(self)(self.parser, label)
+        config._loaded_paths = self._loaded_paths
+        if config.init(*args):
+            self.configs.append(config)
+
+    @property
+    def all_args(self):
+        for config in reversed(self.configs):
+            yield from config.all_args
+        yield from self.parsed_args or []
+
+    def parse_known_args(self, **kwargs):
+        return self.parser.parse_known_args(self.all_args, **kwargs)
+
+    def parse_args(self):
+        return self.parser.parse_args(self.all_args)
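# Editor's illustrative sketch (not part of the patch): credentials are scrubbed from echoed arguments.
assert Config.hide_login_info(['-u', 'foo', '-v', '--video-password=bar']) == ['-u', 'PRIVATE', '-v', '--video-password=PRIVATE']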
+
+
+class WebSocketsWrapper():
+    """Wraps websockets module to use in non-async scopes"""
+    pool = None
+
+    def __init__(self, url, headers=None, connect=True):
+        self.loop = asyncio.new_event_loop()
+        # XXX: "loop" is deprecated
+        self.conn = websockets.connect(
+            url, extra_headers=headers, ping_interval=None,
+            close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf'))
+        if connect:
+            self.__enter__()
+        atexit.register(self.__exit__, None, None, None)
+
+    def __enter__(self):
+        if not self.pool:
+            self.pool = self.run_with_loop(self.conn.__aenter__(), self.loop)
+        return self
+
+    def send(self, *args):
+        self.run_with_loop(self.pool.send(*args), self.loop)
+
+    def recv(self, *args):
+        return self.run_with_loop(self.pool.recv(*args), self.loop)
+
+    def __exit__(self, type, value, traceback):
+        try:
+            return self.run_with_loop(self.conn.__aexit__(type, value, traceback), self.loop)
+        finally:
+            self.loop.close()
+            self._cancel_all_tasks(self.loop)
+
+    # taken from https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py with modifications
+    # for contributors: If there's any new library using asyncio needs to be run in non-async, move these function out of this class
+    @staticmethod
+    def run_with_loop(main, loop):
+        if not asyncio.iscoroutine(main):
+            raise ValueError(f'a coroutine was expected, got {main!r}')
+
+        try:
+            return loop.run_until_complete(main)
+        finally:
+            loop.run_until_complete(loop.shutdown_asyncgens())
+            if hasattr(loop, 'shutdown_default_executor'):
+                loop.run_until_complete(loop.shutdown_default_executor())
+
+    @staticmethod
+    def _cancel_all_tasks(loop):
+        to_cancel = asyncio.all_tasks(loop)
+
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+
+        # XXX: "loop" is removed in python 3.10+
+        loop.run_until_complete(
+            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
+
+        for task in to_cancel:
+            if task.cancelled():
+                continue
+            if task.exception() is not None:
+                loop.call_exception_handler({
+                    'message': 'unhandled exception during asyncio.run() shutdown',
+                    'exception': task.exception(),
+                    'task': task,
+                })
+
+
+def merge_headers(*dicts):
+    """Merge dicts of http headers case insensitively, prioritizing the latter ones"""
+    return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
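# Editor's illustrative sketch (not part of the patch): later dicts win and keys are normalised to Title-Case.
assert merge_headers({'user-agent': 'UA1', 'Accept': '*/*'}, {'User-Agent': 'UA2'}) == {'User-Agent': 'UA2', 'Accept': '*/*'}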
+
+
+class classproperty:
+    """classmethod(property(func)) that works in py < 3.9"""
+
+    def __init__(self, func):
+        functools.update_wrapper(self, func)
+        self.func = func
+
+    def __get__(self, _, cls):
+        return self.func(cls)
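# Editor's illustrative sketch (not part of the patch); the _Example class is hypothetical.
# The value is computed from the class itself, without instantiating it.
class _Example:
    name = 'example'

    @classproperty
    def upper_name(cls):
        return cls.name.upper()

assert _Example.upper_name == 'EXAMPLE'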
+
+
+class Namespace:
+    """Immutable namespace"""
+
+    def __init__(self, **kwargs):
+        self._dict = kwargs
+
+    def __getattr__(self, attr):
+        return self._dict[attr]
+
+    def __contains__(self, item):
+        return item in self._dict.values()
+
+    def __iter__(self):
+        return iter(self._dict.items())
+
+    def __repr__(self):
+        return f'{type(self).__name__}({", ".join(f"{k}={v}" for k, v in self)})'
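# Editor's illustrative sketch (not part of the patch): attribute access, value membership and iteration over items.
ns = Namespace(audio='m4a', video='mp4')
assert ns.audio == 'm4a'
assert 'mp4' in ns                       # __contains__ checks the values
assert dict(ns) == {'audio': 'm4a', 'video': 'mp4'}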
+
+
+# Deprecated
+has_certifi = bool(certifi)
+has_websockets = bool(websockets)