from __future__ import unicode_literals

import binascii
import collections
import email
import getpass
import io
import itertools
import optparse
import os
import re
import shlex
import shutil
import socket
import subprocess
import sys
import xml.etree.ElementTree
# Py3 urllib submodules vs their Py2 counterparts; each shim binds a single
# compat_* name regardless of interpreter version.
try:
    import urllib.request as compat_urllib_request
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError:  # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError:  # Python 2
    import urllib as compat_urllib_parse

try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError:  # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse

try:
    import urllib.parse as compat_urlparse
except ImportError:  # Python 2
    import urlparse as compat_urlparse

try:
    import urllib.response as compat_urllib_response
except ImportError:  # Python 2
    import urllib as compat_urllib_response
# Cookie, HTML-entity and HTTP-client modules were renamed between Py2 and Py3.
try:
    import http.cookiejar as compat_cookiejar
except ImportError:  # Python 2
    import cookielib as compat_cookiejar

try:
    import http.cookies as compat_cookies
except ImportError:  # Python 2
    import Cookie as compat_cookies

try:
    import html.entities as compat_html_entities
except ImportError:  # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import http.client as compat_http_client
except ImportError:  # Python 2
    import httplib as compat_http_client
# Individual names that moved between modules across the 2/3 split.
try:
    from urllib.error import HTTPError as compat_HTTPError
except ImportError:  # Python 2
    from urllib2 import HTTPError as compat_HTTPError

try:
    from urllib.request import urlretrieve as compat_urlretrieve
except ImportError:  # Python 2
    from urllib import urlretrieve as compat_urlretrieve

try:
    from html.parser import HTMLParser as compat_HTMLParser
except ImportError:  # Python 2
    from HTMLParser import HTMLParser as compat_HTMLParser
# subprocess.DEVNULL exists only on Python >= 3.3; older versions fall back to
# opening os.devnull for writing. Exposed as a factory so callers get a fresh
# handle in the fallback case.
try:
    from subprocess import DEVNULL
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
# http.server (Py3) vs BaseHTTPServer (Py2).
try:
    import http.server as compat_http_server
except ImportError:  # Python 2
    import BaseHTTPServer as compat_http_server
# Text type: `unicode` on Python 2, `str` on Python 3.
try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str
try:
    from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
    from urllib.parse import unquote as compat_urllib_parse_unquote
    from urllib.parse import unquote_plus as compat_urllib_parse_unquote_plus
except ImportError:  # Python 2
    _asciire = (compat_urllib_parse._asciire if hasattr(compat_urllib_parse, '_asciire')
                else re.compile('([\x00-\x7f]+)'))

    # HACK: The following are the correct unquote_to_bytes, unquote and unquote_plus
    # implementations from cpython 3.4.3's stdlib. Python 2's version
    # is apparently broken (see https://github.com/rg3/youtube-dl/pull/6244)

    def compat_urllib_parse_unquote_to_bytes(string):
        """unquote_to_bytes('abc%20def') -> b'abc def'."""
        # Note: strings are encoded as UTF-8. This is only an issue if it contains
        # unescaped non-ASCII characters, which URIs should not.
        if not string:
            # Is it a string-like object?
            string.split
            return b''
        if isinstance(string, compat_str):
            string = string.encode('utf-8')
        bits = string.split(b'%')
        if len(bits) == 1:
            return string
        res = [bits[0]]
        append = res.append
        for item in bits[1:]:
            try:
                append(compat_urllib_parse._hextochr[item[:2]])
                append(item[2:])
            except KeyError:
                # Not a valid %xx escape: keep the literal '%' and the payload.
                append(b'%')
                append(item)
        return b''.join(res)

    def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
        """Replace %xx escapes by their single-character equivalent. The optional
        encoding and errors parameters specify how to decode percent-encoded
        sequences into Unicode characters, as accepted by the bytes.decode()
        method.
        By default, percent-encoded sequences are decoded with UTF-8, and invalid
        sequences are replaced by a placeholder character.

        unquote('abc%20def') -> 'abc def'.
        """
        if '%' not in string:
            # Is it a string-like object?
            string.split
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        bits = _asciire.split(string)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(compat_urllib_parse_unquote_to_bytes(bits[i]).decode(encoding, errors))
            append(bits[i + 1])
        return ''.join(res)

    def compat_urllib_parse_unquote_plus(string, encoding='utf-8', errors='replace'):
        """Like unquote(), but also replace plus signs by spaces, as required for
        unquoting HTML form values.

        unquote_plus('%7e/abc+def') -> '~/abc def'
        """
        string = string.replace('+', ' ')
        return compat_urllib_parse_unquote(string, encoding, errors)
try:
    from urllib.parse import urlencode as compat_urllib_parse_urlencode
except ImportError:  # Python 2
    # Python 2 will choke in urlencode on mixture of byte and unicode strings.
    # Possible solutions are to either port it from python 3 with all
    # the friends or manually ensure input query contains only byte strings.
    # We will stick with latter thus recursively encoding the whole query.
    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
        def encode_elem(e):
            # Recursively coerce dicts/lists/tuples/str elements to bytes.
            if isinstance(e, dict):
                e = encode_dict(e)
            elif isinstance(e, (list, tuple,)):
                list_e = encode_list(e)
                e = tuple(list_e) if isinstance(e, tuple) else list_e
            elif isinstance(e, compat_str):
                e = e.encode(encoding)
            return e

        def encode_dict(d):
            return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())

        def encode_list(l):
            return [encode_elem(e) for e in l]

        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
try:
    from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError:  # Python < 3.4
    # Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
    class compat_urllib_request_DataHandler(compat_urllib_request.BaseHandler):
        def data_open(self, req):
            # data URLs as specified in RFC 2397.
            #
            # ignores POSTed data
            #
            # syntax:
            # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data := *urlchar
            # parameter := attribute "=" value
            url = req.get_full_url()

            scheme, data = url.split(':', 1)
            mediatype, data = data.split(',', 1)

            # even base64 encoded data URLs might be quoted so unquote in any case:
            data = compat_urllib_parse_unquote_to_bytes(data)
            if mediatype.endswith(';base64'):
                data = binascii.a2b_base64(data)
                mediatype = mediatype[:-7]

            if not mediatype:
                mediatype = 'text/plain;charset=US-ASCII'

            headers = email.message_from_string(
                'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))

            return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
# basestring/unichr exist only on Python 2; fall back to their Py3 equivalents.
try:
    compat_basestring = basestring  # Python 2
except NameError:
    compat_basestring = str

try:
    compat_chr = unichr  # Python 2
except NameError:
    compat_chr = chr
# ElementTree.ParseError appeared in Python 2.7; 2.6 raises expat's ExpatError.
try:
    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError:  # Python 2.6
    from xml.parsers.expat import ExpatError as compat_xml_parse_error
if sys.version_info[0] >= 3:
    compat_etree_fromstring = xml.etree.ElementTree.fromstring
else:
    # python 2.x tries to encode unicode strings with ascii (see the
    # XMLParser._fixtext method)
    etree = xml.etree.ElementTree

    try:
        _etree_iter = etree.Element.iter
    except AttributeError:  # Python <=2.6
        def _etree_iter(root):
            # Depth-first traversal replacement for Element.iter().
            for el in root.findall('*'):
                yield el
                for sub in _etree_iter(el):
                    yield sub

    # on 2.6 XML doesn't have a parser argument, function copied from CPython
    # 2.7 source
    def _XML(text, parser=None):
        if not parser:
            parser = etree.XMLParser(target=etree.TreeBuilder())
        parser.feed(text)
        return parser.close()

    def _element_factory(*args, **kwargs):
        # Decode byte-string attribute values to unicode as they are created.
        el = etree.Element(*args, **kwargs)
        for k, v in el.items():
            if isinstance(v, bytes):
                el.set(k, v.decode('utf-8'))
        return el

    def compat_etree_fromstring(text):
        doc = _XML(text, parser=etree.XMLParser(target=etree.TreeBuilder(element_factory=_element_factory)))
        # Post-pass: decode any byte-string element text to unicode.
        for el in _etree_iter(doc):
            if el.text is not None and isinstance(el.text, bytes):
                el.text = el.text.decode('utf-8')
        return doc
if sys.version_info < (2, 7):
    # Here comes the crazy part: In 2.6, if the xpath is a unicode,
    # .//node does not match if a node is a direct child of . !
    def compat_xpath(xpath):
        if isinstance(xpath, compat_str):
            xpath = xpath.encode('ascii')
        return xpath
else:
    compat_xpath = lambda xpath: xpath
try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError:  # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, compat_str
        # Pairs may be separated by either '&' or ';'.
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError('bad query field: %r' % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = compat_urllib_parse_unquote(
                    name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = compat_urllib_parse_unquote(
                    value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        # Repeated names accumulate into a list, matching the stdlib contract.
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
try:
    from shlex import quote as shlex_quote
except ImportError:  # Python < 3.3
    def shlex_quote(s):
        # Safe characters need no quoting; otherwise single-quote and escape
        # embedded single quotes with the '"'"' trick.
        if re.match(r'^[-_\w./]+$', s):
            return s
        else:
            return "'" + s.replace("'", "'\"'\"'") + "'"
if sys.version_info >= (2, 7, 3):
    compat_shlex_split = shlex.split
else:
    # Working around shlex issue with unicode strings on some python 2
    # versions (see http://bugs.python.org/issue1548891)
    def compat_shlex_split(s, comments=False, posix=True):
        if isinstance(s, compat_str):
            s = s.encode('utf-8')
        return shlex.split(s, comments, posix)
# Jython reports os.name == 'java'; the underlying platform name lives in
# os._name there. Everywhere else, os.name is used directly.
compat_os_name = os._name if os.name == 'java' else os.name
if sys.version_info >= (3, 0):
    compat_getenv = os.getenv
    compat_expanduser = os.path.expanduser
else:
    # Environment variables should be decoded with filesystem encoding.
    # Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
    def compat_getenv(key, default=None):
        from .utils import get_filesystem_encoding
        env = os.getenv(key, default)
        if env:
            env = env.decode(get_filesystem_encoding())
        return env

    # HACK: The default implementations of os.path.expanduser from cpython do not decode
    # environment variables with filesystem encoding. We will work around this by
    # providing adjusted implementations.
    # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
    # for different platforms with correct environment variables decoding.

    if compat_os_name == 'posix':
        def compat_expanduser(path):
            """Expand ~ and ~user constructions. If user or $HOME is unknown,
            do nothing."""
            if not path.startswith('~'):
                return path
            i = path.find('/', 1)
            if i < 0:
                i = len(path)
            if i == 1:
                if 'HOME' not in os.environ:
                    import pwd
                    userhome = pwd.getpwuid(os.getuid()).pw_dir
                else:
                    userhome = compat_getenv('HOME')
            else:
                import pwd
                try:
                    pwent = pwd.getpwnam(path[1:i])
                except KeyError:
                    return path
                userhome = pwent.pw_dir
            userhome = userhome.rstrip('/')
            return (userhome + path[i:]) or '/'
    elif compat_os_name == 'nt' or compat_os_name == 'ce':
        def compat_expanduser(path):
            """Expand ~ and ~user constructs.

            If user or $HOME is unknown, do nothing."""
            if path[:1] != '~':
                return path
            i, n = 1, len(path)
            while i < n and path[i] not in '/\\':
                i = i + 1

            if 'HOME' in os.environ:
                userhome = compat_getenv('HOME')
            elif 'USERPROFILE' in os.environ:
                userhome = compat_getenv('USERPROFILE')
            elif 'HOMEPATH' not in os.environ:
                return path
            else:
                try:
                    drive = compat_getenv('HOMEDRIVE')
                except KeyError:
                    drive = ''
                userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

            if i != 1:  # ~user
                userhome = os.path.join(os.path.dirname(userhome), path[1:i])

            return userhome + path[i:]
    else:
        compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
    def compat_print(s):
        # Encode with the user's preferred encoding; unencodable characters
        # become XML character references instead of raising.
        from .utils import preferredencoding
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert isinstance(s, compat_str)
        print(s)
try:
    subprocess_check_output = subprocess.check_output
except AttributeError:  # Python 2.6 lacks check_output
    def subprocess_check_output(*args, **kwargs):
        assert 'input' not in kwargs
        p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
        output, _ = p.communicate()
        ret = p.poll()
        if ret:
            raise subprocess.CalledProcessError(ret, p.args, output=output)
        return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
    # Python 2 on Windows chokes on unicode prompts; encode them first.
    def compat_getpass(prompt, *args, **kwargs):
        if isinstance(prompt, compat_str):
            from .utils import preferredencoding
            prompt = prompt.encode(preferredencoding())
        return getpass.getpass(prompt, *args, **kwargs)
else:
    compat_getpass = getpass.getpass
try:
    # Python < 2.6.5 require kwargs to be bytes
    def _testfunc(**kwargs):
        pass
    _testfunc(**{'x': 0})
    compat_kwargs = lambda kwargs: kwargs
except TypeError:
    def compat_kwargs(kwargs):
        return dict((bytes(k), v) for k, v in kwargs.items())
if sys.version_info < (2, 7):
    # 2.6 socket.create_connection does not accept a source_address argument.
    def compat_socket_create_connection(address, timeout, source_address=None):
        host, port = address
        err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            sock = None
            try:
                sock = socket.socket(af, socktype, proto)
                sock.settimeout(timeout)
                if source_address:
                    sock.bind(source_address)
                sock.connect(sa)
                return sock
            except socket.error as _:
                err = _
                if sock is not None:
                    sock.close()
        if err is not None:
            raise err
        else:
            raise socket.error('getaddrinfo returns an empty list')
else:
    compat_socket_create_connection = socket.create_connection
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
    op = optparse.OptionParser()
    og = optparse.OptionGroup(op, 'foo')
    try:
        # Probe: affected interpreters raise TypeError on unicode option strings.
        og.add_option('-t')
    except TypeError:
        real_add_option = optparse.OptionGroup.add_option

        def _compat_add_option(self, *args, **kwargs):
            enc = lambda v: (
                v.encode('ascii', 'replace') if isinstance(v, compat_str)
                else v)
            bargs = [enc(a) for a in args]
            bkwargs = dict(
                (k, enc(v)) for k, v in kwargs.items())
            return real_add_option(self, *bargs, **bkwargs)
        optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
    compat_get_terminal_size = shutil.get_terminal_size
else:
    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])

    def compat_get_terminal_size(fallback=(80, 24)):
        # Mirror shutil.get_terminal_size: env vars win, then `stty size`,
        # then the provided fallback.
        columns = compat_getenv('COLUMNS')
        if columns:
            columns = int(columns)
        else:
            columns = None
        lines = compat_getenv('LINES')
        if lines:
            lines = int(lines)
        else:
            lines = None

        if columns is None or lines is None or columns <= 0 or lines <= 0:
            try:
                sp = subprocess.Popen(
                    ['stty', 'size'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = sp.communicate()
                _lines, _columns = map(int, out.split())
            except Exception:
                _columns, _lines = _terminal_size(*fallback)

            if columns is None or columns <= 0:
                columns = _columns
            if lines is None or lines <= 0:
                lines = _lines
        return _terminal_size(columns, lines)
try:
    # Probe: 2.6 itertools.count takes no keyword arguments.
    itertools.count(start=0, step=1)
    compat_itertools_count = itertools.count
except TypeError:  # Python 2.6
    def compat_itertools_count(start=0, step=1):
        count = start
        while True:
            yield count
            count += step
if sys.version_info >= (3, 0):
    from tokenize import tokenize as compat_tokenize_tokenize
else:
    from tokenize import generate_tokens as compat_tokenize_tokenize
# Public API of this compat module; ASCII-sorted.
__all__ = [
    'compat_HTMLParser',
    'compat_HTTPError',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_cookies',
    'compat_etree_fromstring',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
    'compat_html_entities',
    'compat_http_client',
    'compat_http_server',
    'compat_itertools_count',
    'compat_kwargs',
    'compat_os_name',
    'compat_parse_qs',
    'compat_print',
    'compat_shlex_split',
    'compat_socket_create_connection',
    'compat_str',
    'compat_subprocess_get_DEVNULL',
    'compat_tokenize_tokenize',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_unquote_plus',
    'compat_urllib_parse_unquote_to_bytes',
    'compat_urllib_parse_urlencode',
    'compat_urllib_parse_urlparse',
    'compat_urllib_request',
    'compat_urllib_request_DataHandler',
    'compat_urllib_response',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_xml_parse_error',
    'compat_xpath',
    'shlex_quote',
    'subprocess_check_output',
    'workaround_optparse_bug9161',
]