# jfr.im git - yt-dlp.git/blob - yt_dlp/utils.py
# [postprocessor] Add plugin support
# [yt-dlp.git] / yt_dlp / utils.py
1 #!/usr/bin/env python3
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5
6 import base64
7 import binascii
8 import calendar
9 import codecs
10 import collections
11 import contextlib
12 import ctypes
13 import datetime
14 import email.utils
15 import email.header
16 import errno
17 import functools
18 import gzip
19 import hashlib
20 import hmac
21 import imp
22 import io
23 import itertools
24 import json
25 import locale
26 import math
27 import operator
28 import os
29 import platform
30 import random
31 import re
32 import socket
33 import ssl
34 import subprocess
35 import sys
36 import tempfile
37 import time
38 import traceback
39 import xml.etree.ElementTree
40 import zlib
41
42 from .compat import (
43 compat_HTMLParseError,
44 compat_HTMLParser,
45 compat_HTTPError,
46 compat_basestring,
47 compat_chr,
48 compat_cookiejar,
49 compat_ctypes_WINFUNCTYPE,
50 compat_etree_fromstring,
51 compat_expanduser,
52 compat_html_entities,
53 compat_html_entities_html5,
54 compat_http_client,
55 compat_integer_types,
56 compat_numeric_types,
57 compat_kwargs,
58 compat_os_name,
59 compat_parse_qs,
60 compat_shlex_quote,
61 compat_str,
62 compat_struct_pack,
63 compat_struct_unpack,
64 compat_urllib_error,
65 compat_urllib_parse,
66 compat_urllib_parse_urlencode,
67 compat_urllib_parse_urlparse,
68 compat_urllib_parse_urlunparse,
69 compat_urllib_parse_quote,
70 compat_urllib_parse_quote_plus,
71 compat_urllib_parse_unquote_plus,
72 compat_urllib_request,
73 compat_urlparse,
74 compat_xpath,
75 )
76
77 from .socks import (
78 ProxyType,
79 sockssocket,
80 )
81
82
def register_socks_protocols():
    """Make urlparse treat SOCKS URL schemes as netloc-carrying.

    In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904:
    URLs with protocols not listed in urlparse.uses_netloc are not handled
    correctly, so each SOCKS scheme is appended to that list (once).
    """
    known_schemes = compat_urlparse.uses_netloc
    for socks_scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if socks_scheme not in known_schemes:
            known_schemes.append(socks_scheme)
90
91
# This is not clearly defined otherwise
# Type of a compiled regular expression, obtained by compiling an empty
# pattern; used for isinstance() checks since no public alias is available
# on all interpreters this file supports.
compiled_regex_type = type(re.compile(''))
94
95
def random_user_agent():
    """Return a Chrome-on-Windows HTTP User-Agent string with a randomly
    selected Chrome version.

    The version is drawn with random.choice() from a fixed tuple of
    historical Chrome version numbers (roughly 68.x through 76.x, listed
    roughly newest-first), substituted into a Windows 10 x64 UA template.
    Presumably used as the default User-Agent for outgoing requests to
    avoid a single static fingerprint — each call may yield a different,
    plausible-looking UA.
    """
    # UA template: Windows NT 10.0 x64, WebKit/Blink; %s receives the
    # Chrome version chosen below.
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    # Pool of real Chrome release/snapshot version numbers. Kept as a
    # literal tuple constant; do not reorder or dedupe — the exact contents
    # define the distribution of generated user agents.
    _CHROME_VERSIONS = (
        '74.0.3729.129',
        '76.0.3780.3',
        '76.0.3780.2',
        '74.0.3729.128',
        '76.0.3780.1',
        '76.0.3780.0',
        '75.0.3770.15',
        '74.0.3729.127',
        '74.0.3729.126',
        '76.0.3779.1',
        '76.0.3779.0',
        '75.0.3770.14',
        '74.0.3729.125',
        '76.0.3778.1',
        '76.0.3778.0',
        '75.0.3770.13',
        '74.0.3729.124',
        '74.0.3729.123',
        '73.0.3683.121',
        '76.0.3777.1',
        '76.0.3777.0',
        '75.0.3770.12',
        '74.0.3729.122',
        '76.0.3776.4',
        '75.0.3770.11',
        '74.0.3729.121',
        '76.0.3776.3',
        '76.0.3776.2',
        '73.0.3683.120',
        '74.0.3729.120',
        '74.0.3729.119',
        '74.0.3729.118',
        '76.0.3776.1',
        '76.0.3776.0',
        '76.0.3775.5',
        '75.0.3770.10',
        '74.0.3729.117',
        '76.0.3775.4',
        '76.0.3775.3',
        '74.0.3729.116',
        '75.0.3770.9',
        '76.0.3775.2',
        '76.0.3775.1',
        '76.0.3775.0',
        '75.0.3770.8',
        '74.0.3729.115',
        '74.0.3729.114',
        '76.0.3774.1',
        '76.0.3774.0',
        '75.0.3770.7',
        '74.0.3729.113',
        '74.0.3729.112',
        '74.0.3729.111',
        '76.0.3773.1',
        '76.0.3773.0',
        '75.0.3770.6',
        '74.0.3729.110',
        '74.0.3729.109',
        '76.0.3772.1',
        '76.0.3772.0',
        '75.0.3770.5',
        '74.0.3729.108',
        '74.0.3729.107',
        '76.0.3771.1',
        '76.0.3771.0',
        '75.0.3770.4',
        '74.0.3729.106',
        '74.0.3729.105',
        '75.0.3770.3',
        '74.0.3729.104',
        '74.0.3729.103',
        '74.0.3729.102',
        '75.0.3770.2',
        '74.0.3729.101',
        '75.0.3770.1',
        '75.0.3770.0',
        '74.0.3729.100',
        '75.0.3769.5',
        '75.0.3769.4',
        '74.0.3729.99',
        '75.0.3769.3',
        '75.0.3769.2',
        '75.0.3768.6',
        '74.0.3729.98',
        '75.0.3769.1',
        '75.0.3769.0',
        '74.0.3729.97',
        '73.0.3683.119',
        '73.0.3683.118',
        '74.0.3729.96',
        '75.0.3768.5',
        '75.0.3768.4',
        '75.0.3768.3',
        '75.0.3768.2',
        '74.0.3729.95',
        '74.0.3729.94',
        '75.0.3768.1',
        '75.0.3768.0',
        '74.0.3729.93',
        '74.0.3729.92',
        '73.0.3683.117',
        '74.0.3729.91',
        '75.0.3766.3',
        '74.0.3729.90',
        '75.0.3767.2',
        '75.0.3767.1',
        '75.0.3767.0',
        '74.0.3729.89',
        '73.0.3683.116',
        '75.0.3766.2',
        '74.0.3729.88',
        '75.0.3766.1',
        '75.0.3766.0',
        '74.0.3729.87',
        '73.0.3683.115',
        '74.0.3729.86',
        '75.0.3765.1',
        '75.0.3765.0',
        '74.0.3729.85',
        '73.0.3683.114',
        '74.0.3729.84',
        '75.0.3764.1',
        '75.0.3764.0',
        '74.0.3729.83',
        '73.0.3683.113',
        '75.0.3763.2',
        '75.0.3761.4',
        '74.0.3729.82',
        '75.0.3763.1',
        '75.0.3763.0',
        '74.0.3729.81',
        '73.0.3683.112',
        '75.0.3762.1',
        '75.0.3762.0',
        '74.0.3729.80',
        '75.0.3761.3',
        '74.0.3729.79',
        '73.0.3683.111',
        '75.0.3761.2',
        '74.0.3729.78',
        '74.0.3729.77',
        '75.0.3761.1',
        '75.0.3761.0',
        '73.0.3683.110',
        '74.0.3729.76',
        '74.0.3729.75',
        '75.0.3760.0',
        '74.0.3729.74',
        '75.0.3759.8',
        '75.0.3759.7',
        '75.0.3759.6',
        '74.0.3729.73',
        '75.0.3759.5',
        '74.0.3729.72',
        '73.0.3683.109',
        '75.0.3759.4',
        '75.0.3759.3',
        '74.0.3729.71',
        '75.0.3759.2',
        '74.0.3729.70',
        '73.0.3683.108',
        '74.0.3729.69',
        '75.0.3759.1',
        '75.0.3759.0',
        '74.0.3729.68',
        '73.0.3683.107',
        '74.0.3729.67',
        '75.0.3758.1',
        '75.0.3758.0',
        '74.0.3729.66',
        '73.0.3683.106',
        '74.0.3729.65',
        '75.0.3757.1',
        '75.0.3757.0',
        '74.0.3729.64',
        '73.0.3683.105',
        '74.0.3729.63',
        '75.0.3756.1',
        '75.0.3756.0',
        '74.0.3729.62',
        '73.0.3683.104',
        '75.0.3755.3',
        '75.0.3755.2',
        '73.0.3683.103',
        '75.0.3755.1',
        '75.0.3755.0',
        '74.0.3729.61',
        '73.0.3683.102',
        '74.0.3729.60',
        '75.0.3754.2',
        '74.0.3729.59',
        '75.0.3753.4',
        '74.0.3729.58',
        '75.0.3754.1',
        '75.0.3754.0',
        '74.0.3729.57',
        '73.0.3683.101',
        '75.0.3753.3',
        '75.0.3752.2',
        '75.0.3753.2',
        '74.0.3729.56',
        '75.0.3753.1',
        '75.0.3753.0',
        '74.0.3729.55',
        '73.0.3683.100',
        '74.0.3729.54',
        '75.0.3752.1',
        '75.0.3752.0',
        '74.0.3729.53',
        '73.0.3683.99',
        '74.0.3729.52',
        '75.0.3751.1',
        '75.0.3751.0',
        '74.0.3729.51',
        '73.0.3683.98',
        '74.0.3729.50',
        '75.0.3750.0',
        '74.0.3729.49',
        '74.0.3729.48',
        '74.0.3729.47',
        '75.0.3749.3',
        '74.0.3729.46',
        '73.0.3683.97',
        '75.0.3749.2',
        '74.0.3729.45',
        '75.0.3749.1',
        '75.0.3749.0',
        '74.0.3729.44',
        '73.0.3683.96',
        '74.0.3729.43',
        '74.0.3729.42',
        '75.0.3748.1',
        '75.0.3748.0',
        '74.0.3729.41',
        '75.0.3747.1',
        '73.0.3683.95',
        '75.0.3746.4',
        '74.0.3729.40',
        '74.0.3729.39',
        '75.0.3747.0',
        '75.0.3746.3',
        '75.0.3746.2',
        '74.0.3729.38',
        '75.0.3746.1',
        '75.0.3746.0',
        '74.0.3729.37',
        '73.0.3683.94',
        '75.0.3745.5',
        '75.0.3745.4',
        '75.0.3745.3',
        '75.0.3745.2',
        '74.0.3729.36',
        '75.0.3745.1',
        '75.0.3745.0',
        '75.0.3744.2',
        '74.0.3729.35',
        '73.0.3683.93',
        '74.0.3729.34',
        '75.0.3744.1',
        '75.0.3744.0',
        '74.0.3729.33',
        '73.0.3683.92',
        '74.0.3729.32',
        '74.0.3729.31',
        '73.0.3683.91',
        '75.0.3741.2',
        '75.0.3740.5',
        '74.0.3729.30',
        '75.0.3741.1',
        '75.0.3741.0',
        '74.0.3729.29',
        '75.0.3740.4',
        '73.0.3683.90',
        '74.0.3729.28',
        '75.0.3740.3',
        '73.0.3683.89',
        '75.0.3740.2',
        '74.0.3729.27',
        '75.0.3740.1',
        '75.0.3740.0',
        '74.0.3729.26',
        '73.0.3683.88',
        '73.0.3683.87',
        '74.0.3729.25',
        '75.0.3739.1',
        '75.0.3739.0',
        '73.0.3683.86',
        '74.0.3729.24',
        '73.0.3683.85',
        '75.0.3738.4',
        '75.0.3738.3',
        '75.0.3738.2',
        '75.0.3738.1',
        '75.0.3738.0',
        '74.0.3729.23',
        '73.0.3683.84',
        '74.0.3729.22',
        '74.0.3729.21',
        '75.0.3737.1',
        '75.0.3737.0',
        '74.0.3729.20',
        '73.0.3683.83',
        '74.0.3729.19',
        '75.0.3736.1',
        '75.0.3736.0',
        '74.0.3729.18',
        '73.0.3683.82',
        '74.0.3729.17',
        '75.0.3735.1',
        '75.0.3735.0',
        '74.0.3729.16',
        '73.0.3683.81',
        '75.0.3734.1',
        '75.0.3734.0',
        '74.0.3729.15',
        '73.0.3683.80',
        '74.0.3729.14',
        '75.0.3733.1',
        '75.0.3733.0',
        '75.0.3732.1',
        '74.0.3729.13',
        '74.0.3729.12',
        '73.0.3683.79',
        '74.0.3729.11',
        '75.0.3732.0',
        '74.0.3729.10',
        '73.0.3683.78',
        '74.0.3729.9',
        '74.0.3729.8',
        '74.0.3729.7',
        '75.0.3731.3',
        '75.0.3731.2',
        '75.0.3731.0',
        '74.0.3729.6',
        '73.0.3683.77',
        '73.0.3683.76',
        '75.0.3730.5',
        '75.0.3730.4',
        '73.0.3683.75',
        '74.0.3729.5',
        '73.0.3683.74',
        '75.0.3730.3',
        '75.0.3730.2',
        '74.0.3729.4',
        '73.0.3683.73',
        '73.0.3683.72',
        '75.0.3730.1',
        '75.0.3730.0',
        '74.0.3729.3',
        '73.0.3683.71',
        '74.0.3729.2',
        '73.0.3683.70',
        '74.0.3729.1',
        '74.0.3729.0',
        '74.0.3726.4',
        '73.0.3683.69',
        '74.0.3726.3',
        '74.0.3728.0',
        '74.0.3726.2',
        '73.0.3683.68',
        '74.0.3726.1',
        '74.0.3726.0',
        '74.0.3725.4',
        '73.0.3683.67',
        '73.0.3683.66',
        '74.0.3725.3',
        '74.0.3725.2',
        '74.0.3725.1',
        '74.0.3724.8',
        '74.0.3725.0',
        '73.0.3683.65',
        '74.0.3724.7',
        '74.0.3724.6',
        '74.0.3724.5',
        '74.0.3724.4',
        '74.0.3724.3',
        '74.0.3724.2',
        '74.0.3724.1',
        '74.0.3724.0',
        '73.0.3683.64',
        '74.0.3723.1',
        '74.0.3723.0',
        '73.0.3683.63',
        '74.0.3722.1',
        '74.0.3722.0',
        '73.0.3683.62',
        '74.0.3718.9',
        '74.0.3702.3',
        '74.0.3721.3',
        '74.0.3721.2',
        '74.0.3721.1',
        '74.0.3721.0',
        '74.0.3720.6',
        '73.0.3683.61',
        '72.0.3626.122',
        '73.0.3683.60',
        '74.0.3720.5',
        '72.0.3626.121',
        '74.0.3718.8',
        '74.0.3720.4',
        '74.0.3720.3',
        '74.0.3718.7',
        '74.0.3720.2',
        '74.0.3720.1',
        '74.0.3720.0',
        '74.0.3718.6',
        '74.0.3719.5',
        '73.0.3683.59',
        '74.0.3718.5',
        '74.0.3718.4',
        '74.0.3719.4',
        '74.0.3719.3',
        '74.0.3719.2',
        '74.0.3719.1',
        '73.0.3683.58',
        '74.0.3719.0',
        '73.0.3683.57',
        '73.0.3683.56',
        '74.0.3718.3',
        '73.0.3683.55',
        '74.0.3718.2',
        '74.0.3718.1',
        '74.0.3718.0',
        '73.0.3683.54',
        '74.0.3717.2',
        '73.0.3683.53',
        '74.0.3717.1',
        '74.0.3717.0',
        '73.0.3683.52',
        '74.0.3716.1',
        '74.0.3716.0',
        '73.0.3683.51',
        '74.0.3715.1',
        '74.0.3715.0',
        '73.0.3683.50',
        '74.0.3711.2',
        '74.0.3714.2',
        '74.0.3713.3',
        '74.0.3714.1',
        '74.0.3714.0',
        '73.0.3683.49',
        '74.0.3713.1',
        '74.0.3713.0',
        '72.0.3626.120',
        '73.0.3683.48',
        '74.0.3712.2',
        '74.0.3712.1',
        '74.0.3712.0',
        '73.0.3683.47',
        '72.0.3626.119',
        '73.0.3683.46',
        '74.0.3710.2',
        '72.0.3626.118',
        '74.0.3711.1',
        '74.0.3711.0',
        '73.0.3683.45',
        '72.0.3626.117',
        '74.0.3710.1',
        '74.0.3710.0',
        '73.0.3683.44',
        '72.0.3626.116',
        '74.0.3709.1',
        '74.0.3709.0',
        '74.0.3704.9',
        '73.0.3683.43',
        '72.0.3626.115',
        '74.0.3704.8',
        '74.0.3704.7',
        '74.0.3708.0',
        '74.0.3706.7',
        '74.0.3704.6',
        '73.0.3683.42',
        '72.0.3626.114',
        '74.0.3706.6',
        '72.0.3626.113',
        '74.0.3704.5',
        '74.0.3706.5',
        '74.0.3706.4',
        '74.0.3706.3',
        '74.0.3706.2',
        '74.0.3706.1',
        '74.0.3706.0',
        '73.0.3683.41',
        '72.0.3626.112',
        '74.0.3705.1',
        '74.0.3705.0',
        '73.0.3683.40',
        '72.0.3626.111',
        '73.0.3683.39',
        '74.0.3704.4',
        '73.0.3683.38',
        '74.0.3704.3',
        '74.0.3704.2',
        '74.0.3704.1',
        '74.0.3704.0',
        '73.0.3683.37',
        '72.0.3626.110',
        '72.0.3626.109',
        '74.0.3703.3',
        '74.0.3703.2',
        '73.0.3683.36',
        '74.0.3703.1',
        '74.0.3703.0',
        '73.0.3683.35',
        '72.0.3626.108',
        '74.0.3702.2',
        '74.0.3699.3',
        '74.0.3702.1',
        '74.0.3702.0',
        '73.0.3683.34',
        '72.0.3626.107',
        '73.0.3683.33',
        '74.0.3701.1',
        '74.0.3701.0',
        '73.0.3683.32',
        '73.0.3683.31',
        '72.0.3626.105',
        '74.0.3700.1',
        '74.0.3700.0',
        '73.0.3683.29',
        '72.0.3626.103',
        '74.0.3699.2',
        '74.0.3699.1',
        '74.0.3699.0',
        '73.0.3683.28',
        '72.0.3626.102',
        '73.0.3683.27',
        '73.0.3683.26',
        '74.0.3698.0',
        '74.0.3696.2',
        '72.0.3626.101',
        '73.0.3683.25',
        '74.0.3696.1',
        '74.0.3696.0',
        '74.0.3694.8',
        '72.0.3626.100',
        '74.0.3694.7',
        '74.0.3694.6',
        '74.0.3694.5',
        '74.0.3694.4',
        '72.0.3626.99',
        '72.0.3626.98',
        '74.0.3694.3',
        '73.0.3683.24',
        '72.0.3626.97',
        '72.0.3626.96',
        '72.0.3626.95',
        '73.0.3683.23',
        '72.0.3626.94',
        '73.0.3683.22',
        '73.0.3683.21',
        '72.0.3626.93',
        '74.0.3694.2',
        '72.0.3626.92',
        '74.0.3694.1',
        '74.0.3694.0',
        '74.0.3693.6',
        '73.0.3683.20',
        '72.0.3626.91',
        '74.0.3693.5',
        '74.0.3693.4',
        '74.0.3693.3',
        '74.0.3693.2',
        '73.0.3683.19',
        '74.0.3693.1',
        '74.0.3693.0',
        '73.0.3683.18',
        '72.0.3626.90',
        '74.0.3692.1',
        '74.0.3692.0',
        '73.0.3683.17',
        '72.0.3626.89',
        '74.0.3687.3',
        '74.0.3691.1',
        '74.0.3691.0',
        '73.0.3683.16',
        '72.0.3626.88',
        '72.0.3626.87',
        '73.0.3683.15',
        '74.0.3690.1',
        '74.0.3690.0',
        '73.0.3683.14',
        '72.0.3626.86',
        '73.0.3683.13',
        '73.0.3683.12',
        '74.0.3689.1',
        '74.0.3689.0',
        '73.0.3683.11',
        '72.0.3626.85',
        '73.0.3683.10',
        '72.0.3626.84',
        '73.0.3683.9',
        '74.0.3688.1',
        '74.0.3688.0',
        '73.0.3683.8',
        '72.0.3626.83',
        '74.0.3687.2',
        '74.0.3687.1',
        '74.0.3687.0',
        '73.0.3683.7',
        '72.0.3626.82',
        '74.0.3686.4',
        '72.0.3626.81',
        '74.0.3686.3',
        '74.0.3686.2',
        '74.0.3686.1',
        '74.0.3686.0',
        '73.0.3683.6',
        '72.0.3626.80',
        '74.0.3685.1',
        '74.0.3685.0',
        '73.0.3683.5',
        '72.0.3626.79',
        '74.0.3684.1',
        '74.0.3684.0',
        '73.0.3683.4',
        '72.0.3626.78',
        '72.0.3626.77',
        '73.0.3683.3',
        '73.0.3683.2',
        '72.0.3626.76',
        '73.0.3683.1',
        '73.0.3683.0',
        '72.0.3626.75',
        '71.0.3578.141',
        '73.0.3682.1',
        '73.0.3682.0',
        '72.0.3626.74',
        '71.0.3578.140',
        '73.0.3681.4',
        '73.0.3681.3',
        '73.0.3681.2',
        '73.0.3681.1',
        '73.0.3681.0',
        '72.0.3626.73',
        '71.0.3578.139',
        '72.0.3626.72',
        '72.0.3626.71',
        '73.0.3680.1',
        '73.0.3680.0',
        '72.0.3626.70',
        '71.0.3578.138',
        '73.0.3678.2',
        '73.0.3679.1',
        '73.0.3679.0',
        '72.0.3626.69',
        '71.0.3578.137',
        '73.0.3678.1',
        '73.0.3678.0',
        '71.0.3578.136',
        '73.0.3677.1',
        '73.0.3677.0',
        '72.0.3626.68',
        '72.0.3626.67',
        '71.0.3578.135',
        '73.0.3676.1',
        '73.0.3676.0',
        '73.0.3674.2',
        '72.0.3626.66',
        '71.0.3578.134',
        '73.0.3674.1',
        '73.0.3674.0',
        '72.0.3626.65',
        '71.0.3578.133',
        '73.0.3673.2',
        '73.0.3673.1',
        '73.0.3673.0',
        '72.0.3626.64',
        '71.0.3578.132',
        '72.0.3626.63',
        '72.0.3626.62',
        '72.0.3626.61',
        '72.0.3626.60',
        '73.0.3672.1',
        '73.0.3672.0',
        '72.0.3626.59',
        '71.0.3578.131',
        '73.0.3671.3',
        '73.0.3671.2',
        '73.0.3671.1',
        '73.0.3671.0',
        '72.0.3626.58',
        '71.0.3578.130',
        '73.0.3670.1',
        '73.0.3670.0',
        '72.0.3626.57',
        '71.0.3578.129',
        '73.0.3669.1',
        '73.0.3669.0',
        '72.0.3626.56',
        '71.0.3578.128',
        '73.0.3668.2',
        '73.0.3668.1',
        '73.0.3668.0',
        '72.0.3626.55',
        '71.0.3578.127',
        '73.0.3667.2',
        '73.0.3667.1',
        '73.0.3667.0',
        '72.0.3626.54',
        '71.0.3578.126',
        '73.0.3666.1',
        '73.0.3666.0',
        '72.0.3626.53',
        '71.0.3578.125',
        '73.0.3665.4',
        '73.0.3665.3',
        '72.0.3626.52',
        '73.0.3665.2',
        '73.0.3664.4',
        '73.0.3665.1',
        '73.0.3665.0',
        '72.0.3626.51',
        '71.0.3578.124',
        '72.0.3626.50',
        '73.0.3664.3',
        '73.0.3664.2',
        '73.0.3664.1',
        '73.0.3664.0',
        '73.0.3663.2',
        '72.0.3626.49',
        '71.0.3578.123',
        '73.0.3663.1',
        '73.0.3663.0',
        '72.0.3626.48',
        '71.0.3578.122',
        '73.0.3662.1',
        '73.0.3662.0',
        '72.0.3626.47',
        '71.0.3578.121',
        '73.0.3661.1',
        '72.0.3626.46',
        '73.0.3661.0',
        '72.0.3626.45',
        '71.0.3578.120',
        '73.0.3660.2',
        '73.0.3660.1',
        '73.0.3660.0',
        '72.0.3626.44',
        '71.0.3578.119',
        '73.0.3659.1',
        '73.0.3659.0',
        '72.0.3626.43',
        '71.0.3578.118',
        '73.0.3658.1',
        '73.0.3658.0',
        '72.0.3626.42',
        '71.0.3578.117',
        '73.0.3657.1',
        '73.0.3657.0',
        '72.0.3626.41',
        '71.0.3578.116',
        '73.0.3656.1',
        '73.0.3656.0',
        '72.0.3626.40',
        '71.0.3578.115',
        '73.0.3655.1',
        '73.0.3655.0',
        '72.0.3626.39',
        '71.0.3578.114',
        '73.0.3654.1',
        '73.0.3654.0',
        '72.0.3626.38',
        '71.0.3578.113',
        '73.0.3653.1',
        '73.0.3653.0',
        '72.0.3626.37',
        '71.0.3578.112',
        '73.0.3652.1',
        '73.0.3652.0',
        '72.0.3626.36',
        '71.0.3578.111',
        '73.0.3651.1',
        '73.0.3651.0',
        '72.0.3626.35',
        '71.0.3578.110',
        '73.0.3650.1',
        '73.0.3650.0',
        '72.0.3626.34',
        '71.0.3578.109',
        '73.0.3649.1',
        '73.0.3649.0',
        '72.0.3626.33',
        '71.0.3578.108',
        '73.0.3648.2',
        '73.0.3648.1',
        '73.0.3648.0',
        '72.0.3626.32',
        '71.0.3578.107',
        '73.0.3647.2',
        '73.0.3647.1',
        '73.0.3647.0',
        '72.0.3626.31',
        '71.0.3578.106',
        '73.0.3635.3',
        '73.0.3646.2',
        '73.0.3646.1',
        '73.0.3646.0',
        '72.0.3626.30',
        '71.0.3578.105',
        '72.0.3626.29',
        '73.0.3645.2',
        '73.0.3645.1',
        '73.0.3645.0',
        '72.0.3626.28',
        '71.0.3578.104',
        '72.0.3626.27',
        '72.0.3626.26',
        '72.0.3626.25',
        '72.0.3626.24',
        '73.0.3644.0',
        '73.0.3643.2',
        '72.0.3626.23',
        '71.0.3578.103',
        '73.0.3643.1',
        '73.0.3643.0',
        '72.0.3626.22',
        '71.0.3578.102',
        '73.0.3642.1',
        '73.0.3642.0',
        '72.0.3626.21',
        '71.0.3578.101',
        '73.0.3641.1',
        '73.0.3641.0',
        '72.0.3626.20',
        '71.0.3578.100',
        '72.0.3626.19',
        '73.0.3640.1',
        '73.0.3640.0',
        '72.0.3626.18',
        '73.0.3639.1',
        '71.0.3578.99',
        '73.0.3639.0',
        '72.0.3626.17',
        '73.0.3638.2',
        '72.0.3626.16',
        '73.0.3638.1',
        '73.0.3638.0',
        '72.0.3626.15',
        '71.0.3578.98',
        '73.0.3635.2',
        '71.0.3578.97',
        '73.0.3637.1',
        '73.0.3637.0',
        '72.0.3626.14',
        '71.0.3578.96',
        '71.0.3578.95',
        '72.0.3626.13',
        '71.0.3578.94',
        '73.0.3636.2',
        '71.0.3578.93',
        '73.0.3636.1',
        '73.0.3636.0',
        '72.0.3626.12',
        '71.0.3578.92',
        '73.0.3635.1',
        '73.0.3635.0',
        '72.0.3626.11',
        '71.0.3578.91',
        '73.0.3634.2',
        '73.0.3634.1',
        '73.0.3634.0',
        '72.0.3626.10',
        '71.0.3578.90',
        '71.0.3578.89',
        '73.0.3633.2',
        '73.0.3633.1',
        '73.0.3633.0',
        '72.0.3610.4',
        '72.0.3626.9',
        '71.0.3578.88',
        '73.0.3632.5',
        '73.0.3632.4',
        '73.0.3632.3',
        '73.0.3632.2',
        '73.0.3632.1',
        '73.0.3632.0',
        '72.0.3626.8',
        '71.0.3578.87',
        '73.0.3631.2',
        '73.0.3631.1',
        '73.0.3631.0',
        '72.0.3626.7',
        '71.0.3578.86',
        '72.0.3626.6',
        '73.0.3630.1',
        '73.0.3630.0',
        '72.0.3626.5',
        '71.0.3578.85',
        '72.0.3626.4',
        '73.0.3628.3',
        '73.0.3628.2',
        '73.0.3629.1',
        '73.0.3629.0',
        '72.0.3626.3',
        '71.0.3578.84',
        '73.0.3628.1',
        '73.0.3628.0',
        '71.0.3578.83',
        '73.0.3627.1',
        '73.0.3627.0',
        '72.0.3626.2',
        '71.0.3578.82',
        '71.0.3578.81',
        '71.0.3578.80',
        '72.0.3626.1',
        '72.0.3626.0',
        '71.0.3578.79',
        '70.0.3538.124',
        '71.0.3578.78',
        '72.0.3623.4',
        '72.0.3625.2',
        '72.0.3625.1',
        '72.0.3625.0',
        '71.0.3578.77',
        '70.0.3538.123',
        '72.0.3624.4',
        '72.0.3624.3',
        '72.0.3624.2',
        '71.0.3578.76',
        '72.0.3624.1',
        '72.0.3624.0',
        '72.0.3623.3',
        '71.0.3578.75',
        '70.0.3538.122',
        '71.0.3578.74',
        '72.0.3623.2',
        '72.0.3610.3',
        '72.0.3623.1',
        '72.0.3623.0',
        '72.0.3622.3',
        '72.0.3622.2',
        '71.0.3578.73',
        '70.0.3538.121',
        '72.0.3622.1',
        '72.0.3622.0',
        '71.0.3578.72',
        '70.0.3538.120',
        '72.0.3621.1',
        '72.0.3621.0',
        '71.0.3578.71',
        '70.0.3538.119',
        '72.0.3620.1',
        '72.0.3620.0',
        '71.0.3578.70',
        '70.0.3538.118',
        '71.0.3578.69',
        '72.0.3619.1',
        '72.0.3619.0',
        '71.0.3578.68',
        '70.0.3538.117',
        '71.0.3578.67',
        '72.0.3618.1',
        '72.0.3618.0',
        '71.0.3578.66',
        '70.0.3538.116',
        '72.0.3617.1',
        '72.0.3617.0',
        '71.0.3578.65',
        '70.0.3538.115',
        '72.0.3602.3',
        '71.0.3578.64',
        '72.0.3616.1',
        '72.0.3616.0',
        '71.0.3578.63',
        '70.0.3538.114',
        '71.0.3578.62',
        '72.0.3615.1',
        '72.0.3615.0',
        '71.0.3578.61',
        '70.0.3538.113',
        '72.0.3614.1',
        '72.0.3614.0',
        '71.0.3578.60',
        '70.0.3538.112',
        '72.0.3613.1',
        '72.0.3613.0',
        '71.0.3578.59',
        '70.0.3538.111',
        '72.0.3612.2',
        '72.0.3612.1',
        '72.0.3612.0',
        '70.0.3538.110',
        '71.0.3578.58',
        '70.0.3538.109',
        '72.0.3611.2',
        '72.0.3611.1',
        '72.0.3611.0',
        '71.0.3578.57',
        '70.0.3538.108',
        '72.0.3610.2',
        '71.0.3578.56',
        '71.0.3578.55',
        '72.0.3610.1',
        '72.0.3610.0',
        '71.0.3578.54',
        '70.0.3538.107',
        '71.0.3578.53',
        '72.0.3609.3',
        '71.0.3578.52',
        '72.0.3609.2',
        '71.0.3578.51',
        '72.0.3608.5',
        '72.0.3609.1',
        '72.0.3609.0',
        '71.0.3578.50',
        '70.0.3538.106',
        '72.0.3608.4',
        '72.0.3608.3',
        '72.0.3608.2',
        '71.0.3578.49',
        '72.0.3608.1',
        '72.0.3608.0',
        '70.0.3538.105',
        '71.0.3578.48',
        '72.0.3607.1',
        '72.0.3607.0',
        '71.0.3578.47',
        '70.0.3538.104',
        '72.0.3606.2',
        '72.0.3606.1',
        '72.0.3606.0',
        '71.0.3578.46',
        '70.0.3538.103',
        '70.0.3538.102',
        '72.0.3605.3',
        '72.0.3605.2',
        '72.0.3605.1',
        '72.0.3605.0',
        '71.0.3578.45',
        '70.0.3538.101',
        '71.0.3578.44',
        '71.0.3578.43',
        '70.0.3538.100',
        '70.0.3538.99',
        '71.0.3578.42',
        '72.0.3604.1',
        '72.0.3604.0',
        '71.0.3578.41',
        '70.0.3538.98',
        '71.0.3578.40',
        '72.0.3603.2',
        '72.0.3603.1',
        '72.0.3603.0',
        '71.0.3578.39',
        '70.0.3538.97',
        '72.0.3602.2',
        '71.0.3578.38',
        '71.0.3578.37',
        '72.0.3602.1',
        '72.0.3602.0',
        '71.0.3578.36',
        '70.0.3538.96',
        '72.0.3601.1',
        '72.0.3601.0',
        '71.0.3578.35',
        '70.0.3538.95',
        '72.0.3600.1',
        '72.0.3600.0',
        '71.0.3578.34',
        '70.0.3538.94',
        '72.0.3599.3',
        '72.0.3599.2',
        '72.0.3599.1',
        '72.0.3599.0',
        '71.0.3578.33',
        '70.0.3538.93',
        '72.0.3598.1',
        '72.0.3598.0',
        '71.0.3578.32',
        '70.0.3538.87',
        '72.0.3597.1',
        '72.0.3597.0',
        '72.0.3596.2',
        '71.0.3578.31',
        '70.0.3538.86',
        '71.0.3578.30',
        '71.0.3578.29',
        '72.0.3596.1',
        '72.0.3596.0',
        '71.0.3578.28',
        '70.0.3538.85',
        '72.0.3595.2',
        '72.0.3591.3',
        '72.0.3595.1',
        '72.0.3595.0',
        '71.0.3578.27',
        '70.0.3538.84',
        '72.0.3594.1',
        '72.0.3594.0',
        '71.0.3578.26',
        '70.0.3538.83',
        '72.0.3593.2',
        '72.0.3593.1',
        '72.0.3593.0',
        '71.0.3578.25',
        '70.0.3538.82',
        '72.0.3589.3',
        '72.0.3592.2',
        '72.0.3592.1',
        '72.0.3592.0',
        '71.0.3578.24',
        '72.0.3589.2',
        '70.0.3538.81',
        '70.0.3538.80',
        '72.0.3591.2',
        '72.0.3591.1',
        '72.0.3591.0',
        '71.0.3578.23',
        '70.0.3538.79',
        '71.0.3578.22',
        '72.0.3590.1',
        '72.0.3590.0',
        '71.0.3578.21',
        '70.0.3538.78',
        '70.0.3538.77',
        '72.0.3589.1',
        '72.0.3589.0',
        '71.0.3578.20',
        '70.0.3538.76',
        '71.0.3578.19',
        '70.0.3538.75',
        '72.0.3588.1',
        '72.0.3588.0',
        '71.0.3578.18',
        '70.0.3538.74',
        '72.0.3586.2',
        '72.0.3587.0',
        '71.0.3578.17',
        '70.0.3538.73',
        '72.0.3586.1',
        '72.0.3586.0',
        '71.0.3578.16',
        '70.0.3538.72',
        '72.0.3585.1',
        '72.0.3585.0',
        '71.0.3578.15',
        '70.0.3538.71',
        '71.0.3578.14',
        '72.0.3584.1',
        '72.0.3584.0',
        '71.0.3578.13',
        '70.0.3538.70',
        '72.0.3583.2',
        '71.0.3578.12',
        '72.0.3583.1',
        '72.0.3583.0',
        '71.0.3578.11',
        '70.0.3538.69',
        '71.0.3578.10',
        '72.0.3582.0',
        '72.0.3581.4',
        '71.0.3578.9',
        '70.0.3538.67',
        '72.0.3581.3',
        '72.0.3581.2',
        '72.0.3581.1',
        '72.0.3581.0',
        '71.0.3578.8',
        '70.0.3538.66',
        '72.0.3580.1',
        '72.0.3580.0',
        '71.0.3578.7',
        '70.0.3538.65',
        '71.0.3578.6',
        '72.0.3579.1',
        '72.0.3579.0',
        '71.0.3578.5',
        '70.0.3538.64',
        '71.0.3578.4',
        '71.0.3578.3',
        '71.0.3578.2',
        '71.0.3578.1',
        '71.0.3578.0',
        '70.0.3538.63',
        '69.0.3497.128',
        '70.0.3538.62',
        '70.0.3538.61',
        '70.0.3538.60',
        '70.0.3538.59',
        '71.0.3577.1',
        '71.0.3577.0',
        '70.0.3538.58',
        '69.0.3497.127',
        '71.0.3576.2',
        '71.0.3576.1',
        '71.0.3576.0',
        '70.0.3538.57',
        '70.0.3538.56',
        '71.0.3575.2',
        '70.0.3538.55',
        '69.0.3497.126',
        '70.0.3538.54',
        '71.0.3575.1',
        '71.0.3575.0',
        '71.0.3574.1',
        '71.0.3574.0',
        '70.0.3538.53',
        '69.0.3497.125',
        '70.0.3538.52',
        '71.0.3573.1',
        '71.0.3573.0',
        '70.0.3538.51',
        '69.0.3497.124',
        '71.0.3572.1',
        '71.0.3572.0',
        '70.0.3538.50',
        '69.0.3497.123',
        '71.0.3571.2',
        '70.0.3538.49',
        '69.0.3497.122',
        '71.0.3571.1',
        '71.0.3571.0',
        '70.0.3538.48',
        '69.0.3497.121',
        '71.0.3570.1',
        '71.0.3570.0',
        '70.0.3538.47',
        '69.0.3497.120',
        '71.0.3568.2',
        '71.0.3569.1',
        '71.0.3569.0',
        '70.0.3538.46',
        '69.0.3497.119',
        '70.0.3538.45',
        '71.0.3568.1',
        '71.0.3568.0',
        '70.0.3538.44',
        '69.0.3497.118',
        '70.0.3538.43',
        '70.0.3538.42',
        '71.0.3567.1',
        '71.0.3567.0',
        '70.0.3538.41',
        '69.0.3497.117',
        '71.0.3566.1',
        '71.0.3566.0',
        '70.0.3538.40',
        '69.0.3497.116',
        '71.0.3565.1',
        '71.0.3565.0',
        '70.0.3538.39',
        '69.0.3497.115',
        '71.0.3564.1',
        '71.0.3564.0',
        '70.0.3538.38',
        '69.0.3497.114',
        '71.0.3563.0',
        '71.0.3562.2',
        '70.0.3538.37',
        '69.0.3497.113',
        '70.0.3538.36',
        '70.0.3538.35',
        '71.0.3562.1',
        '71.0.3562.0',
        '70.0.3538.34',
        '69.0.3497.112',
        '70.0.3538.33',
        '71.0.3561.1',
        '71.0.3561.0',
        '70.0.3538.32',
        '69.0.3497.111',
        '71.0.3559.6',
        '71.0.3560.1',
        '71.0.3560.0',
        '71.0.3559.5',
        '71.0.3559.4',
        '70.0.3538.31',
        '69.0.3497.110',
        '71.0.3559.3',
        '70.0.3538.30',
        '69.0.3497.109',
        '71.0.3559.2',
        '71.0.3559.1',
        '71.0.3559.0',
        '70.0.3538.29',
        '69.0.3497.108',
        '71.0.3558.2',
        '71.0.3558.1',
        '71.0.3558.0',
        '70.0.3538.28',
        '69.0.3497.107',
        '71.0.3557.2',
        '71.0.3557.1',
        '71.0.3557.0',
        '70.0.3538.27',
        '69.0.3497.106',
        '71.0.3554.4',
        '70.0.3538.26',
        '71.0.3556.1',
        '71.0.3556.0',
        '70.0.3538.25',
        '71.0.3554.3',
        '69.0.3497.105',
        '71.0.3554.2',
        '70.0.3538.24',
        '69.0.3497.104',
        '71.0.3555.2',
        '70.0.3538.23',
        '71.0.3555.1',
        '71.0.3555.0',
        '70.0.3538.22',
        '69.0.3497.103',
        '71.0.3554.1',
        '71.0.3554.0',
        '70.0.3538.21',
        '69.0.3497.102',
        '71.0.3553.3',
        '70.0.3538.20',
        '69.0.3497.101',
        '71.0.3553.2',
        '69.0.3497.100',
        '71.0.3553.1',
        '71.0.3553.0',
        '70.0.3538.19',
        '69.0.3497.99',
        '69.0.3497.98',
        '69.0.3497.97',
        '71.0.3552.6',
        '71.0.3552.5',
        '71.0.3552.4',
        '71.0.3552.3',
        '71.0.3552.2',
        '71.0.3552.1',
        '71.0.3552.0',
        '70.0.3538.18',
        '69.0.3497.96',
        '71.0.3551.3',
        '71.0.3551.2',
        '71.0.3551.1',
        '71.0.3551.0',
        '70.0.3538.17',
        '69.0.3497.95',
        '71.0.3550.3',
        '71.0.3550.2',
        '71.0.3550.1',
        '71.0.3550.0',
        '70.0.3538.16',
        '69.0.3497.94',
        '71.0.3549.1',
        '71.0.3549.0',
        '70.0.3538.15',
        '69.0.3497.93',
        '69.0.3497.92',
        '71.0.3548.1',
        '71.0.3548.0',
        '70.0.3538.14',
        '69.0.3497.91',
        '71.0.3547.1',
        '71.0.3547.0',
        '70.0.3538.13',
        '69.0.3497.90',
        '71.0.3546.2',
        '69.0.3497.89',
        '71.0.3546.1',
        '71.0.3546.0',
        '70.0.3538.12',
        '69.0.3497.88',
        '71.0.3545.4',
        '71.0.3545.3',
        '71.0.3545.2',
        '71.0.3545.1',
        '71.0.3545.0',
        '70.0.3538.11',
        '69.0.3497.87',
        '71.0.3544.5',
        '71.0.3544.4',
        '71.0.3544.3',
        '71.0.3544.2',
        '71.0.3544.1',
        '71.0.3544.0',
        '69.0.3497.86',
        '70.0.3538.10',
        '69.0.3497.85',
        '70.0.3538.9',
        '69.0.3497.84',
        '71.0.3543.4',
        '70.0.3538.8',
        '71.0.3543.3',
        '71.0.3543.2',
        '71.0.3543.1',
        '71.0.3543.0',
        '70.0.3538.7',
        '69.0.3497.83',
        '71.0.3542.2',
        '71.0.3542.1',
        '71.0.3542.0',
        '70.0.3538.6',
        '69.0.3497.82',
        '69.0.3497.81',
        '71.0.3541.1',
        '71.0.3541.0',
        '70.0.3538.5',
        '69.0.3497.80',
        '71.0.3540.1',
        '71.0.3540.0',
        '70.0.3538.4',
        '69.0.3497.79',
        '70.0.3538.3',
        '71.0.3539.1',
        '71.0.3539.0',
        '69.0.3497.78',
        '68.0.3440.134',
        '69.0.3497.77',
        '70.0.3538.2',
        '70.0.3538.1',
        '70.0.3538.0',
        '69.0.3497.76',
        '68.0.3440.133',
        '69.0.3497.75',
        '70.0.3537.2',
        '70.0.3537.1',
        '70.0.3537.0',
        '69.0.3497.74',
        '68.0.3440.132',
        '70.0.3536.0',
        '70.0.3535.5',
        '70.0.3535.4',
        '70.0.3535.3',
        '69.0.3497.73',
        '68.0.3440.131',
        '70.0.3532.8',
        '70.0.3532.7',
        '69.0.3497.72',
        '69.0.3497.71',
        '70.0.3535.2',
        '70.0.3535.1',
        '70.0.3535.0',
        '69.0.3497.70',
        '68.0.3440.130',
        '69.0.3497.69',
        '68.0.3440.129',
        '70.0.3534.4',
        '70.0.3534.3',
        '70.0.3534.2',
        '70.0.3534.1',
        '70.0.3534.0',
        '69.0.3497.68',
        '68.0.3440.128',
        '70.0.3533.2',
        '70.0.3533.1',
        '70.0.3533.0',
        '69.0.3497.67',
        '68.0.3440.127',
        '70.0.3532.6',
        '70.0.3532.5',
        '70.0.3532.4',
        '69.0.3497.66',
        '68.0.3440.126',
        '70.0.3532.3',
        '70.0.3532.2',
        '70.0.3532.1',
        '69.0.3497.60',
        '69.0.3497.65',
        '69.0.3497.64',
        '70.0.3532.0',
        '70.0.3531.0',
        '70.0.3530.4',
        '70.0.3530.3',
        '70.0.3530.2',
        '69.0.3497.58',
        '68.0.3440.125',
        '69.0.3497.57',
        '69.0.3497.56',
        '69.0.3497.55',
        '69.0.3497.54',
        '70.0.3530.1',
        '70.0.3530.0',
        '69.0.3497.53',
        '68.0.3440.124',
        '69.0.3497.52',
        '70.0.3529.3',
        '70.0.3529.2',
        '70.0.3529.1',
        '70.0.3529.0',
        '69.0.3497.51',
        '70.0.3528.4',
        '68.0.3440.123',
        '70.0.3528.3',
        '70.0.3528.2',
        '70.0.3528.1',
        '70.0.3528.0',
        '69.0.3497.50',
        '68.0.3440.122',
        '70.0.3527.1',
        '70.0.3527.0',
        '69.0.3497.49',
        '68.0.3440.121',
        '70.0.3526.1',
        '70.0.3526.0',
        '68.0.3440.120',
        '69.0.3497.48',
        '69.0.3497.47',
        '68.0.3440.119',
        '68.0.3440.118',
        '70.0.3525.5',
        '70.0.3525.4',
        '70.0.3525.3',
        '68.0.3440.117',
        '69.0.3497.46',
        '70.0.3525.2',
        '70.0.3525.1',
        '70.0.3525.0',
        '69.0.3497.45',
        '68.0.3440.116',
        '70.0.3524.4',
        '70.0.3524.3',
        '69.0.3497.44',
        '70.0.3524.2',
        '70.0.3524.1',
        '70.0.3524.0',
        '70.0.3523.2',
        '69.0.3497.43',
        '68.0.3440.115',
        '70.0.3505.9',
        '69.0.3497.42',
        '70.0.3505.8',
        '70.0.3523.1',
        '70.0.3523.0',
        '69.0.3497.41',
        '68.0.3440.114',
        '70.0.3505.7',
        '69.0.3497.40',
        '70.0.3522.1',
        '70.0.3522.0',
        '70.0.3521.2',
        '69.0.3497.39',
        '68.0.3440.113',
        '70.0.3505.6',
        '70.0.3521.1',
        '70.0.3521.0',
        '69.0.3497.38',
        '68.0.3440.112',
        '70.0.3520.1',
        '70.0.3520.0',
        '69.0.3497.37',
        '68.0.3440.111',
        '70.0.3519.3',
        '70.0.3519.2',
        '70.0.3519.1',
        '70.0.3519.0',
        '69.0.3497.36',
        '68.0.3440.110',
        '70.0.3518.1',
        '70.0.3518.0',
        '69.0.3497.35',
        '69.0.3497.34',
        '68.0.3440.109',
        '70.0.3517.1',
        '70.0.3517.0',
        '69.0.3497.33',
        '68.0.3440.108',
        '69.0.3497.32',
        '70.0.3516.3',
        '70.0.3516.2',
        '70.0.3516.1',
        '70.0.3516.0',
        '69.0.3497.31',
        '68.0.3440.107',
        '70.0.3515.4',
        '68.0.3440.106',
        '70.0.3515.3',
        '70.0.3515.2',
        '70.0.3515.1',
        '70.0.3515.0',
        '69.0.3497.30',
        '68.0.3440.105',
        '68.0.3440.104',
        '70.0.3514.2',
        '70.0.3514.1',
        '70.0.3514.0',
        '69.0.3497.29',
        '68.0.3440.103',
        '70.0.3513.1',
        '70.0.3513.0',
        '69.0.3497.28',
    )
    # Uniform random pick; substituted into the template above.
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
1677
1678
# Default HTTP headers sent with every request.  The User-Agent is picked
# once per process by random_user_agent() (defined above).
std_headers = {
    'User-Agent': random_user_agent(),
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}
1686
1687
# Named alternative User-Agent strings extractors can opt into.
USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
1691
1692
# Unique sentinel used to distinguish "no default supplied" from an explicit
# default of None in the xpath_*/extractor helper functions below.
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

# Month names per language code, used for date parsing.
MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
1705
# Media file extensions recognized as downloadable formats.
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
# Maps each accented character to its closest ASCII transliteration
# (single letters map 1:1; list entries like 'AE'/'ss' are multi-char).
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
1725
# strptime() patterns tried (in order) when parsing free-form dates.
DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

# Variant preferring day-before-month for ambiguous numeric dates.
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

# Variant preferring month-before-day (US style).
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

# Matches the argument list of a P.A.C.K.E.R.-style packed JavaScript payload.
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
# Captures the body of a <script type="application/ld+json"> block as 'json_ld'.
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
1791
1792
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    encoding = 'UTF-8'
    try:
        candidate = locale.getpreferredencoding()
        # Verify the reported encoding is actually usable before trusting it.
        'TEST'.encode(candidate)
    except Exception:
        pass
    else:
        encoding = candidate
    return encoding
1806
1807
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible.

    The data is written to a temporary file in the same directory and then
    renamed over the target, so readers never observe a partial file.
    """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        # (Fixed: the lambdas previously ignored their argument and closed
        # over `fn`; they now operate on the argument they are given.)
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            # Give the new file the default permissions the umask allows,
            # instead of NamedTemporaryFile's restrictive 0600.
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        # On any failure, clean up the temporary file before re-raising.
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
1866
1867
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        # `key` is interpolated into an XPath expression, so restrict it to
        # safe attribute-name characters first.
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        # Python 2.6 ElementTree does not support attribute predicates in
        # XPath, so filter the candidate elements manually.
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None
1882
1883 # On python2.6 the xml.etree.ElementTree.Element methods don't support
1884 # the namespace parameter
1885
1886
def xpath_with_ns(path, ns_map):
    """Expand 'prefix:tag' steps of an XPath into ElementTree's
    '{namespace-uri}tag' notation, using ns_map to resolve prefixes."""
    def expand(step):
        parts = step.split(':')
        if len(parts) == 1:
            return parts[0]
        prefix, tag = parts
        return '{%s}%s' % (ns_map[prefix], tag)

    return '/'.join(expand(step) for step in path.split('/'))
1897
1898
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find the first element matching xpath (a single expression or an
    iterable of candidates).  Falls back to `default` if supplied, raises
    ExtractorError when `fatal`, and returns None otherwise."""
    if isinstance(xpath, (str, compat_str)):
        n = node.find(compat_xpath(xpath))
    else:
        # Try each candidate expression until one matches.
        for xp in xpath:
            n = node.find(compat_xpath(xp))
            if n is not None:
                break

    if n is not None:
        return n
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element %s' % name)
    return None
1920
1921
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Like xpath_element, but return the matched element's text content."""
    elem = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if elem is None or elem == default:
        return elem
    if elem.text is not None:
        return elem.text
    # Element exists but carries no text.
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element\'s text %s' % name)
    return None
1935
1936
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Return attribute `key` of the first element matching xpath, with the
    same default/fatal semantics as xpath_element."""
    elem = find_xpath_attr(node, xpath, key)
    if elem is not None:
        return elem.attrib[key]
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = '%s[@%s]' % (xpath, key) if name is None else name
        raise ExtractorError('Could not find XML attribute %s' % name)
    return None
1948
1949
def get_element_by_id(id, html):
    """Return the inner content of the first tag in *html* whose id
    attribute equals *id*, or None if no such tag exists."""
    return get_element_by_attribute('id', id, html)
1953
1954
def get_element_by_class(class_name, html):
    """Return the inner content of the first tag carrying *class_name*
    in its class attribute, or None."""
    matches = get_elements_by_class(class_name, html)
    return matches[0] if matches else None
1959
1960
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the inner content of the first tag with attribute=value,
    or None when nothing matches."""
    matches = get_elements_by_attribute(attribute, value, html, escape_value)
    return matches[0] if matches else None
1964
1965
def get_elements_by_class(class_name, html):
    """Return the content of all tags whose class attribute contains
    *class_name* as a whole word."""
    # Match class_name anywhere in the (possibly multi-valued) class attr.
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)
1971
1972
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    if escape_value:
        value = re.escape(value)

    results = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        content = m.group('content')

        # Drop surrounding quotes if the attribute value was quoted.
        if content.startswith(('"', "'")):
            content = content[1:-1]

        results.append(unescapeHTML(content))

    return results
1996
1997
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        # attrs maps attribute name -> value (None for valueless attributes)
        # for the most recently seen start tag.
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # Invoked by HTMLParser for every start tag; we simply keep the
        # attributes of the last one fed in.
        self.attrs = dict(attrs)
2007
2008
def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        # Feeding just the element's text makes the parser record its
        # attributes via handle_starttag.
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs
2033
2034
def clean_html(html):
    """Clean an HTML snippet into a readable string"""

    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Literal newlines are layout noise; only <br> and paragraph breaks
    # become real line breaks.
    cleaned = html.replace('\n', ' ')
    cleaned = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', cleaned)
    cleaned = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', cleaned)
    # Strip any remaining tags, then decode HTML entities.
    cleaned = re.sub('<.*?>', '', cleaned)
    return unescapeHTML(cleaned).strip()
2050
2051
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            # '-' means stdout; on Windows switch it to binary mode so the
            # media bytes are not mangled by newline translation.
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # A permission error will not be cured by renaming — re-raise.
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
2082
2083
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        # Not a parseable RFC 2822 date.
        return None
    return email.utils.mktime_tz(parsed)
2091
2092
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        # Transliterate accented characters in restricted mode.
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if not restricted and char == '\n':
            return ' '
        # Drop '?', control characters and DEL entirely.
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    if s == '':
        return ''
    # Turn ':' inside timestamp-like runs (e.g. 12:34:56) into '_' so the
    # ':' rule above does not insert ' -' in the middle of them.
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(replace_insane(c) for c in s)
    if not is_id:
        # Collapse runs of underscores and trim cosmetic leftovers.
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[1:]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
2136
2137
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows.

    On other platforms the path is returned unchanged unless `force` is
    set, in which case Windows-style sanitization is applied anyway.
    """
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    # Replace characters forbidden on Windows (and trailing space/dot) in
    # every path component, keeping '.'/'..' navigation intact.
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s and s[0] == os.path.sep:
        # Guard on `s` being non-empty: previously this raised IndexError
        # for force=True with an empty path.
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)
2161
2162
def sanitize_url(url):
    """Give protocol-relative URLs an http: scheme and repair a couple of
    scheme typos observed in the wild."""
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    for bad_scheme, good_scheme in (
            # https://github.com/ytdl-org/youtube-dl/issues/15649
            (r'^httpss://', r'https://'),
            # https://bx1.be/lives/direct-tv/
            (r'^rmtp([es]?)://', r'rtmp\1://')):
        if re.match(bad_scheme, url):
            return re.sub(bad_scheme, good_scheme, url)
    return url
2179
2180
def extract_basic_auth(url):
    """Strip user:password credentials from *url*; return the cleaned URL
    and a ready-made 'Basic ...' Authorization value (or None)."""
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    netloc = (parts.hostname if parts.port is None
              else '%s:%d' % (parts.hostname, parts.port))
    clean_url = compat_urlparse.urlunsplit(parts._replace(netloc=netloc))
    credentials = '%s:%s' % (parts.username, parts.password or '')
    token = base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
    return clean_url, 'Basic ' + token
2191
2192
def sanitized_Request(url, *args, **kwargs):
    """Build a urllib Request from a sanitized/escaped URL, moving inline
    user:password credentials into an Authorization header."""
    clean_url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        # Request's signature is (url, data, headers, ...): headers may
        # arrive positionally or by keyword.
        if len(args) >= 2:
            headers = args[1]
        else:
            headers = kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(clean_url, *args, **kwargs)
2199
2200
def expand_path(s):
    """Expand environment variables and '~' in the given path string."""
    return os.path.expandvars(compat_expanduser(s))
2204
2205
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    # Membership is checked against a list (not a set) on purpose, so
    # unhashable elements are supported; order of first occurrence is kept.
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
2213
2214
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    numeric = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if numeric:
        numstr = numeric.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
2244
2245
def unescapeHTML(s):
    """Decode HTML entities in *s*; None is passed through unchanged."""
    if s is None:
        return None
    assert type(s) == compat_str

    def _decode(mobj):
        return _htmlentity_transform(mobj.group(1))

    return re.sub(r'&([^&;]+;)', _decode, s)
2253
2254
def escapeHTML(text):
    """Escape &, <, >, and both quote characters for safe HTML embedding."""
    # '&' must be replaced first so freshly produced entities survive.
    for char, entity in (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
2264
2265
def process_communicate_or_kill(p, *args, **kwargs):
    """p.communicate(...), but kill the process and reap it on any
    exception (including KeyboardInterrupt) so no zombie is left behind."""
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:
        p.kill()
        p.wait()
        raise
2273
2274
def get_subprocess_encoding():
    """Return the text encoding to use for subprocess arguments/output."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        return preferredencoding()
    # Fall back to UTF-8 when the filesystem encoding is unknown.
    return sys.getfilesystemencoding() or 'utf-8'
2285
2286
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """
    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    return s.encode(get_subprocess_encoding(), 'ignore')
2309
2310
def decodeFilename(b, for_subprocess=False):
    """Decode a byte filename to text on Python 2; pass through otherwise."""
    # Python 3 filenames are already text; also leave non-bytes untouched.
    if sys.version_info >= (3, 0) or not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')
2320
2321
def encodeArgument(s):
    """Encode a subprocess command-line argument, like a filename."""
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)
2329
2330
def decodeArgument(b):
    """Decode a subprocess command-line argument; inverse of encodeArgument."""
    return decodeFilename(b, True)
2333
2334
def decodeOption(optval):
    """Decode a command-line option value to text; None passes through."""
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval
2343
2344
def formatSeconds(secs, delim=':', msec=False):
    """Format a duration in seconds as [H<delim>]M<delim>SS (plain seconds
    below one minute), optionally with a '.mmm' millisecond suffix."""
    if secs > 3600:
        ret = '%d%s%02d%s%02d' % (secs // 3600, delim, (secs % 3600) // 60, delim, secs % 60)
    elif secs > 60:
        ret = '%d%s%02d' % (secs // 60, delim, secs % 60)
    else:
        ret = '%d' % secs
    # secs % 1 is the fractional part; scale it to whole milliseconds first —
    # '%03d' applied to a value < 1 would always render '000'.
    return '%s.%03d' % (ret, secs % 1 * 1000) if msec else ret
2353
2354
def _ssl_load_windows_store_certs(ssl_context, storename):
    """Load trusted x509 certificates from the given Windows certificate
    store into ssl_context, skipping entries that fail to load."""
    # Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
    try:
        # Keep only DER-encoded certs that are explicitly trusted (or
        # trusted for server authentication).
        certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
                 if encoding == 'x509_asn' and (
                     trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
    except PermissionError:
        # Store not readable by this user — silently skip it.
        return
    for cert in certs:
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            # Ignore certificates the ssl module cannot parse.
            pass
2368
2369
def make_HTTPS_handler(params, **kwargs):
    """Build a YoutubeDLHTTPSHandler whose SSL context honors the
    'nocheckcertificate' option in params."""
    opts_check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = opts_check_certificate
    context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
    if opts_check_certificate:
        # Work around the issue in load_default_certs when there are bad certificates. See:
        # https://github.com/yt-dlp/yt-dlp/issues/1060,
        # https://bugs.python.org/issue35665, https://bugs.python.org/issue4531
        if sys.platform == 'win32':
            for storename in ('CA', 'ROOT'):
                _ssl_load_windows_store_certs(context, storename)
        context.set_default_verify_paths()
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
2384
2385
def bug_reports_message(before=';'):
    """Return the standard bug-report blurb, appended to *before* with
    punctuation-aware spacing and capitalization."""
    update_cmd = (
        'type yt-dlp -U to update' if ytdl_is_updateable()
        else 'see https://github.com/yt-dlp/yt-dlp on how to update')
    msg = 'please report this issue on https://github.com/yt-dlp/yt-dlp .'
    msg += ' Make sure you are using the latest version; %s.' % update_cmd
    msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'

    before = before.rstrip()
    # Starting a fresh sentence? Capitalize the blurb.
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return ('%s %s' % (before, msg)) if before else msg
2400
2401
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors; all errors below derive from it."""
    pass
2405
2406
# Exception types indicating network-level failures; ExtractorError marks
# errors raised while one of these is in flight as "expected".
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
# ssl.CertificateError is not available on every supported Python version.
if hasattr(ssl, 'CertificateError'):
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
2411
2412
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        # Errors raised while handling a network exception are environmental,
        # not extractor bugs — treat them as expected.
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        # Compose "[ie] video_id: msg (caused by ...)", adding the
        # bug-report blurb for unexpected errors.
        super(ExtractorError, self).__init__(''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            self.msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message())))

    def format_traceback(self):
        # Render the stored traceback as a string, or None when absent.
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
2442
2443
class UnsupportedError(ExtractorError):
    """Raised for URLs no extractor can handle; always an expected error."""
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url
2449
2450
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass
2454
2455
class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None):
        # Geo-restriction is an environmental condition, hence expected=True.
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        # Countries related to the restriction, as supplied by the caller
        # (presumably country codes; may be None when unknown).
        self.countries = countries
2467
2468
class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info  # original (type, value, traceback) or None
2481
2482
class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    pass
2490
2491
class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass
2499
2500
class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg  # kept for callers that read the message directly
2511
2512
class ExistingVideoReached(YoutubeDLError):
    """ An already-downloaded video was reached (--break-on-existing). """
    pass
2516
2517
class RejectedVideoReached(YoutubeDLError):
    """ A filtered/rejected video was reached (--break-on-reject). """
    pass
2521
2522
class ThrottledDownload(YoutubeDLError):
    """ Download speed below --throttled-rate. """
    pass
2526
2527
class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass
2531
2532
class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass
2540
2541
class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected
2557
2558
class XAttrMetadataError(YoutubeDLError):
    """Raised when writing extended-attribute metadata fails; classifies
    the failure into a machine-readable `reason` attribute."""

    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code  # OS errno, if known
        self.msg = msg

        # Parsing code and msg
        # The message substrings cover platforms where errno is unavailable.
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'
2573
2574
class XAttrUnavailableError(YoutubeDLError):
    """Raised when extended-attribute support is unavailable."""
    pass
2577
2578
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    """Create an http_class connection, optionally binding it to the
    'source_address' configured in the handler's params."""
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            # Infer the address family from the source address: dotted
            # notation means IPv4, otherwise IPv6.
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            # Try each candidate address until one connects.
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            # No source_address support: replace connect() wholesale.
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
2642
2643
def handle_youtubedl_headers(headers):
    """Process the internal 'Youtubedl-no-compression' marker header.

    When the marker is present, return a copy of *headers* with every
    'Accept-Encoding' header (any case) removed along with the marker
    itself; otherwise *headers* is returned unmodified.
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers

    stripped = dict(
        item for item in headers.items()
        if item[0].lower() != 'accept-encoding')
    stripped.pop('Youtubedl-no-compression')
    return stripped
2652
2653
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        # params: the YoutubeDL options dict; consulted later, e.g. for
        # 'source_address' in _create_http_connection()
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        # Plain-HTTP entry point; the internal 'Ytdl-socks-proxy' header
        # swaps in a SOCKS-tunnelling connection class and must be removed
        # so it never reaches the wire
        conn_class = compat_http_client.HTTPConnection

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        # Decompress 'deflate'-encoded bodies; try raw deflate first,
        # then fall back to a zlib-wrapped stream
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        # Transparently decompress gzip/deflate bodies and re-wrap the
        # response so downstream consumers see plain content
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk add the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                # Retry with progressively shorter payloads (up to 1023
                # trailing bytes dropped) before giving up
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    # HTTPS requests/responses go through the same pre/post-processing
    https_request = http_request
    https_response = http_response
2777
2778
def make_socks_conn_class(base_class, socks_proxy):
    """Derive from *base_class* a connection class whose socket is tunnelled
    through the SOCKS proxy described by the URL *socks_proxy*
    (socks/socks4/socks4a/socks5 scheme, optional credentials and port).
    """
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    parsed = compat_urlparse.urlparse(socks_proxy)
    scheme = parsed.scheme.lower()
    if scheme == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif scheme in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif scheme == 'socks4a':
        socks_type = ProxyType.SOCKS4A

    def unquote_if_non_empty(s):
        # Credentials may be percent-encoded in the proxy URL
        return compat_urllib_parse_unquote_plus(s) if s else s

    proxy_args = (
        socks_type,
        parsed.hostname, parsed.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(parsed.username),
        unquote_if_non_empty(parsed.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                # Wrap with TLS after the SOCKS handshake completed
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
2820
2821
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    """HTTPS counterpart of YoutubeDLHandler: opens HTTPS connections with
    yt-dlp's connection factory (source_address support) and optional SOCKS
    proxying via the internal 'Ytdl-socks-proxy' header.
    """

    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        conn_class = self._https_conn_class

        # Route through a SOCKS proxy when requested; strip the internal
        # marker header so it is never transmitted
        proxy_url = req.headers.get('Ytdl-socks-proxy')
        if proxy_url:
            conn_class = make_socks_conn_class(conn_class, proxy_url)
            del req.headers['Ytdl-socks-proxy']

        extra = {}
        if hasattr(self, '_context'):  # python > 2.6
            extra['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            extra['check_hostname'] = self._check_hostname

        connection_factory = functools.partial(
            _create_http_connection, self, conn_class, True)
        return self.do_open(connection_factory, req, **extra)
2845
2846
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    # Prefix curl/Netscape files use to mark HttpOnly cookies
    _HTTPONLY_PREFIX = '#HttpOnly_'
    # Number of tab-separated fields in a valid cookies.txt entry
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            # Validate (and lightly normalize) one cookies.txt line;
            # raises LoadError on malformed entries
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        # Copy the file through prepare_line() into an in-memory buffer,
        # skipping (with a warning) any entries that fail validation
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True
2963
2964
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    """Thin wrapper over HTTPCookieProcessor kept as an extension point;
    currently delegates straight to the stdlib implementation."""

    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # NOTE(review): the workaround below is intentionally disabled;
        # kept for reference.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    # Reuse the stdlib request hook and the (pass-through) response hook
    # for HTTPS as well
    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
2987
2988
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    # Route 301/303/307/308 through the same machinery the stdlib uses
    # for 302 (this is what adds 308 support)
    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case).  In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space.  This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        # Body-describing headers must not be forwarded: the redirected
        # request is issued without the original request body
        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)
3044
3045
def extract_timezone(date_str):
    """Split a trailing timezone designator off *date_str*.

    Returns a tuple ``(offset, remainder)`` where *offset* is a
    datetime.timedelta (zero for 'Z', no timezone, or an unparsable one)
    and *remainder* is *date_str* with any recognized designator removed.
    """
    mobj = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                   # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))     # not preceded by 3 alpha word or >= 4 alpha or 2 digits
                [ ]?                                             # optional space
                (?P<sign>\+|-)                                   # +/-
                (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})       # hh[:]mm
            $)
        ''', date_str)
    if not mobj:
        return datetime.timedelta(), date_str

    date_str = date_str[:-len(mobj.group('tz'))]
    if not mobj.group('sign'):
        # Bare 'Z' designator: UTC, zero offset
        return datetime.timedelta(), date_str

    direction = 1 if mobj.group('sign') == '+' else -1
    offset = datetime.timedelta(
        hours=direction * int(mobj.group('hours')),
        minutes=direction * int(mobj.group('minutes')))
    return offset, date_str
3070
3071
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """
    if date_str is None:
        return None

    # Fractional seconds are not representable by strptime's %S; drop them
    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    fmt = '%Y-%m-%d' + delimiter + '%H:%M:%S'
    try:
        dt = datetime.datetime.strptime(date_str, fmt) - timezone
    except ValueError:
        # Unparsable date: mirror the "no result" behavior
        return None
    return calendar.timegm(dt.timetuple())
3089
3090
def date_formats(day_first=True):
    """Return the candidate date format strings, day-first or month-first."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
3093
3094
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD

    Tries every known date format (and finally the RFC 2822 parser);
    returns None when nothing matches.
    """
    if date_str is None:
        return None

    result = None
    # Commas never carry date information
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    # NB: deliberately no break — the last matching format wins
    for fmt in date_formats(day_first):
        try:
            result = datetime.datetime.strptime(date_str, fmt).strftime('%Y%m%d')
        except ValueError:
            pass
    if result is None:
        # Fall back to the RFC 2822 parser
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                result = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if result is not None:
        return compat_str(result)
3121
3122
def unified_timestamp(date_str, day_first=True):
    """Parse a free-form date string into a UNIX timestamp (or None)."""
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    # A 'PM' marker means 12 hours must be added after parsing
    pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    mobj = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if mobj:
        date_str = date_str[:-len(mobj.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    mobj = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if mobj:
        date_str = mobj.group(1)

    for fmt in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, fmt) - timezone + datetime.timedelta(hours=pm_delta)
        except ValueError:
            continue
        return calendar.timegm(dt.timetuple())

    # Last resort: RFC 2822 parsing
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + pm_delta * 3600
3154
3155
def determine_ext(url, default_ext='unknown_video'):
    """Guess the file extension from *url*, falling back to *default_ext*."""
    if url is None or '.' not in url:
        return default_ext
    candidate = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', candidate):
        return candidate
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    trimmed = candidate.rstrip('/')
    if trimmed in KNOWN_EXTENSIONS:
        return trimmed
    return default_ext
3167
3168
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    """Build the subtitle file name by swapping the media extension of
    *filename* for '<sub_lang>.<sub_format>'."""
    new_ext = '%s.%s' % (sub_lang, sub_format)
    return replace_extension(filename, new_ext, expected_real_ext)
3171
3172
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
                auto|microsecond|second|minute|hour|day.
                auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = precision == 'auto'
    if auto_precision:
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.now(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    mobj = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if mobj is None:
        # Plain date string: parse and round
        return datetime_round(datetime.datetime.strptime(date_str, format), precision)

    # Relative expression: resolve the base recursively, then offset it
    base = datetime_from_str(mobj.group('start'), precision, format)
    amount = int(mobj.group('time'))
    if mobj.group('sign') == '-':
        amount = -amount
    unit = mobj.group('unit')
    if unit in ('month', 'year'):
        # Calendar-aware arithmetic; result precision degrades to days
        new_date = datetime_add_months(base, amount * 12 if unit == 'year' else amount)
        unit = 'day'
    else:
        if unit == 'week':
            unit = 'day'
            amount *= 7
        new_date = base + datetime.timedelta(**{unit + 's': amount})
    if auto_precision:
        return datetime_round(new_date, unit)
    return new_date
3213
3214
def date_from_str(date_str, format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    """
    dt = datetime_from_str(date_str, precision='microsecond', format=format)
    return dt.date()
3223
3224
def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by *months* (may be negative).

    The day of month is clamped to the last valid day of the target month
    (e.g. Jan 31 + 1 month -> Feb 28/29).
    """
    zero_based_month = dt.month + months - 1
    year = dt.year + zero_based_month // 12
    month = zero_based_month % 12 + 1
    last_day = calendar.monthrange(year, month)[1]
    return dt.replace(year, month, min(dt.day, last_day))
3232
3233
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    ('microsecond' is a no-op; otherwise round to the nearest
    second/minute/hour/day, treating *dt* as UTC).
    """
    if precision == 'microsecond':
        return dt

    step = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }[precision]
    timestamp = calendar.timegm(dt.timetuple())
    # Round-half-up to the nearest multiple of `step`
    rounded = ((timestamp + step / 2) // step) * step
    return datetime.datetime.utcfromtimestamp(rounded)
3250
3251
def hyphenate_date(date_str):
    """Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format.

    Strings that do not match YYYYMMDD are returned unchanged.
    """
    parts = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    return '-'.join(parts.groups()) if parts is not None else date_str
3260
3261
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Missing bounds default to the minimum/maximum representable dates
        self.start = date_from_str(start) if start is not None else datetime.datetime.min.date()
        self.end = date_from_str(end) if end is not None else datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
3291
3292
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    if isinstance(name, bytes):
        # Python 2 may hand back an encoded byte string
        name = name.decode(preferredencoding())

    assert isinstance(name, compat_str)
    return name
3301
3302
def get_windows_version():
    ''' Get Windows version. None if it's not running on Windows '''
    if compat_os_name != 'nt':
        return None
    return version_tuple(platform.win32_ver()[1])
3309
3310
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070
    # Uses WriteConsoleW so Unicode text renders correctly on Windows
    # consoles; bails out (returns False) when *out* is not a real console.

    import ctypes
    import ctypes.wintypes

    # Map file descriptors to the STD_OUTPUT_HANDLE/STD_ERROR_HANDLE ids
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # A handle is a console only if it is a (non-remote) character
        # device and GetConsoleMode succeeds on it
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual
        # Plane (needs a surrogate pair), or len(s) if there is none
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        # Write in chunks of up to 1024 UTF-16 code units, stopping
        # before any non-BMP character so it can be written separately
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
3384
3385
def write_string(s, out=None, encoding=None):
    """Write the text *s* to the stream *out* (sys.stderr by default),
    picking an encoding strategy that works for consoles, byte streams
    and text streams across Python 2/3."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        # Prefer the native console writer on Windows (Unicode-safe)
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        encoded = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(encoded)
    elif hasattr(out, 'buffer'):
        # Text stream with an underlying byte buffer: encode explicitly
        chosen = encoding or getattr(out, 'encoding', None) or preferredencoding()
        out.buffer.write(s.encode(chosen, 'ignore'))
    else:
        out.write(s)
    out.flush()
3406
3407
def bytes_to_intlist(bs):
    """Convert a byte string into a list of integer byte values."""
    if not bs:
        return []
    if isinstance(bs[0], int):
        # Python 3: indexing bytes already yields ints
        return list(bs)
    # Python 2: indexing yields one-character strings
    return [ord(c) for c in bs]
3415
3416
def intlist_to_bytes(xs):
    """Pack a sequence of integer byte values back into a byte string."""
    if xs:
        return compat_struct_pack('%dB' % len(xs), *xs)
    return b''
3421
3422
# Cross-platform file locking: define _lock_file()/_unlock_file() using
# LockFileEx/UnlockFileEx on Windows, fcntl.flock() elsewhere, and
# always-raising stubs when neither is available.
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # Matches the Win32 OVERLAPPED struct required by LockFileEx
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Lock the maximum representable byte range (the "whole file")
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the OVERLAPPED pointer alive on the file object; it is
        # needed again by _unlock_file()
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # 0x2 == LOCKFILE_EXCLUSIVE_LOCK
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
3496
3497
class locked_file(object):
    """Context manager wrapping an open text file with an advisory lock:
    shared for 'r' mode, exclusive for 'a'/'w'."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        try:
            # Readers share the lock; writers take it exclusively
            _lock_file(self.f, self.mode != 'r')
        except IOError:
            # Locking failed: don't leak the file handle
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
3527
3528
def get_filesystem_encoding():
    """Return the filesystem encoding, defaulting to 'utf-8' when the
    platform reports none."""
    enc = sys.getfilesystemencoding()
    if enc is None:
        return 'utf-8'
    return enc
3532
3533
def shell_quote(args):
    """Return a single shell-escaped command line built from *args*.

    Byte-string arguments (e.g. filenames from encodeFilename) are decoded
    with the filesystem encoding first.
    """
    encoding = get_filesystem_encoding()

    def _as_text(a):
        # We may get a filename encoded with 'encodeFilename'
        return a.decode(encoding) if isinstance(a, bytes) else a

    return ' '.join(compat_shlex_quote(_as_text(a)) for a in args)
3543
3544
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    # If the URL already carries smuggled data, merge it in
    # (previously smuggled values take precedence)
    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    fragment = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return '%s#%s' % (url, fragment)
3553
3554
def unsmuggle_url(smug_url, default=None):
    """Extract data hidden in a URL by smuggle_url.

    Returns (url, data), or (url, default) when nothing was smuggled.
    """
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, fragment = smug_url.rpartition('#')
    payload = compat_parse_qs(fragment)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
3562
3563
def format_bytes(bytes):
    """Render a byte count as a human-readable string like '1.00MiB'.

    Accepts ints, floats or numeric strings; returns 'N/A' for None.
    """
    if bytes is None:
        return 'N/A'
    if isinstance(bytes, str):
        bytes = float(bytes)
    if bytes == 0.0:
        exponent = 0
    else:
        # Clamp so absurdly large values don't index past 'YiB'
        exponent = min(int(math.log(bytes, 1024.0)), 8)
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, suffix)
3576
3577
def lookup_unit_table(unit_table, s):
    """Parse a '<number><unit>' string using the multipliers in *unit_table*.

    Returns the value in base units as an int, or None when *s* doesn't match.
    """
    units_re = '|'.join(map(re.escape, unit_table))
    mobj = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if mobj is None:
        return None
    # ',' is accepted as a decimal separator too
    number = float(mobj.group('num').replace(',', '.'))
    return int(number * unit_table[mobj.group('unit')])
3587
3588
def parse_filesize(s):
    """Parse a human-readable file size ('1.5MiB', '300 kB', ...) into bytes.

    Returns an int, or None when *s* is None or unparseable. Both decimal
    (kB/MB/...) and binary (KiB/MiB/...) multipliers are understood.
    """
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }

    return lookup_unit_table(_UNIT_TABLE, s)
3658
3659
def parse_count(s):
    """Parse a human-style count like '1.5M' or '12,345' into an int."""
    if s is None:
        return None

    stripped = s.strip()

    # Plain number, possibly with thousands separators
    if re.match(r'^[\d,.]+$', stripped):
        return str_to_int(stripped)

    multipliers = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }
    return lookup_unit_table(multipliers, stripped)
3679
3680
def parse_resolution(s):
    """Extract width/height from a resolution string.

    Understands '1920x1080', '720p'/'1080i' and '4k'/'8k'; returns a dict
    with 'width'/'height' keys (possibly partial), or {} when nothing matches.
    """
    if s is None:
        return {}

    m = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
    if m:
        return {'width': int(m.group('w')), 'height': int(m.group('h'))}

    m = re.search(r'\b(\d+)[pPiI]\b', s)
    if m:
        return {'height': int(m.group(1))}

    # '4k' -> 2160, '8k' -> 4320
    m = re.search(r'\b([48])[kK]\b', s)
    if m:
        return {'height': int(m.group(1)) * 540}

    return {}
3701
3702
def parse_bitrate(s):
    """Return the bitrate in kbps from a string like '128 kbps', else None."""
    if not isinstance(s, compat_str):
        return
    match = re.search(r'\b(\d+)\s*kbps', s)
    return int(match.group(1)) if match else None
3709
3710
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    # Unknown languages fall back to English month names
    names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
    if name in names:
        return names.index(name) + 1
    return None
3720
3721
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """

    abbrevs = [name[:3] for name in ENGLISH_MONTH_NAMES]
    if abbrev in abbrevs:
        return abbrevs.index(abbrev) + 1
    return None
3730
3731
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    # The negative lookahead leaves existing entities/char references intact
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;', xml_str)
3738
3739
def setproctitle(title):
    """Best-effort attempt to set the process title via libc prctl (Linux)."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only accepts a bytestring,
        # but unicode_literals turns every literal into unicode
        return
    encoded = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(encoded))
    buf.value = encoded
    try:
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
3764
3765
def remove_start(s, start):
    """Strip *start* from the beginning of *s* if present (None-safe)."""
    if s is None or not s.startswith(start):
        return s
    return s[len(start):]
3768
3769
def remove_end(s, end):
    """Strip *end* from the end of *s* if present (None-safe)."""
    if s is None or not s.endswith(end):
        return s
    return s[:-len(end)]
3772
3773
def remove_quotes(s):
    """Strip one matching pair of surrounding quotes (single or double)."""
    if s is None or len(s) < 2:
        return s
    if s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
3781
3782
def get_domain(url):
    """Return the domain part of *url* (scheme and 'www.' stripped), or None."""
    m = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    if not m:
        return None
    return m.group('domain')
3786
3787
def url_basename(url):
    """Return the last path component of *url* (query/fragment excluded)."""
    path = compat_urlparse.urlparse(url).path
    components = path.strip('/').split('/')
    return components[-1]
3791
3792
def base_url(url):
    """Return *url* truncated after the last '/' of its path.

    Like the original, raises AttributeError when the URL has no path slash.
    """
    m = re.match(r'https?://[^?#&]+/', url)
    return m.group()
3795
3796
def urljoin(base, path):
    """Join *base* and *path* like a browser; None when inputs are unusable."""
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    # Absolute (or protocol-relative) paths are returned untouched
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
3810
3811
class HEADRequest(compat_urllib_request.Request):
    """Request subclass that always uses the HTTP HEAD method."""
    def get_method(self):
        return 'HEAD'
3815
3816
class PUTRequest(compat_urllib_request.Request):
    """Request subclass that always uses the HTTP PUT method."""
    def get_method(self):
        return 'PUT'
3820
3821
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce *v* to an int scaled by invscale/scale; *default* on failure.

    When *get_attr* is given, v is replaced by getattr(v, get_attr, None)
    first. The empty string counts as missing.
    """
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if v == '':
        v = None
    if v is None:
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        return default
3834
3835
def str_or_none(v, default=None):
    """Stringify *v*, or return *default* when v is None."""
    if v is None:
        return default
    return compat_str(v)
3838
3839
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    if isinstance(int_str, compat_str):
        # Drop thousands separators and a leading '+'
        int_str = re.sub(r'[,\.\+]', '', int_str)
    return int_or_none(int_str)
3847
3848
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce *v* to a float scaled by invscale/scale; *default* on failure."""
    if v is None:
        return default
    try:
        value = float(v)
    except (ValueError, TypeError):
        return default
    return value * invscale / scale
3856
3857
def bool_or_none(v, default=None):
    """Return *v* only if it is an actual bool, else *default*."""
    if isinstance(v, bool):
        return v
    return default
3860
3861
def strip_or_none(v, default=None):
    """Return v.strip() for strings, *default* for anything else."""
    if isinstance(v, compat_str):
        return v.strip()
    return default
3864
3865
def url_or_none(url):
    """Return the stripped URL when it uses a known downloadable scheme
    (http(s), rtmp*, rtsp, mms, ftp(s)) or is protocol-relative; else None."""
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url):
        return url
    return None
3871
3872
def strftime_or_none(timestamp, date_format, default=None):
    """Format a unix timestamp or 'YYYYMMDD' string; *default* on failure."""
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            dt = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            dt = datetime.datetime.strptime(timestamp, '%Y%m%d')
        else:
            dt = None
        # dt is None for unsupported types -> AttributeError -> default
        return dt.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
3883
3884
def parse_duration(s):
    """Parse a duration string into seconds (float), or None on failure.

    Understands 'HH:MM:SS(.ms)', ISO-8601-like 'P1DT2H3M4S', spelled-out
    forms like '2 hours 3 mins', and bare second counts.
    """
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days, hours, mins, secs, ms = [None] * 5
    # Colon-separated form: [[[days:]hours:]mins:]secs[.ms]
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        # ISO-8601-ish / spelled-out units; years/months/weeks are matched
        # but deliberately not captured (their length is ambiguous)
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            # Fractional 'X hours' / 'X mins' form
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None

    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        # ms still carries the leading '.', so it adds the fractional part
        duration += float(ms)
    return duration
3941
3942
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the file's real extension.

    'a.mp4' + 'temp' -> 'a.temp.mp4'. When *expected_real_ext* is given and
    the actual extension differs, *ext* is appended instead ('a.mp4.temp').
    """
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
3949
3950
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the file's extension with *ext*.

    When *expected_real_ext* is given and doesn't match the actual one,
    *ext* is appended to the full filename instead.
    """
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        base = filename
    else:
        base = name
    return '{0}.{1}'.format(base, ext)
3956
3957
def check_executable(exe, args=[]):
    """Return *exe* if the binary is runnable from PATH, else False.

    *args* may hold arguments that produce short output (like -version).
    """
    try:
        proc = subprocess.Popen(
            [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        process_communicate_or_kill(proc)
    except OSError:
        return False
    return exe
3967
3968
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN must be redirected too: on UNIX-like systems ffmpeg otherwise
        # triggers SIGTTOU when yt-dlp runs in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        proc = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = process_communicate_or_kill(proc)
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
3986
3987
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version string from program *output*; *unrecognized* if absent."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
3997
3998
class LazyList(collections.abc.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''

    # Raised instead of the builtin so callers can distinguish "the lazy
    # sequence ran out" from unrelated IndexErrors (still catchable as both)
    class IndexError(IndexError):
        pass

    def __init__(self, iterable):
        self.__iterable = iter(iterable)
        self.__cache = []          # items consumed from the iterable so far
        self.__reversed = False    # logical direction flag; storage order never changes

    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item

    def __exhaust(self):
        # Pull everything that remains into the cache (storage order)
        self.__cache.extend(self.__iterable)
        return self.__cache

    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]

    @staticmethod
    def __reverse_index(x):
        # Map a forward index/slice bound to its mirrored negative index
        return None if x is None else -(x + 1)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self.__reversed:
                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self.__exhaust()
            try:
                return self.__cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        # Only pull as many items as the requested indices actually need
        n = max(start or 0, stop or 0) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        try:
            return self.__cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        try:
            # Probing one end is enough; avoids exhausting the iterable
            self[-1] if self.__reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self.__exhaust()
        return len(self.__cache)

    def reverse(self):
        # Flips direction in O(1); items are reordered lazily on access
        self.__reversed = not self.__reversed
        return self

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())
4083
4084
class PagedList:
    """Base class for lazily-fetched paginated entry lists.

    Subclasses implement _getslice(start, end) to yield entries.
    """

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc    # pagenum -> iterable of entries
        self._pagesize = pagesize
        self._use_cache = use_cache
        self._cache = {}

    def getpage(self, pagenum):
        """Fetch (or serve from cache) the list of entries for one page."""
        page_results = self._cache.get(pagenum)
        if page_results is None:
            # Check for presence rather than truthiness: the previous
            # `cache.get(...) or fetch()` re-fetched empty pages every call
            page_results = list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        # NOTE: cache must be enabled if this is used
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        return entries[0] if entries else None
4114
4115
class OnDemandPagedList(PagedList):
    """PagedList that fetches consecutive pages on demand, stopping early
    when a short page signals the end of the data."""

    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            # Offsets of the requested range within the current page
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            page_results = self.getpage(pagenum)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
4149
4150
class InAdvancePagedList(PagedList):
    """PagedList for sources where the total page count is known up front."""

    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagecount = pagecount
        PagedList.__init__(self, pagefunc, pagesize, True)

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        # Entries of the first page that precede `start`
        skip_elems = start - start_page * self._pagesize
        # Total number of entries still wanted (None = unbounded)
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    yield from page_results[:only_more]
                    break
            yield from page_results
4174
4175
def uppercase_escape(s):
    """Decode \\UXXXXXXXX (8 hex digit) escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decode(m.group(0))[0],
        s)
4182
4183
def lowercase_escape(s):
    """Decode \\uXXXX (4 hex digit) escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decode(m.group(0))[0],
        s)
4190
4191
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Python 2's quote() chokes on unicode, so pre-encode to UTF-8 there;
    # the safe set is the RFC 3986 reserved characters plus '%'
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
4197
4198
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parsed = compat_urllib_parse_urlparse(url)
    # IDNA-encode the host; percent-escape every other component
    return parsed._replace(
        netloc=parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(parsed.path),
        params=escape_rfc3986(parsed.params),
        query=escape_rfc3986(parsed.query),
        fragment=escape_rfc3986(parsed.fragment)
    ).geturl()
4209
4210
def parse_qs(url):
    """Parse the query string of *url* into a dict of value lists."""
    query = compat_urllib_parse_urlparse(url).query
    return compat_parse_qs(query)
4213
4214
def read_batch_urls(batch_fd):
    """Read URLs from a batch-file object, skipping BOMs, blanks and comments."""
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
        for bom in BOM_UTF8:
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.lstrip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can safely be stripped when preceded by whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in (fixup(line) for line in fd) if url]
4232
4233
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
4236
4237
def update_url_query(url, query):
    """Return *url* with the parameters from *query* merged into its query string."""
    if not query:
        return url
    parsed = compat_urlparse.urlparse(url)
    params = compat_parse_qs(parsed.query)
    params.update(query)
    return compat_urlparse.urlunparse(parsed._replace(
        query=compat_urllib_parse_urlencode(params, True)))
4246
4247
def update_Request(req, url=None, data=None, headers={}, query={}):
    """Clone *req*, optionally overriding url/data and merging headers/query.

    Preserves the original HTTP method (HEAD/PUT/other) and timeout.
    """
    req_headers = req.headers.copy()
    req_headers.update(headers)
    req_data = data or req.data
    req_url = update_url_query(url or req.get_full_url(), query)
    # Pick a Request subclass that reproduces the original method
    req_type = {
        'HEAD': HEADRequest,
        'PUT': PUTRequest,
    }.get(req.get_method(), compat_urllib_request.Request)
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
4266
4267
def _multipart_encode_impl(data, boundary):
    """Build a multipart/form-data body from *data* with a fixed boundary.

    Raises ValueError when the boundary occurs inside a field (caller retries).
    """
    content_type = 'multipart/form-data; boundary=%s' % boundary
    boundary_bytes = boundary.encode('ascii')

    out = b''
    for name, value in data.items():
        out += b'--' + boundary_bytes + b'\r\n'
        if isinstance(name, compat_str):
            name = name.encode('utf-8')
        if isinstance(value, compat_str):
            value = value.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + name + b'"\r\n\r\n' + value + b'\r\n'
        if boundary_bytes in content:
            raise ValueError('Boundary overlaps with data')
        out += content

    out += b'--' + boundary_bytes + b'--\r\n'

    return out, content_type
4288
4289
def multipart_encode(data, boundary=None):
    '''
    Encode a dict (str/bytes keys and values) as RFC 7578 multipart/form-data.

    When *boundary* is None a random one is generated, and regenerated if it
    happens to collide with the payload; a caller-supplied boundary that
    collides raises ValueError instead.

    Returns (encoded_body, content_type).
    Reference: https://tools.ietf.org/html/rfc7578
    '''
    fixed_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
        try:
            return _multipart_encode_impl(data, boundary)
        except ValueError:
            if fixed_boundary:
                raise
            boundary = None
4318
4319
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up the first usable key of *key_or_keys* in *d*.

    None values (and falsy ones, unless skip_false_values is False) are
    skipped; *default* is returned when nothing matches.
    """
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for key in key_or_keys:
        value = d.get(key)
        if value is None:
            continue
        if skip_false_values and not value:
            continue
        return value
    return default
4328
4329
def try_get(src, getter, expected_type=None):
    """Apply getter(s) to *src*, returning the first result that neither
    raises a lookup error nor fails the expected_type check."""
    for get in variadic(getter):
        try:
            result = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            continue
        if expected_type is None or isinstance(result, expected_type):
            return result
4339
4340
def merge_dicts(*dicts):
    """Merge dicts left to right.

    None values are ignored; a later value only replaces an earlier one when
    the earlier value is an empty string and the new one a non-empty string.
    """
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            if k not in merged:
                merged[k] = v
            elif (isinstance(v, compat_str) and v
                    and isinstance(merged[k], compat_str)
                    and not merged[k]):
                merged[k] = v
    return merged
4353
4354
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    """Return *string* as compat_str, decoding byte strings with *encoding*."""
    if isinstance(string, compat_str):
        return string
    return compat_str(string, encoding, errors)
4357
4358
# MPAA-style US movie ratings mapped to a minimum viewer age
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


# US TV Parental Guidelines ratings mapped to a minimum viewer age
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}
4376
4377
def parse_age_limit(s):
    """Normalize an age limit (int 0-21, '18+', US movie or TV rating) to an
    int, or None when unrecognized."""
    if type(s) == int:
        # NB: type() check deliberately excludes bool and int subclasses
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    mobj = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if mobj:
        return int(mobj.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    mobj = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    if mobj:
        return TV_PARENTAL_GUIDELINES['TV-' + mobj.group(1)]
    return None
4393
4394
def strip_jsonp(code):
    """Strip a JSONP callback wrapper, returning the bare JSON payload."""
    return re.sub(
        r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$''',
        r'\g<callback_data>', code)
4403
4404
def js_to_json(code, vars={}):
    """Convert a JavaScript object/array literal into valid JSON text."""
    # vars is a dict of var, val pairs to substitute
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    # (pattern, base) pairs for hex and octal integer literals
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        # Rewrite one matched JS token into its JSON equivalent
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            return ""

        if v[0] in ("'", '"'):
            # Re-escape the string body for JSON (single -> double quoted)
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    # Integer keys must be quoted in JSON
                    return '"%d":' % i if v.endswith(':') else '%d' % i

            if v in vars:
                return vars[v]

        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
4451
4452
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        # Unknown qualities sort below everything else
        if qid in quality_ids:
            return quality_ids.index(qid)
        return -1
    return q
4461
4462
# Default output filename templates (see the --output option)
DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
# Maps each --output TYPE to its default file suffix (None = media file itself)
OUTTMPL_TYPES = {
    'chapter': None,
    'subtitle': None,
    'thumbnail': None,
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}

# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
# Template: fill {0} with the allowed key pattern and {1} with the allowed
# conversion-type pattern before compiling
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])?  # unused in python
        {1}  # conversion type
    )
    '''


STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
4497
4498
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    return s[:length - len(ELLIPSES)] + ELLIPSES
4507
4508
def version_tuple(v):
    """Split a dotted/dashed version string into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
4511
4512
def is_outdated_version(version, limit, assume_new=True):
    """True when *version* is older than *limit*.

    Missing or unparseable versions count as new (old if assume_new=False).
    """
    fallback = not assume_new
    if not version:
        return fallback
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return fallback
4520
4521
def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """
    # Self-update is deliberately disabled in yt-dlp. The zipimporter-based
    # detection inherited from youtube-dl sat unreachable after this return
    # and has been removed.
    return False
4529
4530
def args_to_str(args):
    # Get a short string representation for a subprocess command
    quoted = [compat_shlex_quote(a) for a in args]
    return ' '.join(quoted)
4534
4535
def error_to_compat_str(err):
    """Stringify an exception, decoding Python 2 byte messages properly."""
    message = str(err)
    if sys.version_info[0] < 3:
        # Python 2: the message is a byte string that must be decoded with
        # the preferred encoding rather than ascii
        message = message.decode(preferredencoding())
    return message
4543
4544
def mimetype2ext(mt):
    """Map a MIME type to a file extension (None for None input)."""
    if mt is None:
        return None

    # Full-type overrides that cannot be derived from the subtype alone
    full_type_map = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
    }
    ext = full_type_map.get(mt)
    if ext is not None:
        return ext

    _, _, subtype = mt.rpartition('/')
    subtype = subtype.split(';')[0].strip().lower()

    # Unknown subtypes fall through unchanged
    return {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
        'x-wav': 'wav',
    }.get(subtype, subtype)
4582
4583
def parse_codecs(codecs_str):
    """Parse an RFC 6381 codecs string into {'vcodec': ..., 'acodec': ...}.

    Returns {} for empty input or when nothing can be classified.
    http://tools.ietf.org/html/rfc6381
    """
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    vcodec = acodec = None
    for full_codec in split_codecs:
        codec = full_codec.split('.')[0]
        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora'):
            # First video codec wins
            vcodec = vcodec or full_codec
        elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
            acodec = acodec or full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if vcodec or acodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    if len(split_codecs) == 2:
        # Two unknown codecs: assume video + audio order
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}
4613
4614
def urlhandle_detect_ext(url_handle):
    """Guess a file extension from a response's headers.

    Prefers the Content-Disposition filename, falling back to Content-Type.
    """
    headers = url_handle.headers

    cd = headers.get('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            e = determine_ext(m.group('filename'), default_ext=None)
            if e:
                return e

    return mimetype2ext(headers.get('Content-Type'))
4627
4628
def encode_data_uri(data, mime_type):
    """Wrap *data* (bytes) in a base64 'data:' URI with the given MIME type."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, payload)
4631
4632
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None or content_limit is None:
        # No viewer limit set, or content is available to everyone
        return False
    return age_limit < content_limit
4641
4642
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = None
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(enc, 'replace')
            break
    if decoded is None:
        decoded = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', decoded)
4661
4662
def determine_protocol(info_dict):
    """Return the download protocol for *info_dict*, preferring an explicit one."""
    explicit = info_dict.get('protocol')
    if explicit is not None:
        return explicit

    url = info_dict['url']
    # Streaming protocols are identified by their URL prefix
    for prefix in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(prefix):
            return prefix

    # Manifest formats imply their own protocol
    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext

    return compat_urllib_parse_urlparse(url).scheme
4683
4684
def render_table(header_row, data, delim=False, extraGap=0, hideEmpty=False):
    """ Render a list of rows, each as a list of values """

    def column_widths(table):
        # Width of each column = the longest stringified cell in it
        return [max(len(compat_str(cell)) for cell in column) for column in zip(*table)]

    def keep_nonempty(row, widths):
        return [cell for width, cell in zip(widths, row) if width]

    if hideEmpty:
        # Drop columns whose cells are all empty in the data rows
        widths = column_widths(data)
        header_row = keep_nonempty(header_row, widths)
        data = [keep_nonempty(row, widths) for row in data]

    table = [header_row] + data
    widths = column_widths(table)
    if delim:
        # Insert a dashed separator row between the header and the data
        table = [header_row] + [['-' * width for width in widths]] + data
    format_str = ' '.join('%-' + compat_str(width + extraGap) + 's' for width in widths[:-1]) + ' %s'
    return '\n'.join(format_str % tuple(row) for row in table)
4705
4706
def _match_one(filter_part, dct, incomplete):
    """Evaluate a single filter expression (e.g. "duration > 60", "title ~= foo"
    or "!is_live") against the dict *dct*.

    When *incomplete* is true, a missing field matches (the metadata may
    simply not have been extracted yet).  Raises ValueError on a filter
    expression that cannot be parsed.
    """
    # TODO: Generalize code with YoutubeDL._build_format_filter
    # Operators that only make sense on string values
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    # Binary form: <key> [!]<op>[?] <numeric literal | quoted string | bare string>
    # The trailing "?" makes a missing field count as a match (none_inclusive)
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<strval>.+?)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        unnegated_op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('negation'):
            # "!" in front of the operator inverts its result
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None
                or m.group('strval') is not None
                # If the original field is a string and matching comparisonvalue is
                # a number we should respect the origin of the original field
                # and process comparison value as a string (see
                # https://github.com/ytdl-org/youtube-dl/issues/11082).
                or actual_value is not None and m.group('intval') is not None
                and isinstance(actual_value, compat_str)):
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Un-escape quote characters of the same kind inside the quoted value
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            if m.group('op') in STRING_OPERATORS:
                raise ValueError('Operator %s only supports string values!' % m.group('op'))
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Accept human-readable sizes such as "500K" or "1.5MiB",
                # with or without an explicit "B" suffix
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            # Missing field: match only in incomplete mode or with the "?" suffix
            return incomplete or m.group('none_inclusive')
        return op(actual_value, comparison_value)

    # Unary form: <key> (truthy / present) or !<key> (falsy / absent)
    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if incomplete and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
4788
4789
def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
    When incomplete, all conditions passes on missing fields
    """
    # Split on unescaped "&" and require every sub-filter to match
    parts = re.split(r'(?<!\\)&', filter_str)
    return all(
        _match_one(part.replace(r'\&', '&'), dct, incomplete)
        for part in parts)
4797
4798
def match_filter_func(filter_str):
    """Build a match_filter callable for YoutubeDL from *filter_str*.

    The returned function yields None when the video passes the filter,
    or a human-readable skip message otherwise.
    """
    def _match_func(info_dict, *args, **kwargs):
        if match_str(filter_str, info_dict, *args, **kwargs):
            return None
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
4807
4808
def parse_dfxp_time_expr(time_expr):
    """Parse a DFXP/TTML time expression into seconds (float), or None."""
    if not time_expr:
        return None

    # Plain offset, optionally suffixed with "s": "12.3" or "12.3s"
    match = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if match:
        return float(match.group('time_offset'))

    # Clock format HH:MM:SS[.frac] (a ":" before the fraction is tolerated)
    match = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if match:
        hours = int(match.group(1))
        minutes = int(match.group(2))
        seconds = float(match.group(3).replace(':', '.'))
        return 3600 * hours + 60 * minutes + seconds
4820
4821
def srt_subtitles_timecode(seconds):
    """Format a duration in seconds as an SRT timecode (HH:MM:SS,mmm)."""
    hours = seconds / 3600
    minutes = (seconds % 3600) / 60
    secs = seconds % 60
    millis = (seconds % 1) * 1000
    return '%02d:%02d:%02d,%03d' % (hours, minutes, secs, millis)
4824
4825
def dfxp2srt(dfxp_data):
    '''
    Convert DFXP/TTML subtitles to SRT.

    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    # Older TTML namespaces are rewritten (as bytes) to their modern
    # equivalents before parsing, so one set of XPath expressions suffices
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    # TTML styling attributes that are translated into SRT markup
    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    # Shared state: style id -> resolved style dict, and the document-wide
    # default style; both are closed over by TTMLPElementParser below
    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        """XMLParser target that renders one <p> element to SRT-styled text."""
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                # Effective style = default style, overridden by the
                # referenced named style, overridden by inline attributes
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        # Skip properties already in effect on the parent
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                # Close the markup opened by the matching start() call
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        # Re-serialize the node and feed it through the custom parser target
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    # Resolve named styles; repeat while forward references to parent
    # styles remain unresolved
    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    # A style on <body> or <div> becomes the document default
    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            # Derive the end from an explicit duration when "end" is absent
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
4988
4989
def cli_option(params, command_option, param):
    """Return [command_option, value] when params[param] is truthy, else []."""
    value = params.get(param)
    if value:
        value = compat_str(value)
    return [command_option, value] if value is not None else []
4995
4996
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Render a boolean param as a CLI option, joined by *separator* if given."""
    flag = params.get(param)
    if flag is None:
        return []
    assert isinstance(flag, bool)
    value = true_value if flag else false_value
    if separator:
        return [command_option + separator + value]
    return [command_option, value]
5005
5006
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals expected_value, else []."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
5010
5011
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    """Look up the first matching key group in *argdict* and flatten its args."""
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)

    assert isinstance(keys, (list, tuple))
    for key_list in keys:
        # Collect the non-missing entries for every alias in this key group
        matches = [argdict.get(key.lower()) for key in variadic(key_list)]
        matches = [args for args in matches if args is not None]
        if matches:
            # Flatten the matched argument lists into one
            return [arg for args in matches for arg in args]
    return default
5030
5031
def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
    """Resolve the configuration-arg lookup keys for *main_key*/*exe* and delegate."""
    main_key = main_key.lower()
    exe = exe.lower()
    root_key = exe if main_key == exe else f'{main_key}+{exe}'
    lookup_keys = [f'{root_key}{suffix}' for suffix in (keys or [''])]
    if root_key not in lookup_keys:
        # Only suffixed keys requested: disable the legacy list fallback
        use_compat = False
    else:
        if main_key != exe:
            lookup_keys.append((main_key, exe))
        lookup_keys.append('default')
    return cli_configuration_args(argdict, lookup_keys, default, use_compat)
5043
5044
class ISO639Utils(object):
    """Conversions between ISO 639-1 (two-letter) and ISO 639-2/T
    (three-letter) language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    # Maps ISO 639-1 codes to ISO 639-2/T codes
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'iw': 'heb',  # Replaced by he in 1989 revision
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'in': 'ind',  # Replaced by id in 1989 revision
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'ji': 'yid',  # Replaced by yi in 1989 revision
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters matter, so regional variants
        # such as "en-US" also resolve
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Linear scan; insertion order makes the modern code win over a
        # deprecated alias mapping to the same three-letter code
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
5248
5249
class ISO3166Utils(object):
    """Lookup of full country names from ISO 3166-1 alpha-2 codes."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Case-insensitive lookup; returns None for unknown codes
        return cls._country_map.get(code.upper())
5508
5509
class GeoUtils(object):
    """Helpers for faking a source IP address from a given country
    (used for geo-restriction bypass)."""
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '46.172.224.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '182.50.184.0/21',
        'AQ': '23.154.160.0/24',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '77.116.0.0/14',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AX': '185.217.4.0/22',
        'AZ': '5.197.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BE': '57.0.0.0/8',
        'BF': '102.178.0.0/15',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '185.212.72.0/23',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '191.128.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '197.242.176.0/21',
        'CG': '160.113.0.0/16',
        'CH': '85.0.0.0/13',
        'CI': '102.136.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '102.244.0.0/14',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '31.153.0.0/16',
        'CZ': '88.100.0.0/14',
        'DE': '53.0.0.0/8',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FK': '80.73.208.0/21',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'FR': '90.0.0.0/9',
        'GA': '41.158.0.0/15',
        'GB': '25.0.0.0/8',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '154.160.0.0/12',
        'GI': '95.164.0.0/16',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '133.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.167.192.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '24.92.144.0/20',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '102.183.0.0/16',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '102.70.0.0/15',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '102.35.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '93.86.0.0/15',
        'RU': '5.136.0.0/13',
        'RW': '41.186.0.0/16',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '102.120.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '8.128.0.0/10',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '102.143.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '154.115.192.0/18',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SY': '5.0.0.0/16',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '37.52.0.0/14',
        'UG': '102.80.0.0/13',
        'US': '6.0.0.0/8',
        'UY': '167.56.0.0/13',
        'UZ': '84.54.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '207.191.240.0/21',
        'VE': '186.88.0.0/13',
        'VG': '66.81.192.0/20',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '102.144.0.0/13',
        'ZW': '102.177.192.0/18',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        """Return a random IPv4 address (as a string) inside the given
        CIDR block, or inside the mapped block for a two-letter country
        code; None if the country code is unknown."""
        if len(code_or_block) == 2:
            # Two characters: treat as a country code
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        # Derive the numeric range covered by the prefix and pick uniformly
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
5768
5769
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler that honours a per-request 'Ytdl-request-proxy' header,
    falling back to the globally configured proxies."""

    def __init__(self, proxies=None):
        # Set default handlers
        # Register http_open/https_open so every request goes through
        # proxy_open; the lambda defaults bind the current loop values
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        # A per-request proxy header overrides the configured proxy
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers wrap the socket with SOCKS themselves
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
5793
5794
5795 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
5796 # released into Public Domain
5797 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
5798
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    n = int(n)
    if n > 0:
        # int.to_bytes with the minimal length gives the big-endian
        # representation with no leading zero bytes — exactly what the
        # original PyCrypto pack-and-strip loop produced
        s = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    else:
        # matches the original behaviour: n <= 0 encodes as a single zero byte
        s = b'\000'
    # pad the front so the total length is a multiple of blocksize
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
5827
5828
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    # int.from_bytes performs exactly the zero-padded 32-bit-word
    # accumulation the original PyCrypto code did (including b'' -> 0)
    return int.from_bytes(s, 'big')
5844
5845
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # The payload is the input bytes interpreted as a little-endian
    # integer, hence the [::-1] reversal before hexlify
    payload = int(binascii.hexlify(data[::-1]), 16)
    return '%x' % pow(payload, exponent, modulus)
5861
5862
def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data input data
    @param {int} length target length
    @returns {int[]} padded data
    @raises ValueError if data does not fit in length with 11 bytes of overhead
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # RFC 8017 (PKCS#1 v1.5) requires the padding string PS to consist of
    # *nonzero* octets: a zero byte would be misread as the 0x00 separator
    # during unpadding. randint(0, 254) could emit zeros, so use 1..255.
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
5876
5877
def encode_base_n(num, n, table=None):
    """Encode the non-negative integer *num* in base *n*.

    @param table  digit alphabet; defaults to 0-9a-zA-Z truncated to n chars
    @raises ValueError  if n exceeds the length of the digit table
    """
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    # collect digits least-significant first, then reverse
    digits = []
    while num:
        num, remainder = divmod(num, n)
        digits.append(table[remainder])
    return ''.join(reversed(digits))
5894
5895
def decode_packed_codes(code):
    """Unpack JavaScript obfuscated with the common 'packed' encoder
    (identifiers replaced by base-N indexes into a symbol list)."""
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base, count = int(base), int(count)
    symbols = symbols.split('|')

    # Map each base-N encoded identifier to its replacement symbol;
    # an empty symbol means the identifier stands for itself
    symbol_table = {}
    for index in range(count - 1, -1, -1):
        encoded = encode_base_n(index, base)
        symbol_table[encoded] = symbols[index] or encoded

    return re.sub(
        r'\b(\w+)\b', lambda m: symbol_table[m.group(0)],
        obfuscated_code)
5912
5913
def caesar(s, alphabet, shift):
    """Apply a Caesar cipher to *s*: every character found in *alphabet* is
    shifted *shift* positions (wrapping around); other characters pass through."""
    if shift == 0:
        return s
    size = len(alphabet)

    def rotate(c):
        if c not in alphabet:
            return c
        return alphabet[(alphabet.index(c) + shift) % size]

    return ''.join(map(rotate, s))
5921
5922
def rot47(s):
    """Apply the ROT47 cipher: shift by 47 over the printable ASCII range 33-126."""
    printable_ascii = r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~'''
    return caesar(s, printable_ascii, 47)
5925
5926
def parse_m3u8_attributes(attrib):
    """Parse an M3U8 attribute list ('KEY=value,KEY="quoted,value"') into a
    dict, stripping the surrounding quotes from quoted values."""
    return {
        key: val[1:-1] if val.startswith('"') else val
        for key, val in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib)}
5934
5935
def urshift(val, n):
    """Unsigned (logical) right shift of a 32-bit value by *n* bits."""
    if val < 0:
        # reinterpret the negative number as its unsigned 32-bit equivalent
        val += 0x100000000
    return val >> n
5938
5939
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    """Decode a PNG image into (width, height, pixels).

    pixels is a list of rows; each row is a flat list of byte values
    (stride = width * 3, i.e. this code assumes 3 bytes per pixel /
    8-bit truecolour, non-interlaced data — TODO confirm for all callers).

    @param png_data  raw PNG file contents as bytes
    @raises IOError  if the signature/IHDR is missing or no IDAT data is found
    """
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    # Validate the 8-byte PNG signature and that IHDR is the first chunk
    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    # Big-endian unsigned int of 1, 2 or 4 bytes
    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    # Each chunk is: 4-byte length, 4-byte type, data, 4-byte CRC
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    # IHDR was verified to be the first chunk above
    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    # All IDAT chunks together form one zlib stream
    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    # Bytes per scanline (3 bytes per pixel)
    stride = width * 3
    pixels = []

    # Look up an already-decoded byte by its flat index into the image
    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        # Each scanline is prefixed with one filter-type byte
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            # The corresponding byte of the pixel to the left is 3 bytes
            # back (RGB); x > 2 means there is such a pixel on this line
            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            # Undo the per-scanline filter (PNG filter types 0-4)
            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                # c is the upper-left neighbour, if it exists
                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                # Choose the predictor closest to p (Paeth predictor)
                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
6045
6046
def write_xattr(path, key, value):
    """Set the extended attribute *key* to *value* (bytes) on the file *path*.

    Tries, in order: the pyxattr/xattr Python modules, NTFS Alternate Data
    Streams on Windows, then the setfattr/xattr command-line tools.

    @raises XAttrMetadataError     when a tool was found but setting failed
    @raises XAttrUnavailableError  when no usable tool exists on this system
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            # The stream is addressed as '<path>:<key>'
            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            # Fall back to the setfattr/xattr command-line tools
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                # The CLI tools take the value as a (UTF-8) string argument
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = process_communicate_or_kill(p)
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")
6129
6130
def random_birthday(year_field, month_field, day_field):
    """Generate a random date between 1950-01-01 and 1995-12-31 and return it
    as a dict mapping the given field names to stringified year/month/day."""
    first, last = datetime.date(1950, 1, 1), datetime.date(1995, 12, 31)
    chosen = first + datetime.timedelta(days=random.randint(0, (last - first).days))
    return {
        year_field: str(chosen.year),
        month_field: str(chosen.month),
        day_field: str(chosen.day),
    }
6141
6142
# Templates for internet shortcut files, which are plain text files.

# Windows-style .url shortcut ([InternetShortcut] INI section).
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

# macOS .webloc shortcut (an XML property list).
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

# freedesktop.org .desktop link entry (Linux desktop environments).
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
6168
6169
def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    # Rebuild the authority component piece by piece (userinfo@host:port)
    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    # NOTE(review): this drops an explicit port 80 regardless of scheme
    # (e.g. 'https://host:80' also loses its port) — confirm this is intended.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
6212
6213
def to_high_limit_path(path):
    """On Windows/Cygwin, return the absolute path prefixed with '\\\\?\\' to
    bypass the MAX_PATH limitation; on other platforms return *path* unchanged.
    Individual path segment length limits may still apply."""
    if sys.platform not in ('win32', 'cygwin'):
        return path
    # A raw string literal cannot end with a backslash, hence the trailing
    # space that is immediately stripped off again.
    return r'\\?\ '.rstrip() + os.path.abspath(path)
6220
6221
def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
    """Format a value (or obj[field]) with a printf-style template.

    @param obj      the value itself, or a mapping when *field* is given
    @param field    optional key looked up with obj.get()
    @param template printf-style template applied to the value
    @param ignore   values treated as missing
    @param func     optional transform applied before templating
    @returns        the formatted string, or *default* for ignored values
    """
    if field is None:
        val = default if obj is None else obj
    else:
        val = obj.get(field, default)
    if func and val not in ignore:
        val = func(val)
    # func may itself have produced an ignored value, so re-check
    return default if val in ignore else template % val
6230
6231
def clean_podcast_url(url):
    # Strip known podcast tracking/analytics redirect prefixes so the URL
    # points directly at the media file.
    return re.sub(r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/''', '', url)
6247
6248
_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    """Return a random RFC 4122 version-4 UUID string.

    'x' positions are arbitrary hex digits; the nibble after '4xxx-' carries
    the variant bits and must be one of 8/9/a/b — the previous code drew it
    from the full hex range, producing non-compliant UUIDs.
    """
    return re.sub(
        r'[xy]',
        lambda m: _HEX_TABLE[random.randint(0, 15)] if m.group(0) == 'x'
        else _HEX_TABLE[random.randint(8, 11)],  # variant nibble: 8, 9, a or b
        'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
6254
6255
def make_dir(path, to_screen=None):
    """Create the parent directory of *path* if it does not already exist.

    @param path       file path whose directory should be created
    @param to_screen  optional callable used to report a failure message
    @returns          True on success (or nothing to do), False on failure
    """
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        # The old check `callable(to_screen) is not None` was always truthy
        # (a bool is never None), so a failure with to_screen=None crashed
        # with "'NoneType' is not callable"; test callability instead.
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False
6266
6267
def get_executable_path():
    """Return the absolute directory yt-dlp is being run from, accounting for
    PyInstaller bundles and zip-packaged installs."""
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):  # running from a PyInstaller bundle
        return os.path.abspath(os.path.dirname(sys.executable))
    here = os.path.dirname(__file__)
    if isinstance(globals().get('__loader__'), zipimporter):  # running from ZIP
        return os.path.abspath(os.path.join(here, '../..'))
    return os.path.abspath(os.path.join(here, '..'))
6277
6278
def load_plugins(name, suffix, namespace):
    """Load classes named *...suffix* from the 'ytdlp_plugins/<name>' module
    next to the executable, registering them into *namespace*.

    @returns dict of the newly registered {class name: class}
    """
    plugin_info = [None]
    classes = {}
    try:
        plugin_info = imp.find_module(
            name, [os.path.join(get_executable_path(), 'ytdlp_plugins')])
        plugins = imp.load_module(name, *plugin_info)
        for member_name in dir(plugins):
            # skip names already registered and non-matching ones
            if member_name in namespace or not member_name.endswith(suffix):
                continue
            klass = getattr(plugins, member_name)
            classes[member_name] = namespace[member_name] = klass
    except ImportError:
        pass  # no plugin module present — nothing to load
    finally:
        # imp.find_module returns an open file object as its first element
        if plugin_info[0] is not None:
            plugin_info[0].close()
    return classes
6299
6300
def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a string,
                            a tuple of strings or "...". When a tuple is given,
                            all the keys given in the tuple are traversed, and
                            "..." traverses all the keys in the object
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    # TODO: Write tests
    '''
    if not casesense:
        # Normalize every string key in every path to lower case up front
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        # `depth` (nonlocal) records how many "..." / tuple branchings were
        # taken, i.e. how many levels of nested result lists were produced
        nonlocal depth
        if obj is None:
            return None
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if isinstance(key, (list, tuple)):
                # Branch: traverse each alternative key, then flatten via "..."
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                # "..." expands to all values of the current object
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                # Dict lookup; falls back to case-insensitive scan when needed
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    # User-supplied keys may be indices or ':'-separated slices
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        # A full slice behaves exactly like "..."
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    # Build the final type filter: a type check, a callable, or identity
    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                # Result is a list nested `depth` levels deep: flatten it to a
                # single list, dropping Nones, then apply the type filter
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default
6383
6384
def traverse_dict(dictn, keys, casesense=True):
    """Deprecated wrapper around traverse_obj, kept for backward
    compatibility only. Do not use in new code."""
    return traverse_obj(
        dictn, keys, casesense=casesense,
        is_user_input=True, traverse_string=True)
6389
6390
def variadic(x, allowed_types=(str, bytes)):
    """Return *x* unchanged if it is an iterable (excluding *allowed_types*,
    which are treated as atomic values); otherwise wrap it in a 1-tuple."""
    if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types):
        return x
    return (x,)
6393
6394
# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
    """Build a JWS Compact Serialization token signed with HMAC-SHA256.

    @param payload_data  JSON-serializable claims object
    @param key           shared secret (str) for the HMAC signature
    @param headers       extra fields merged over {'alg': 'HS256', 'typ': 'JWT'}
                         (only read, never mutated)
    @returns             token as bytes: b'header.payload.signature'
    """
    # NOTE(review): segments use standard base64 (with '+', '/' and '='
    # padding) rather than the base64url-without-padding encoding of
    # RFC 7515 — existing callers appear to rely on this, so it is kept.
    header = {'alg': 'HS256', 'typ': 'JWT'}
    header.update(headers)
    segments = [
        base64.b64encode(json.dumps(header).encode('utf-8')),
        base64.b64encode(json.dumps(payload_data).encode('utf-8')),
    ]
    mac = hmac.new(key.encode('utf-8'), b'.'.join(segments), hashlib.sha256)
    segments.append(base64.b64encode(mac.digest()))
    return b'.'.join(segments)