]> jfr.im git - yt-dlp.git/blob - yt_dlp/utils.py
[build] Allow building with py2exe (and misc fixes)
[yt-dlp.git] / yt_dlp / utils.py
1 #!/usr/bin/env python3
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5
6 import base64
7 import binascii
8 import calendar
9 import codecs
10 import collections
11 import contextlib
12 import ctypes
13 import datetime
14 import email.utils
15 import email.header
16 import errno
17 import functools
18 import gzip
19 import hashlib
20 import hmac
21 import imp
22 import io
23 import itertools
24 import json
25 import locale
26 import math
27 import operator
28 import os
29 import platform
30 import random
31 import re
32 import socket
33 import ssl
34 import subprocess
35 import sys
36 import tempfile
37 import time
38 import traceback
39 import xml.etree.ElementTree
40 import zlib
41
42 from .compat import (
43 compat_HTMLParseError,
44 compat_HTMLParser,
45 compat_HTTPError,
46 compat_basestring,
47 compat_chr,
48 compat_cookiejar,
49 compat_ctypes_WINFUNCTYPE,
50 compat_etree_fromstring,
51 compat_expanduser,
52 compat_html_entities,
53 compat_html_entities_html5,
54 compat_http_client,
55 compat_integer_types,
56 compat_numeric_types,
57 compat_kwargs,
58 compat_os_name,
59 compat_parse_qs,
60 compat_shlex_quote,
61 compat_str,
62 compat_struct_pack,
63 compat_struct_unpack,
64 compat_urllib_error,
65 compat_urllib_parse,
66 compat_urllib_parse_urlencode,
67 compat_urllib_parse_urlparse,
68 compat_urllib_parse_urlunparse,
69 compat_urllib_parse_quote,
70 compat_urllib_parse_quote_plus,
71 compat_urllib_parse_unquote_plus,
72 compat_urllib_request,
73 compat_urlparse,
74 compat_xpath,
75 )
76
77 from .socks import (
78 ProxyType,
79 sockssocket,
80 )
81
82
def register_socks_protocols():
    """Teach urlparse to treat SOCKS schemes as netloc-carrying protocols.

    In Python < 2.6.5, urlsplit() suffers from https://bugs.python.org/issue7904:
    URLs whose scheme is not listed in urlparse.uses_netloc are not handled
    correctly.  Append each SOCKS scheme exactly once (idempotent).
    """
    registered = compat_urlparse.uses_netloc
    for proxy_scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if proxy_scheme not in registered:
            registered.append(proxy_scheme)
90
91
# The type of a compiled regular expression object is not clearly exposed
# on all supported Python versions, so derive it from an actual pattern.
compiled_regex_type = re.compile('').__class__
94
95
def random_user_agent():
    """Return a random desktop Chrome-on-Windows User-Agent string.

    Picks one Chrome version uniformly at random (via random.choice) from a
    hard-coded snapshot of known Chrome release versions and substitutes it
    into the fixed Windows 10 / WebKit UA template.  Used to vary the UA
    between runs so requests look less uniform.
    """
    # UA template; '%s' is replaced by the chosen Chrome version below.
    _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
    # Static snapshot of Chrome release version strings (roughly Chrome 68-76
    # era).  The ordering is irrelevant — selection is uniform random.
    _CHROME_VERSIONS = (
        '74.0.3729.129',
        '76.0.3780.3',
        '76.0.3780.2',
        '74.0.3729.128',
        '76.0.3780.1',
        '76.0.3780.0',
        '75.0.3770.15',
        '74.0.3729.127',
        '74.0.3729.126',
        '76.0.3779.1',
        '76.0.3779.0',
        '75.0.3770.14',
        '74.0.3729.125',
        '76.0.3778.1',
        '76.0.3778.0',
        '75.0.3770.13',
        '74.0.3729.124',
        '74.0.3729.123',
        '73.0.3683.121',
        '76.0.3777.1',
        '76.0.3777.0',
        '75.0.3770.12',
        '74.0.3729.122',
        '76.0.3776.4',
        '75.0.3770.11',
        '74.0.3729.121',
        '76.0.3776.3',
        '76.0.3776.2',
        '73.0.3683.120',
        '74.0.3729.120',
        '74.0.3729.119',
        '74.0.3729.118',
        '76.0.3776.1',
        '76.0.3776.0',
        '76.0.3775.5',
        '75.0.3770.10',
        '74.0.3729.117',
        '76.0.3775.4',
        '76.0.3775.3',
        '74.0.3729.116',
        '75.0.3770.9',
        '76.0.3775.2',
        '76.0.3775.1',
        '76.0.3775.0',
        '75.0.3770.8',
        '74.0.3729.115',
        '74.0.3729.114',
        '76.0.3774.1',
        '76.0.3774.0',
        '75.0.3770.7',
        '74.0.3729.113',
        '74.0.3729.112',
        '74.0.3729.111',
        '76.0.3773.1',
        '76.0.3773.0',
        '75.0.3770.6',
        '74.0.3729.110',
        '74.0.3729.109',
        '76.0.3772.1',
        '76.0.3772.0',
        '75.0.3770.5',
        '74.0.3729.108',
        '74.0.3729.107',
        '76.0.3771.1',
        '76.0.3771.0',
        '75.0.3770.4',
        '74.0.3729.106',
        '74.0.3729.105',
        '75.0.3770.3',
        '74.0.3729.104',
        '74.0.3729.103',
        '74.0.3729.102',
        '75.0.3770.2',
        '74.0.3729.101',
        '75.0.3770.1',
        '75.0.3770.0',
        '74.0.3729.100',
        '75.0.3769.5',
        '75.0.3769.4',
        '74.0.3729.99',
        '75.0.3769.3',
        '75.0.3769.2',
        '75.0.3768.6',
        '74.0.3729.98',
        '75.0.3769.1',
        '75.0.3769.0',
        '74.0.3729.97',
        '73.0.3683.119',
        '73.0.3683.118',
        '74.0.3729.96',
        '75.0.3768.5',
        '75.0.3768.4',
        '75.0.3768.3',
        '75.0.3768.2',
        '74.0.3729.95',
        '74.0.3729.94',
        '75.0.3768.1',
        '75.0.3768.0',
        '74.0.3729.93',
        '74.0.3729.92',
        '73.0.3683.117',
        '74.0.3729.91',
        '75.0.3766.3',
        '74.0.3729.90',
        '75.0.3767.2',
        '75.0.3767.1',
        '75.0.3767.0',
        '74.0.3729.89',
        '73.0.3683.116',
        '75.0.3766.2',
        '74.0.3729.88',
        '75.0.3766.1',
        '75.0.3766.0',
        '74.0.3729.87',
        '73.0.3683.115',
        '74.0.3729.86',
        '75.0.3765.1',
        '75.0.3765.0',
        '74.0.3729.85',
        '73.0.3683.114',
        '74.0.3729.84',
        '75.0.3764.1',
        '75.0.3764.0',
        '74.0.3729.83',
        '73.0.3683.113',
        '75.0.3763.2',
        '75.0.3761.4',
        '74.0.3729.82',
        '75.0.3763.1',
        '75.0.3763.0',
        '74.0.3729.81',
        '73.0.3683.112',
        '75.0.3762.1',
        '75.0.3762.0',
        '74.0.3729.80',
        '75.0.3761.3',
        '74.0.3729.79',
        '73.0.3683.111',
        '75.0.3761.2',
        '74.0.3729.78',
        '74.0.3729.77',
        '75.0.3761.1',
        '75.0.3761.0',
        '73.0.3683.110',
        '74.0.3729.76',
        '74.0.3729.75',
        '75.0.3760.0',
        '74.0.3729.74',
        '75.0.3759.8',
        '75.0.3759.7',
        '75.0.3759.6',
        '74.0.3729.73',
        '75.0.3759.5',
        '74.0.3729.72',
        '73.0.3683.109',
        '75.0.3759.4',
        '75.0.3759.3',
        '74.0.3729.71',
        '75.0.3759.2',
        '74.0.3729.70',
        '73.0.3683.108',
        '74.0.3729.69',
        '75.0.3759.1',
        '75.0.3759.0',
        '74.0.3729.68',
        '73.0.3683.107',
        '74.0.3729.67',
        '75.0.3758.1',
        '75.0.3758.0',
        '74.0.3729.66',
        '73.0.3683.106',
        '74.0.3729.65',
        '75.0.3757.1',
        '75.0.3757.0',
        '74.0.3729.64',
        '73.0.3683.105',
        '74.0.3729.63',
        '75.0.3756.1',
        '75.0.3756.0',
        '74.0.3729.62',
        '73.0.3683.104',
        '75.0.3755.3',
        '75.0.3755.2',
        '73.0.3683.103',
        '75.0.3755.1',
        '75.0.3755.0',
        '74.0.3729.61',
        '73.0.3683.102',
        '74.0.3729.60',
        '75.0.3754.2',
        '74.0.3729.59',
        '75.0.3753.4',
        '74.0.3729.58',
        '75.0.3754.1',
        '75.0.3754.0',
        '74.0.3729.57',
        '73.0.3683.101',
        '75.0.3753.3',
        '75.0.3752.2',
        '75.0.3753.2',
        '74.0.3729.56',
        '75.0.3753.1',
        '75.0.3753.0',
        '74.0.3729.55',
        '73.0.3683.100',
        '74.0.3729.54',
        '75.0.3752.1',
        '75.0.3752.0',
        '74.0.3729.53',
        '73.0.3683.99',
        '74.0.3729.52',
        '75.0.3751.1',
        '75.0.3751.0',
        '74.0.3729.51',
        '73.0.3683.98',
        '74.0.3729.50',
        '75.0.3750.0',
        '74.0.3729.49',
        '74.0.3729.48',
        '74.0.3729.47',
        '75.0.3749.3',
        '74.0.3729.46',
        '73.0.3683.97',
        '75.0.3749.2',
        '74.0.3729.45',
        '75.0.3749.1',
        '75.0.3749.0',
        '74.0.3729.44',
        '73.0.3683.96',
        '74.0.3729.43',
        '74.0.3729.42',
        '75.0.3748.1',
        '75.0.3748.0',
        '74.0.3729.41',
        '75.0.3747.1',
        '73.0.3683.95',
        '75.0.3746.4',
        '74.0.3729.40',
        '74.0.3729.39',
        '75.0.3747.0',
        '75.0.3746.3',
        '75.0.3746.2',
        '74.0.3729.38',
        '75.0.3746.1',
        '75.0.3746.0',
        '74.0.3729.37',
        '73.0.3683.94',
        '75.0.3745.5',
        '75.0.3745.4',
        '75.0.3745.3',
        '75.0.3745.2',
        '74.0.3729.36',
        '75.0.3745.1',
        '75.0.3745.0',
        '75.0.3744.2',
        '74.0.3729.35',
        '73.0.3683.93',
        '74.0.3729.34',
        '75.0.3744.1',
        '75.0.3744.0',
        '74.0.3729.33',
        '73.0.3683.92',
        '74.0.3729.32',
        '74.0.3729.31',
        '73.0.3683.91',
        '75.0.3741.2',
        '75.0.3740.5',
        '74.0.3729.30',
        '75.0.3741.1',
        '75.0.3741.0',
        '74.0.3729.29',
        '75.0.3740.4',
        '73.0.3683.90',
        '74.0.3729.28',
        '75.0.3740.3',
        '73.0.3683.89',
        '75.0.3740.2',
        '74.0.3729.27',
        '75.0.3740.1',
        '75.0.3740.0',
        '74.0.3729.26',
        '73.0.3683.88',
        '73.0.3683.87',
        '74.0.3729.25',
        '75.0.3739.1',
        '75.0.3739.0',
        '73.0.3683.86',
        '74.0.3729.24',
        '73.0.3683.85',
        '75.0.3738.4',
        '75.0.3738.3',
        '75.0.3738.2',
        '75.0.3738.1',
        '75.0.3738.0',
        '74.0.3729.23',
        '73.0.3683.84',
        '74.0.3729.22',
        '74.0.3729.21',
        '75.0.3737.1',
        '75.0.3737.0',
        '74.0.3729.20',
        '73.0.3683.83',
        '74.0.3729.19',
        '75.0.3736.1',
        '75.0.3736.0',
        '74.0.3729.18',
        '73.0.3683.82',
        '74.0.3729.17',
        '75.0.3735.1',
        '75.0.3735.0',
        '74.0.3729.16',
        '73.0.3683.81',
        '75.0.3734.1',
        '75.0.3734.0',
        '74.0.3729.15',
        '73.0.3683.80',
        '74.0.3729.14',
        '75.0.3733.1',
        '75.0.3733.0',
        '75.0.3732.1',
        '74.0.3729.13',
        '74.0.3729.12',
        '73.0.3683.79',
        '74.0.3729.11',
        '75.0.3732.0',
        '74.0.3729.10',
        '73.0.3683.78',
        '74.0.3729.9',
        '74.0.3729.8',
        '74.0.3729.7',
        '75.0.3731.3',
        '75.0.3731.2',
        '75.0.3731.0',
        '74.0.3729.6',
        '73.0.3683.77',
        '73.0.3683.76',
        '75.0.3730.5',
        '75.0.3730.4',
        '73.0.3683.75',
        '74.0.3729.5',
        '73.0.3683.74',
        '75.0.3730.3',
        '75.0.3730.2',
        '74.0.3729.4',
        '73.0.3683.73',
        '73.0.3683.72',
        '75.0.3730.1',
        '75.0.3730.0',
        '74.0.3729.3',
        '73.0.3683.71',
        '74.0.3729.2',
        '73.0.3683.70',
        '74.0.3729.1',
        '74.0.3729.0',
        '74.0.3726.4',
        '73.0.3683.69',
        '74.0.3726.3',
        '74.0.3728.0',
        '74.0.3726.2',
        '73.0.3683.68',
        '74.0.3726.1',
        '74.0.3726.0',
        '74.0.3725.4',
        '73.0.3683.67',
        '73.0.3683.66',
        '74.0.3725.3',
        '74.0.3725.2',
        '74.0.3725.1',
        '74.0.3724.8',
        '74.0.3725.0',
        '73.0.3683.65',
        '74.0.3724.7',
        '74.0.3724.6',
        '74.0.3724.5',
        '74.0.3724.4',
        '74.0.3724.3',
        '74.0.3724.2',
        '74.0.3724.1',
        '74.0.3724.0',
        '73.0.3683.64',
        '74.0.3723.1',
        '74.0.3723.0',
        '73.0.3683.63',
        '74.0.3722.1',
        '74.0.3722.0',
        '73.0.3683.62',
        '74.0.3718.9',
        '74.0.3702.3',
        '74.0.3721.3',
        '74.0.3721.2',
        '74.0.3721.1',
        '74.0.3721.0',
        '74.0.3720.6',
        '73.0.3683.61',
        '72.0.3626.122',
        '73.0.3683.60',
        '74.0.3720.5',
        '72.0.3626.121',
        '74.0.3718.8',
        '74.0.3720.4',
        '74.0.3720.3',
        '74.0.3718.7',
        '74.0.3720.2',
        '74.0.3720.1',
        '74.0.3720.0',
        '74.0.3718.6',
        '74.0.3719.5',
        '73.0.3683.59',
        '74.0.3718.5',
        '74.0.3718.4',
        '74.0.3719.4',
        '74.0.3719.3',
        '74.0.3719.2',
        '74.0.3719.1',
        '73.0.3683.58',
        '74.0.3719.0',
        '73.0.3683.57',
        '73.0.3683.56',
        '74.0.3718.3',
        '73.0.3683.55',
        '74.0.3718.2',
        '74.0.3718.1',
        '74.0.3718.0',
        '73.0.3683.54',
        '74.0.3717.2',
        '73.0.3683.53',
        '74.0.3717.1',
        '74.0.3717.0',
        '73.0.3683.52',
        '74.0.3716.1',
        '74.0.3716.0',
        '73.0.3683.51',
        '74.0.3715.1',
        '74.0.3715.0',
        '73.0.3683.50',
        '74.0.3711.2',
        '74.0.3714.2',
        '74.0.3713.3',
        '74.0.3714.1',
        '74.0.3714.0',
        '73.0.3683.49',
        '74.0.3713.1',
        '74.0.3713.0',
        '72.0.3626.120',
        '73.0.3683.48',
        '74.0.3712.2',
        '74.0.3712.1',
        '74.0.3712.0',
        '73.0.3683.47',
        '72.0.3626.119',
        '73.0.3683.46',
        '74.0.3710.2',
        '72.0.3626.118',
        '74.0.3711.1',
        '74.0.3711.0',
        '73.0.3683.45',
        '72.0.3626.117',
        '74.0.3710.1',
        '74.0.3710.0',
        '73.0.3683.44',
        '72.0.3626.116',
        '74.0.3709.1',
        '74.0.3709.0',
        '74.0.3704.9',
        '73.0.3683.43',
        '72.0.3626.115',
        '74.0.3704.8',
        '74.0.3704.7',
        '74.0.3708.0',
        '74.0.3706.7',
        '74.0.3704.6',
        '73.0.3683.42',
        '72.0.3626.114',
        '74.0.3706.6',
        '72.0.3626.113',
        '74.0.3704.5',
        '74.0.3706.5',
        '74.0.3706.4',
        '74.0.3706.3',
        '74.0.3706.2',
        '74.0.3706.1',
        '74.0.3706.0',
        '73.0.3683.41',
        '72.0.3626.112',
        '74.0.3705.1',
        '74.0.3705.0',
        '73.0.3683.40',
        '72.0.3626.111',
        '73.0.3683.39',
        '74.0.3704.4',
        '73.0.3683.38',
        '74.0.3704.3',
        '74.0.3704.2',
        '74.0.3704.1',
        '74.0.3704.0',
        '73.0.3683.37',
        '72.0.3626.110',
        '72.0.3626.109',
        '74.0.3703.3',
        '74.0.3703.2',
        '73.0.3683.36',
        '74.0.3703.1',
        '74.0.3703.0',
        '73.0.3683.35',
        '72.0.3626.108',
        '74.0.3702.2',
        '74.0.3699.3',
        '74.0.3702.1',
        '74.0.3702.0',
        '73.0.3683.34',
        '72.0.3626.107',
        '73.0.3683.33',
        '74.0.3701.1',
        '74.0.3701.0',
        '73.0.3683.32',
        '73.0.3683.31',
        '72.0.3626.105',
        '74.0.3700.1',
        '74.0.3700.0',
        '73.0.3683.29',
        '72.0.3626.103',
        '74.0.3699.2',
        '74.0.3699.1',
        '74.0.3699.0',
        '73.0.3683.28',
        '72.0.3626.102',
        '73.0.3683.27',
        '73.0.3683.26',
        '74.0.3698.0',
        '74.0.3696.2',
        '72.0.3626.101',
        '73.0.3683.25',
        '74.0.3696.1',
        '74.0.3696.0',
        '74.0.3694.8',
        '72.0.3626.100',
        '74.0.3694.7',
        '74.0.3694.6',
        '74.0.3694.5',
        '74.0.3694.4',
        '72.0.3626.99',
        '72.0.3626.98',
        '74.0.3694.3',
        '73.0.3683.24',
        '72.0.3626.97',
        '72.0.3626.96',
        '72.0.3626.95',
        '73.0.3683.23',
        '72.0.3626.94',
        '73.0.3683.22',
        '73.0.3683.21',
        '72.0.3626.93',
        '74.0.3694.2',
        '72.0.3626.92',
        '74.0.3694.1',
        '74.0.3694.0',
        '74.0.3693.6',
        '73.0.3683.20',
        '72.0.3626.91',
        '74.0.3693.5',
        '74.0.3693.4',
        '74.0.3693.3',
        '74.0.3693.2',
        '73.0.3683.19',
        '74.0.3693.1',
        '74.0.3693.0',
        '73.0.3683.18',
        '72.0.3626.90',
        '74.0.3692.1',
        '74.0.3692.0',
        '73.0.3683.17',
        '72.0.3626.89',
        '74.0.3687.3',
        '74.0.3691.1',
        '74.0.3691.0',
        '73.0.3683.16',
        '72.0.3626.88',
        '72.0.3626.87',
        '73.0.3683.15',
        '74.0.3690.1',
        '74.0.3690.0',
        '73.0.3683.14',
        '72.0.3626.86',
        '73.0.3683.13',
        '73.0.3683.12',
        '74.0.3689.1',
        '74.0.3689.0',
        '73.0.3683.11',
        '72.0.3626.85',
        '73.0.3683.10',
        '72.0.3626.84',
        '73.0.3683.9',
        '74.0.3688.1',
        '74.0.3688.0',
        '73.0.3683.8',
        '72.0.3626.83',
        '74.0.3687.2',
        '74.0.3687.1',
        '74.0.3687.0',
        '73.0.3683.7',
        '72.0.3626.82',
        '74.0.3686.4',
        '72.0.3626.81',
        '74.0.3686.3',
        '74.0.3686.2',
        '74.0.3686.1',
        '74.0.3686.0',
        '73.0.3683.6',
        '72.0.3626.80',
        '74.0.3685.1',
        '74.0.3685.0',
        '73.0.3683.5',
        '72.0.3626.79',
        '74.0.3684.1',
        '74.0.3684.0',
        '73.0.3683.4',
        '72.0.3626.78',
        '72.0.3626.77',
        '73.0.3683.3',
        '73.0.3683.2',
        '72.0.3626.76',
        '73.0.3683.1',
        '73.0.3683.0',
        '72.0.3626.75',
        '71.0.3578.141',
        '73.0.3682.1',
        '73.0.3682.0',
        '72.0.3626.74',
        '71.0.3578.140',
        '73.0.3681.4',
        '73.0.3681.3',
        '73.0.3681.2',
        '73.0.3681.1',
        '73.0.3681.0',
        '72.0.3626.73',
        '71.0.3578.139',
        '72.0.3626.72',
        '72.0.3626.71',
        '73.0.3680.1',
        '73.0.3680.0',
        '72.0.3626.70',
        '71.0.3578.138',
        '73.0.3678.2',
        '73.0.3679.1',
        '73.0.3679.0',
        '72.0.3626.69',
        '71.0.3578.137',
        '73.0.3678.1',
        '73.0.3678.0',
        '71.0.3578.136',
        '73.0.3677.1',
        '73.0.3677.0',
        '72.0.3626.68',
        '72.0.3626.67',
        '71.0.3578.135',
        '73.0.3676.1',
        '73.0.3676.0',
        '73.0.3674.2',
        '72.0.3626.66',
        '71.0.3578.134',
        '73.0.3674.1',
        '73.0.3674.0',
        '72.0.3626.65',
        '71.0.3578.133',
        '73.0.3673.2',
        '73.0.3673.1',
        '73.0.3673.0',
        '72.0.3626.64',
        '71.0.3578.132',
        '72.0.3626.63',
        '72.0.3626.62',
        '72.0.3626.61',
        '72.0.3626.60',
        '73.0.3672.1',
        '73.0.3672.0',
        '72.0.3626.59',
        '71.0.3578.131',
        '73.0.3671.3',
        '73.0.3671.2',
        '73.0.3671.1',
        '73.0.3671.0',
        '72.0.3626.58',
        '71.0.3578.130',
        '73.0.3670.1',
        '73.0.3670.0',
        '72.0.3626.57',
        '71.0.3578.129',
        '73.0.3669.1',
        '73.0.3669.0',
        '72.0.3626.56',
        '71.0.3578.128',
        '73.0.3668.2',
        '73.0.3668.1',
        '73.0.3668.0',
        '72.0.3626.55',
        '71.0.3578.127',
        '73.0.3667.2',
        '73.0.3667.1',
        '73.0.3667.0',
        '72.0.3626.54',
        '71.0.3578.126',
        '73.0.3666.1',
        '73.0.3666.0',
        '72.0.3626.53',
        '71.0.3578.125',
        '73.0.3665.4',
        '73.0.3665.3',
        '72.0.3626.52',
        '73.0.3665.2',
        '73.0.3664.4',
        '73.0.3665.1',
        '73.0.3665.0',
        '72.0.3626.51',
        '71.0.3578.124',
        '72.0.3626.50',
        '73.0.3664.3',
        '73.0.3664.2',
        '73.0.3664.1',
        '73.0.3664.0',
        '73.0.3663.2',
        '72.0.3626.49',
        '71.0.3578.123',
        '73.0.3663.1',
        '73.0.3663.0',
        '72.0.3626.48',
        '71.0.3578.122',
        '73.0.3662.1',
        '73.0.3662.0',
        '72.0.3626.47',
        '71.0.3578.121',
        '73.0.3661.1',
        '72.0.3626.46',
        '73.0.3661.0',
        '72.0.3626.45',
        '71.0.3578.120',
        '73.0.3660.2',
        '73.0.3660.1',
        '73.0.3660.0',
        '72.0.3626.44',
        '71.0.3578.119',
        '73.0.3659.1',
        '73.0.3659.0',
        '72.0.3626.43',
        '71.0.3578.118',
        '73.0.3658.1',
        '73.0.3658.0',
        '72.0.3626.42',
        '71.0.3578.117',
        '73.0.3657.1',
        '73.0.3657.0',
        '72.0.3626.41',
        '71.0.3578.116',
        '73.0.3656.1',
        '73.0.3656.0',
        '72.0.3626.40',
        '71.0.3578.115',
        '73.0.3655.1',
        '73.0.3655.0',
        '72.0.3626.39',
        '71.0.3578.114',
        '73.0.3654.1',
        '73.0.3654.0',
        '72.0.3626.38',
        '71.0.3578.113',
        '73.0.3653.1',
        '73.0.3653.0',
        '72.0.3626.37',
        '71.0.3578.112',
        '73.0.3652.1',
        '73.0.3652.0',
        '72.0.3626.36',
        '71.0.3578.111',
        '73.0.3651.1',
        '73.0.3651.0',
        '72.0.3626.35',
        '71.0.3578.110',
        '73.0.3650.1',
        '73.0.3650.0',
        '72.0.3626.34',
        '71.0.3578.109',
        '73.0.3649.1',
        '73.0.3649.0',
        '72.0.3626.33',
        '71.0.3578.108',
        '73.0.3648.2',
        '73.0.3648.1',
        '73.0.3648.0',
        '72.0.3626.32',
        '71.0.3578.107',
        '73.0.3647.2',
        '73.0.3647.1',
        '73.0.3647.0',
        '72.0.3626.31',
        '71.0.3578.106',
        '73.0.3635.3',
        '73.0.3646.2',
        '73.0.3646.1',
        '73.0.3646.0',
        '72.0.3626.30',
        '71.0.3578.105',
        '72.0.3626.29',
        '73.0.3645.2',
        '73.0.3645.1',
        '73.0.3645.0',
        '72.0.3626.28',
        '71.0.3578.104',
        '72.0.3626.27',
        '72.0.3626.26',
        '72.0.3626.25',
        '72.0.3626.24',
        '73.0.3644.0',
        '73.0.3643.2',
        '72.0.3626.23',
        '71.0.3578.103',
        '73.0.3643.1',
        '73.0.3643.0',
        '72.0.3626.22',
        '71.0.3578.102',
        '73.0.3642.1',
        '73.0.3642.0',
        '72.0.3626.21',
        '71.0.3578.101',
        '73.0.3641.1',
        '73.0.3641.0',
        '72.0.3626.20',
        '71.0.3578.100',
        '72.0.3626.19',
        '73.0.3640.1',
        '73.0.3640.0',
        '72.0.3626.18',
        '73.0.3639.1',
        '71.0.3578.99',
        '73.0.3639.0',
        '72.0.3626.17',
        '73.0.3638.2',
        '72.0.3626.16',
        '73.0.3638.1',
        '73.0.3638.0',
        '72.0.3626.15',
        '71.0.3578.98',
        '73.0.3635.2',
        '71.0.3578.97',
        '73.0.3637.1',
        '73.0.3637.0',
        '72.0.3626.14',
        '71.0.3578.96',
        '71.0.3578.95',
        '72.0.3626.13',
        '71.0.3578.94',
        '73.0.3636.2',
        '71.0.3578.93',
        '73.0.3636.1',
        '73.0.3636.0',
        '72.0.3626.12',
        '71.0.3578.92',
        '73.0.3635.1',
        '73.0.3635.0',
        '72.0.3626.11',
        '71.0.3578.91',
        '73.0.3634.2',
        '73.0.3634.1',
        '73.0.3634.0',
        '72.0.3626.10',
        '71.0.3578.90',
        '71.0.3578.89',
        '73.0.3633.2',
        '73.0.3633.1',
        '73.0.3633.0',
        '72.0.3610.4',
        '72.0.3626.9',
        '71.0.3578.88',
        '73.0.3632.5',
        '73.0.3632.4',
        '73.0.3632.3',
        '73.0.3632.2',
        '73.0.3632.1',
        '73.0.3632.0',
        '72.0.3626.8',
        '71.0.3578.87',
        '73.0.3631.2',
        '73.0.3631.1',
        '73.0.3631.0',
        '72.0.3626.7',
        '71.0.3578.86',
        '72.0.3626.6',
        '73.0.3630.1',
        '73.0.3630.0',
        '72.0.3626.5',
        '71.0.3578.85',
        '72.0.3626.4',
        '73.0.3628.3',
        '73.0.3628.2',
        '73.0.3629.1',
        '73.0.3629.0',
        '72.0.3626.3',
        '71.0.3578.84',
        '73.0.3628.1',
        '73.0.3628.0',
        '71.0.3578.83',
        '73.0.3627.1',
        '73.0.3627.0',
        '72.0.3626.2',
        '71.0.3578.82',
        '71.0.3578.81',
        '71.0.3578.80',
        '72.0.3626.1',
        '72.0.3626.0',
        '71.0.3578.79',
        '70.0.3538.124',
        '71.0.3578.78',
        '72.0.3623.4',
        '72.0.3625.2',
        '72.0.3625.1',
        '72.0.3625.0',
        '71.0.3578.77',
        '70.0.3538.123',
        '72.0.3624.4',
        '72.0.3624.3',
        '72.0.3624.2',
        '71.0.3578.76',
        '72.0.3624.1',
        '72.0.3624.0',
        '72.0.3623.3',
        '71.0.3578.75',
        '70.0.3538.122',
        '71.0.3578.74',
        '72.0.3623.2',
        '72.0.3610.3',
        '72.0.3623.1',
        '72.0.3623.0',
        '72.0.3622.3',
        '72.0.3622.2',
        '71.0.3578.73',
        '70.0.3538.121',
        '72.0.3622.1',
        '72.0.3622.0',
        '71.0.3578.72',
        '70.0.3538.120',
        '72.0.3621.1',
        '72.0.3621.0',
        '71.0.3578.71',
        '70.0.3538.119',
        '72.0.3620.1',
        '72.0.3620.0',
        '71.0.3578.70',
        '70.0.3538.118',
        '71.0.3578.69',
        '72.0.3619.1',
        '72.0.3619.0',
        '71.0.3578.68',
        '70.0.3538.117',
        '71.0.3578.67',
        '72.0.3618.1',
        '72.0.3618.0',
        '71.0.3578.66',
        '70.0.3538.116',
        '72.0.3617.1',
        '72.0.3617.0',
        '71.0.3578.65',
        '70.0.3538.115',
        '72.0.3602.3',
        '71.0.3578.64',
        '72.0.3616.1',
        '72.0.3616.0',
        '71.0.3578.63',
        '70.0.3538.114',
        '71.0.3578.62',
        '72.0.3615.1',
        '72.0.3615.0',
        '71.0.3578.61',
        '70.0.3538.113',
        '72.0.3614.1',
        '72.0.3614.0',
        '71.0.3578.60',
        '70.0.3538.112',
        '72.0.3613.1',
        '72.0.3613.0',
        '71.0.3578.59',
        '70.0.3538.111',
        '72.0.3612.2',
        '72.0.3612.1',
        '72.0.3612.0',
        '70.0.3538.110',
        '71.0.3578.58',
        '70.0.3538.109',
        '72.0.3611.2',
        '72.0.3611.1',
        '72.0.3611.0',
        '71.0.3578.57',
        '70.0.3538.108',
        '72.0.3610.2',
        '71.0.3578.56',
        '71.0.3578.55',
        '72.0.3610.1',
        '72.0.3610.0',
        '71.0.3578.54',
        '70.0.3538.107',
        '71.0.3578.53',
        '72.0.3609.3',
        '71.0.3578.52',
        '72.0.3609.2',
        '71.0.3578.51',
        '72.0.3608.5',
        '72.0.3609.1',
        '72.0.3609.0',
        '71.0.3578.50',
        '70.0.3538.106',
        '72.0.3608.4',
        '72.0.3608.3',
        '72.0.3608.2',
        '71.0.3578.49',
        '72.0.3608.1',
        '72.0.3608.0',
        '70.0.3538.105',
        '71.0.3578.48',
        '72.0.3607.1',
        '72.0.3607.0',
        '71.0.3578.47',
        '70.0.3538.104',
        '72.0.3606.2',
        '72.0.3606.1',
        '72.0.3606.0',
        '71.0.3578.46',
        '70.0.3538.103',
        '70.0.3538.102',
        '72.0.3605.3',
        '72.0.3605.2',
        '72.0.3605.1',
        '72.0.3605.0',
        '71.0.3578.45',
        '70.0.3538.101',
        '71.0.3578.44',
        '71.0.3578.43',
        '70.0.3538.100',
        '70.0.3538.99',
        '71.0.3578.42',
        '72.0.3604.1',
        '72.0.3604.0',
        '71.0.3578.41',
        '70.0.3538.98',
        '71.0.3578.40',
        '72.0.3603.2',
        '72.0.3603.1',
        '72.0.3603.0',
        '71.0.3578.39',
        '70.0.3538.97',
        '72.0.3602.2',
        '71.0.3578.38',
        '71.0.3578.37',
        '72.0.3602.1',
        '72.0.3602.0',
        '71.0.3578.36',
        '70.0.3538.96',
        '72.0.3601.1',
        '72.0.3601.0',
        '71.0.3578.35',
        '70.0.3538.95',
        '72.0.3600.1',
        '72.0.3600.0',
        '71.0.3578.34',
        '70.0.3538.94',
        '72.0.3599.3',
        '72.0.3599.2',
        '72.0.3599.1',
        '72.0.3599.0',
        '71.0.3578.33',
        '70.0.3538.93',
        '72.0.3598.1',
        '72.0.3598.0',
        '71.0.3578.32',
        '70.0.3538.87',
        '72.0.3597.1',
        '72.0.3597.0',
        '72.0.3596.2',
        '71.0.3578.31',
        '70.0.3538.86',
        '71.0.3578.30',
        '71.0.3578.29',
        '72.0.3596.1',
        '72.0.3596.0',
        '71.0.3578.28',
        '70.0.3538.85',
        '72.0.3595.2',
        '72.0.3591.3',
        '72.0.3595.1',
        '72.0.3595.0',
        '71.0.3578.27',
        '70.0.3538.84',
        '72.0.3594.1',
        '72.0.3594.0',
        '71.0.3578.26',
        '70.0.3538.83',
        '72.0.3593.2',
        '72.0.3593.1',
        '72.0.3593.0',
        '71.0.3578.25',
        '70.0.3538.82',
        '72.0.3589.3',
        '72.0.3592.2',
        '72.0.3592.1',
        '72.0.3592.0',
        '71.0.3578.24',
        '72.0.3589.2',
        '70.0.3538.81',
        '70.0.3538.80',
        '72.0.3591.2',
        '72.0.3591.1',
        '72.0.3591.0',
        '71.0.3578.23',
        '70.0.3538.79',
        '71.0.3578.22',
        '72.0.3590.1',
        '72.0.3590.0',
        '71.0.3578.21',
        '70.0.3538.78',
        '70.0.3538.77',
        '72.0.3589.1',
        '72.0.3589.0',
        '71.0.3578.20',
        '70.0.3538.76',
        '71.0.3578.19',
        '70.0.3538.75',
        '72.0.3588.1',
        '72.0.3588.0',
        '71.0.3578.18',
        '70.0.3538.74',
        '72.0.3586.2',
        '72.0.3587.0',
        '71.0.3578.17',
        '70.0.3538.73',
        '72.0.3586.1',
        '72.0.3586.0',
        '71.0.3578.16',
        '70.0.3538.72',
        '72.0.3585.1',
        '72.0.3585.0',
        '71.0.3578.15',
        '70.0.3538.71',
        '71.0.3578.14',
        '72.0.3584.1',
        '72.0.3584.0',
        '71.0.3578.13',
        '70.0.3538.70',
        '72.0.3583.2',
        '71.0.3578.12',
        '72.0.3583.1',
        '72.0.3583.0',
        '71.0.3578.11',
        '70.0.3538.69',
        '71.0.3578.10',
        '72.0.3582.0',
        '72.0.3581.4',
        '71.0.3578.9',
        '70.0.3538.67',
        '72.0.3581.3',
        '72.0.3581.2',
        '72.0.3581.1',
        '72.0.3581.0',
        '71.0.3578.8',
        '70.0.3538.66',
        '72.0.3580.1',
        '72.0.3580.0',
        '71.0.3578.7',
        '70.0.3538.65',
        '71.0.3578.6',
        '72.0.3579.1',
        '72.0.3579.0',
        '71.0.3578.5',
        '70.0.3538.64',
        '71.0.3578.4',
        '71.0.3578.3',
        '71.0.3578.2',
        '71.0.3578.1',
        '71.0.3578.0',
        '70.0.3538.63',
        '69.0.3497.128',
        '70.0.3538.62',
        '70.0.3538.61',
        '70.0.3538.60',
        '70.0.3538.59',
        '71.0.3577.1',
        '71.0.3577.0',
        '70.0.3538.58',
        '69.0.3497.127',
        '71.0.3576.2',
        '71.0.3576.1',
        '71.0.3576.0',
        '70.0.3538.57',
        '70.0.3538.56',
        '71.0.3575.2',
        '70.0.3538.55',
        '69.0.3497.126',
        '70.0.3538.54',
        '71.0.3575.1',
        '71.0.3575.0',
        '71.0.3574.1',
        '71.0.3574.0',
        '70.0.3538.53',
        '69.0.3497.125',
        '70.0.3538.52',
        '71.0.3573.1',
        '71.0.3573.0',
        '70.0.3538.51',
        '69.0.3497.124',
        '71.0.3572.1',
        '71.0.3572.0',
        '70.0.3538.50',
        '69.0.3497.123',
        '71.0.3571.2',
        '70.0.3538.49',
        '69.0.3497.122',
        '71.0.3571.1',
        '71.0.3571.0',
        '70.0.3538.48',
        '69.0.3497.121',
        '71.0.3570.1',
        '71.0.3570.0',
        '70.0.3538.47',
        '69.0.3497.120',
        '71.0.3568.2',
        '71.0.3569.1',
        '71.0.3569.0',
        '70.0.3538.46',
        '69.0.3497.119',
        '70.0.3538.45',
        '71.0.3568.1',
        '71.0.3568.0',
        '70.0.3538.44',
        '69.0.3497.118',
        '70.0.3538.43',
        '70.0.3538.42',
        '71.0.3567.1',
        '71.0.3567.0',
        '70.0.3538.41',
        '69.0.3497.117',
        '71.0.3566.1',
        '71.0.3566.0',
        '70.0.3538.40',
        '69.0.3497.116',
        '71.0.3565.1',
        '71.0.3565.0',
        '70.0.3538.39',
        '69.0.3497.115',
        '71.0.3564.1',
        '71.0.3564.0',
        '70.0.3538.38',
        '69.0.3497.114',
        '71.0.3563.0',
        '71.0.3562.2',
        '70.0.3538.37',
        '69.0.3497.113',
        '70.0.3538.36',
        '70.0.3538.35',
        '71.0.3562.1',
        '71.0.3562.0',
        '70.0.3538.34',
        '69.0.3497.112',
        '70.0.3538.33',
        '71.0.3561.1',
        '71.0.3561.0',
        '70.0.3538.32',
        '69.0.3497.111',
        '71.0.3559.6',
        '71.0.3560.1',
        '71.0.3560.0',
        '71.0.3559.5',
        '71.0.3559.4',
        '70.0.3538.31',
        '69.0.3497.110',
        '71.0.3559.3',
        '70.0.3538.30',
        '69.0.3497.109',
        '71.0.3559.2',
        '71.0.3559.1',
        '71.0.3559.0',
        '70.0.3538.29',
        '69.0.3497.108',
        '71.0.3558.2',
        '71.0.3558.1',
        '71.0.3558.0',
        '70.0.3538.28',
        '69.0.3497.107',
        '71.0.3557.2',
        '71.0.3557.1',
        '71.0.3557.0',
        '70.0.3538.27',
        '69.0.3497.106',
        '71.0.3554.4',
        '70.0.3538.26',
        '71.0.3556.1',
        '71.0.3556.0',
        '70.0.3538.25',
        '71.0.3554.3',
        '69.0.3497.105',
        '71.0.3554.2',
        '70.0.3538.24',
        '69.0.3497.104',
        '71.0.3555.2',
        '70.0.3538.23',
        '71.0.3555.1',
        '71.0.3555.0',
        '70.0.3538.22',
        '69.0.3497.103',
        '71.0.3554.1',
        '71.0.3554.0',
        '70.0.3538.21',
        '69.0.3497.102',
        '71.0.3553.3',
        '70.0.3538.20',
        '69.0.3497.101',
        '71.0.3553.2',
        '69.0.3497.100',
        '71.0.3553.1',
        '71.0.3553.0',
        '70.0.3538.19',
        '69.0.3497.99',
        '69.0.3497.98',
        '69.0.3497.97',
        '71.0.3552.6',
        '71.0.3552.5',
        '71.0.3552.4',
        '71.0.3552.3',
        '71.0.3552.2',
        '71.0.3552.1',
        '71.0.3552.0',
        '70.0.3538.18',
        '69.0.3497.96',
        '71.0.3551.3',
        '71.0.3551.2',
        '71.0.3551.1',
        '71.0.3551.0',
        '70.0.3538.17',
        '69.0.3497.95',
        '71.0.3550.3',
        '71.0.3550.2',
        '71.0.3550.1',
        '71.0.3550.0',
        '70.0.3538.16',
        '69.0.3497.94',
        '71.0.3549.1',
        '71.0.3549.0',
        '70.0.3538.15',
        '69.0.3497.93',
        '69.0.3497.92',
        '71.0.3548.1',
        '71.0.3548.0',
        '70.0.3538.14',
        '69.0.3497.91',
        '71.0.3547.1',
        '71.0.3547.0',
        '70.0.3538.13',
        '69.0.3497.90',
        '71.0.3546.2',
        '69.0.3497.89',
        '71.0.3546.1',
        '71.0.3546.0',
        '70.0.3538.12',
        '69.0.3497.88',
        '71.0.3545.4',
        '71.0.3545.3',
        '71.0.3545.2',
        '71.0.3545.1',
        '71.0.3545.0',
        '70.0.3538.11',
        '69.0.3497.87',
        '71.0.3544.5',
        '71.0.3544.4',
        '71.0.3544.3',
        '71.0.3544.2',
        '71.0.3544.1',
        '71.0.3544.0',
        '69.0.3497.86',
        '70.0.3538.10',
        '69.0.3497.85',
        '70.0.3538.9',
        '69.0.3497.84',
        '71.0.3543.4',
        '70.0.3538.8',
        '71.0.3543.3',
        '71.0.3543.2',
        '71.0.3543.1',
        '71.0.3543.0',
        '70.0.3538.7',
        '69.0.3497.83',
        '71.0.3542.2',
        '71.0.3542.1',
        '71.0.3542.0',
        '70.0.3538.6',
        '69.0.3497.82',
        '69.0.3497.81',
        '71.0.3541.1',
        '71.0.3541.0',
        '70.0.3538.5',
        '69.0.3497.80',
        '71.0.3540.1',
        '71.0.3540.0',
        '70.0.3538.4',
        '69.0.3497.79',
        '70.0.3538.3',
        '71.0.3539.1',
        '71.0.3539.0',
        '69.0.3497.78',
        '68.0.3440.134',
        '69.0.3497.77',
        '70.0.3538.2',
        '70.0.3538.1',
        '70.0.3538.0',
        '69.0.3497.76',
        '68.0.3440.133',
        '69.0.3497.75',
        '70.0.3537.2',
        '70.0.3537.1',
        '70.0.3537.0',
        '69.0.3497.74',
        '68.0.3440.132',
        '70.0.3536.0',
        '70.0.3535.5',
        '70.0.3535.4',
        '70.0.3535.3',
        '69.0.3497.73',
        '68.0.3440.131',
        '70.0.3532.8',
        '70.0.3532.7',
        '69.0.3497.72',
        '69.0.3497.71',
        '70.0.3535.2',
        '70.0.3535.1',
        '70.0.3535.0',
        '69.0.3497.70',
        '68.0.3440.130',
        '69.0.3497.69',
        '68.0.3440.129',
        '70.0.3534.4',
        '70.0.3534.3',
        '70.0.3534.2',
        '70.0.3534.1',
        '70.0.3534.0',
        '69.0.3497.68',
        '68.0.3440.128',
        '70.0.3533.2',
        '70.0.3533.1',
        '70.0.3533.0',
        '69.0.3497.67',
        '68.0.3440.127',
        '70.0.3532.6',
        '70.0.3532.5',
        '70.0.3532.4',
        '69.0.3497.66',
        '68.0.3440.126',
        '70.0.3532.3',
        '70.0.3532.2',
        '70.0.3532.1',
        '69.0.3497.60',
        '69.0.3497.65',
        '69.0.3497.64',
        '70.0.3532.0',
        '70.0.3531.0',
        '70.0.3530.4',
        '70.0.3530.3',
        '70.0.3530.2',
        '69.0.3497.58',
        '68.0.3440.125',
        '69.0.3497.57',
        '69.0.3497.56',
        '69.0.3497.55',
        '69.0.3497.54',
        '70.0.3530.1',
        '70.0.3530.0',
        '69.0.3497.53',
        '68.0.3440.124',
        '69.0.3497.52',
        '70.0.3529.3',
        '70.0.3529.2',
        '70.0.3529.1',
        '70.0.3529.0',
        '69.0.3497.51',
        '70.0.3528.4',
        '68.0.3440.123',
        '70.0.3528.3',
        '70.0.3528.2',
        '70.0.3528.1',
        '70.0.3528.0',
        '69.0.3497.50',
        '68.0.3440.122',
        '70.0.3527.1',
        '70.0.3527.0',
        '69.0.3497.49',
        '68.0.3440.121',
        '70.0.3526.1',
        '70.0.3526.0',
        '68.0.3440.120',
        '69.0.3497.48',
        '69.0.3497.47',
        '68.0.3440.119',
        '68.0.3440.118',
        '70.0.3525.5',
        '70.0.3525.4',
        '70.0.3525.3',
        '68.0.3440.117',
        '69.0.3497.46',
        '70.0.3525.2',
        '70.0.3525.1',
        '70.0.3525.0',
        '69.0.3497.45',
        '68.0.3440.116',
        '70.0.3524.4',
        '70.0.3524.3',
        '69.0.3497.44',
        '70.0.3524.2',
        '70.0.3524.1',
        '70.0.3524.0',
        '70.0.3523.2',
        '69.0.3497.43',
        '68.0.3440.115',
        '70.0.3505.9',
        '69.0.3497.42',
        '70.0.3505.8',
        '70.0.3523.1',
        '70.0.3523.0',
        '69.0.3497.41',
        '68.0.3440.114',
        '70.0.3505.7',
        '69.0.3497.40',
        '70.0.3522.1',
        '70.0.3522.0',
        '70.0.3521.2',
        '69.0.3497.39',
        '68.0.3440.113',
        '70.0.3505.6',
        '70.0.3521.1',
        '70.0.3521.0',
        '69.0.3497.38',
        '68.0.3440.112',
        '70.0.3520.1',
        '70.0.3520.0',
        '69.0.3497.37',
        '68.0.3440.111',
        '70.0.3519.3',
        '70.0.3519.2',
        '70.0.3519.1',
        '70.0.3519.0',
        '69.0.3497.36',
        '68.0.3440.110',
        '70.0.3518.1',
        '70.0.3518.0',
        '69.0.3497.35',
        '69.0.3497.34',
        '68.0.3440.109',
        '70.0.3517.1',
        '70.0.3517.0',
        '69.0.3497.33',
        '68.0.3440.108',
        '69.0.3497.32',
        '70.0.3516.3',
        '70.0.3516.2',
        '70.0.3516.1',
        '70.0.3516.0',
        '69.0.3497.31',
        '68.0.3440.107',
        '70.0.3515.4',
        '68.0.3440.106',
        '70.0.3515.3',
        '70.0.3515.2',
        '70.0.3515.1',
        '70.0.3515.0',
        '69.0.3497.30',
        '68.0.3440.105',
        '68.0.3440.104',
        '70.0.3514.2',
        '70.0.3514.1',
        '70.0.3514.0',
        '69.0.3497.29',
        '68.0.3440.103',
        '70.0.3513.1',
        '70.0.3513.0',
        '69.0.3497.28',
    )
    return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
1677
1678
# Default headers sent with every HTTP request. The User-Agent is picked
# once, at import time, so all requests in a single run share the same UA.
std_headers = {
    'User-Agent': random_user_agent(),
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}
1686
1687
# Named alternative User-Agent strings, for callers that need to present a
# specific browser instead of the randomized default
USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
1691
1692
# Unique sentinel used by the xpath_* helpers below to distinguish
# "caller supplied no default" from an explicit default of None
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']
1698
# Month names keyed by language code, for parsing textual dates
MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
1705
# File extensions recognized as media files
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')
1720
# needed for sanitizing filenames in restricted mode
# Maps each accented/special character to an ASCII transliteration
# (the zip pairs each char of the string with the chained replacements)
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
1725
# strptime() formats tried, in order, when parsing free-form date strings.
# Formats that are ambiguous between day-first and month-first numeric
# dates are kept in the two specialized lists below instead.
DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y.%m.%d.',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y%m%d%H%M',
    '%Y%m%d%H%M%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
    '%H:%M %d-%b-%Y',
)

# Ambiguous numeric dates interpreted day-first (e.g. 01/02/2000 = 1 Feb)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

# Ambiguous numeric dates interpreted month-first (e.g. 01/02/2000 = 2 Jan)
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])
1788
# Matches the trailing argument list of "packed" (p,a,c,k,e,d-style) javascript
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
# Captures the body of an <script type="application/ld+json"> block
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
1791
1792
def preferredencoding():
    """Return the system's preferred text encoding.

    Falls back to 'UTF-8' when locale.getpreferredencoding() fails or
    reports an encoding that cannot actually encode text.
    """
    try:
        encoding = locale.getpreferredencoding()
        # Probe the codec; a broken or unknown encoding raises here
        'TEST'.encode(encoding)
    except Exception:
        return 'UTF-8'
    return encoding
1806
1807
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    # Create the temp file next to the target so the final rename stays on
    # the same filesystem (rename is only atomic within one filesystem)
    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            # NamedTemporaryFile creates files with mode 0600; widen to the
            # umask-derived default so the result looks like a normal file
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the temp file, then propagate the error
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
1866
1867
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """Find the first element matching xpath that carries attribute
        `key` (with value `val` when given), i.e. xpath[@key='val']."""
        assert re.match(r'^[a-zA-Z_-]+$', key)
        if val is None:
            predicate = '[@%s]' % key
        else:
            predicate = "[@%s='%s']" % (key, val)
        return node.find(xpath + predicate)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        """Manual fallback for Python < 2.7, whose ElementTree find() does
        not support attribute predicates."""
        for candidate in node.findall(compat_xpath(xpath)):
            if key in candidate.attrib and (val is None or candidate.attrib[key] == val):
                return candidate
        return None
1882
1883 # On python2.6 the xml.etree.ElementTree.Element methods don't support
1884 # the namespace parameter
1885
1886
def xpath_with_ns(path, ns_map):
    """Expand 'prefix:tag' steps in an XPath into '{uri}tag' form, looking
    prefixes up in ns_map."""
    expanded = []
    for step in path.split('/'):
        parts = step.split(':')
        if len(parts) == 1:
            expanded.append(parts[0])
        else:
            prefix, tag = parts
            expanded.append('{%s}%s' % (ns_map[prefix], tag))
    return '/'.join(expanded)
1897
1898
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find the first element matching xpath (a single expression, or an
    iterable of alternatives tried in order).

    On no match: return `default` when one was supplied, raise
    ExtractorError when `fatal`, otherwise return None.
    """
    if isinstance(xpath, (str, compat_str)):
        found = node.find(compat_xpath(xpath))
    else:
        for alternative in xpath:
            found = node.find(compat_xpath(alternative))
            if found is not None:
                break

    if found is not None:
        return found
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element %s' % name)
    return None
1920
1921
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Return the .text of the first element matching xpath, with the same
    default/fatal semantics as xpath_element()."""
    elem = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if elem is None or elem == default:
        return elem
    if elem.text is not None:
        return elem.text
    if default is not NO_DEFAULT:
        return default
    if fatal:
        name = xpath if name is None else name
        raise ExtractorError('Could not find XML element\'s text %s' % name)
    return None
1935
1936
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Return attribute `key` of the first element matching xpath that has
    it, with the same default/fatal semantics as xpath_element()."""
    elem = find_xpath_attr(node, xpath, key)
    if elem is not None:
        return elem.attrib[key]
    if default is not NO_DEFAULT:
        return default
    if fatal:
        if name is None:
            name = '%s[@%s]' % (xpath, key)
        raise ExtractorError('Could not find XML attribute %s' % name)
    return None
1948
1949
def get_element_by_id(id, html):
    """Return the content of the first tag with the given id attribute."""
    return get_element_by_attribute('id', id, html)
1953
1954
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the given CSS class, or None."""
    matches = get_elements_by_class(class_name, html)
    return matches[0] if matches else None
1959
1960
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the first tag whose attribute matches value, or None."""
    matches = get_elements_by_attribute(attribute, value, html, escape_value)
    return matches[0] if matches else None
1964
1965
def get_elements_by_class(class_name, html):
    """Return the contents of all tags carrying the given CSS class."""
    # Match class_name as a whole word anywhere within the class attribute
    class_value_re = r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name)
    return get_elements_by_attribute('class', class_value_re, html, escape_value=False)
1971
1972
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the (entity-decoded) contents of all tags whose given
    attribute matches value (a literal, or a regex when escape_value is
    False)."""
    if escape_value:
        value = re.escape(value)

    results = []
    for match in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        content = match.group('content')

        # Strip one surrounding quote pair if the lazy match left it behind
        if content.startswith(('"', "'")):
            content = content[1:-1]

        results.append(unescapeHTML(content))

    return results
1996
1997
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""

    def __init__(self):
        # Attributes of the most recently seen start tag; {} until one is fed
        self.attrs = {}
        # Explicit base-class call (not super()) for py2 compatibility
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # Each start tag overwrites the previous one; callers feed one element
        self.attrs = dict(attrs)
2007
2008
def extract_attributes(html_element):
    """Decode the attributes of a single HTML element into a dict.

    For example, '<el a="foo" B="bar" c="&98;az" d=boz empty= noval
    entity="&amp;" sq='"' dq="'">' yields
    {'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
     'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': "'"}.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    except compat_HTMLParseError:
        # Older Python may raise on malformed HTML; return what was gathered
        pass
    return parser.attrs
2033
2034
def clean_html(html):
    """Clean an HTML snippet into a readable plain-text string."""
    if html is None:  # Convenience for sanitizing descriptions etc.
        return html

    # Literal newlines are insignificant in HTML; real breaks are <br> and </p><p>
    html = html.replace('\n', ' ')
    html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Drop remaining tags, decode entities, trim surrounding whitespace
    html = re.sub('<.*?>', '', html)
    return unescapeHTML(html).strip()
2050
2051
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                # Put stdout into binary mode so '\n' is not translated to '\r\n'
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # A permission error will not be fixed by renaming; propagate it
        if err.errno in (errno.EACCES,):
            raise
        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
2082
2083
def timeconvert(timestr):
    """Convert an RFC 2822 time string into a Unix timestamp, or None if
    the string cannot be parsed."""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        return None
    return email.utils.mktime_tz(parsed)
2091
2092
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        # Order matters: each rule shadows the ones after it
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '\n' and not restricted:
            return ' '
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char.isspace() or char in '!&\'()[]{}$;`^,#'):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    if s == '':
        return ''
    # Turn the ':' inside timestamps like 12:34:56 into '_'
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(replace_insane(char) for char in s)
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
2136
2137
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows.

    On other platforms the path is returned unchanged unless `force` is
    set, in which case the same character replacement is applied."""
    if sys.platform == 'win32':
        # Native Windows paths are always sanitized; force only matters elsewhere
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    parts = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        parts.pop(0)
    sanitized = []
    for part in parts:
        if part in ('.', '..'):
            sanitized.append(part)
        else:
            # Replace win32-forbidden characters and trailing dots/spaces
            sanitized.append(re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', part))
    if drive_or_unc:
        sanitized.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        sanitized.insert(0, os.path.sep)
    return os.path.join(*sanitized)
2161
2162
def sanitize_url(url):
    """Fix protocol-relative URLs and a few common scheme typos."""
    # Prepend `http:` to protocol-less URLs in order to mitigate the
    # number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    # Scheme typos seen in the wild:
    # https://github.com/ytdl-org/youtube-dl/issues/15649
    # https://bx1.be/lives/direct-tv/
    for mistake, fixup in (
            (r'^httpss://', r'https://'),
            (r'^rmtp([es]?)://', r'rtmp\1://')):
        fixed, count = re.subn(mistake, fixup, url)
        if count:
            return fixed
    return url
2179
2180
def extract_basic_auth(url):
    """Strip inline credentials from url.

    Returns (url_without_userinfo, basic_auth_header_value_or_None)."""
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    netloc = parts.hostname
    if parts.port is not None:
        netloc = '%s:%d' % (parts.hostname, parts.port)
    url = compat_urlparse.urlunsplit(parts._replace(netloc=netloc))
    credentials = '%s:%s' % (parts.username, parts.password or '')
    auth_payload = base64.b64encode(credentials.encode('utf-8'))
    return url, 'Basic ' + auth_payload.decode('utf-8')
2191
2192
def sanitized_Request(url, *args, **kwargs):
    """Build a urllib Request from a sanitized URL, moving inline
    credentials into an Authorization header."""
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        # Request(url, data, headers, ...): headers may arrive positionally
        # (args[1]) or via keyword
        if len(args) >= 2:
            headers = args[1]
        else:
            headers = kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)
2199
2200
def expand_path(s):
    """Expand shell variables and the user's home directory (~) in s."""
    expanded = compat_expanduser(s)
    return os.path.expandvars(expanded)
2204
2205
def orderedSet(iterable):
    """Return a list of the iterable's elements with duplicates removed,
    preserving first-seen order.

    Membership is checked with == on the result list, so elements need
    not be hashable."""
    result = []
    for element in iterable:
        if element in result:
            continue
        result.append(element)
    return result
2213
2214
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    numeric = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if numeric is not None:
        numstr = numeric.group(1)
        if numstr.startswith('x'):
            codepoint_text, base = '0%s' % numstr, 16
        else:
            codepoint_text, base = numstr, 10
        # Some pages reference invalid codepoints; fall through on failure
        # (see https://github.com/ytdl-org/youtube-dl/issues/7518)
        try:
            return compat_chr(int(codepoint_text, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
2244
2245
def unescapeHTML(s):
    """Decode HTML entities (named, decimal and hex) found in s."""
    if s is None:
        return None
    assert type(s) == compat_str

    def decode_entity(match):
        return _htmlentity_transform(match.group(1))

    return re.sub(r'&([^&;]+;)', decode_entity, s)
2253
2254
def escapeHTML(text):
    """Escape the characters that are special in HTML."""
    # '&' must be replaced first so already-inserted entities stay intact
    for char, entity in (
            ('&', '&amp;'),
            ('<', '&lt;'),
            ('>', '&gt;'),
            ('"', '&quot;'),
            ("'", '&#39;')):
        text = text.replace(char, entity)
    return text
2264
2265
def process_communicate_or_kill(p, *args, **kwargs):
    """p.communicate(...), but make sure the process is killed if anything
    (including KeyboardInterrupt) interrupts the wait."""
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:
        p.kill()
        p.wait()
        raise
2273
2274
def get_subprocess_encoding():
    """Return the text encoding to use when exchanging data with subprocesses."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        return preferredencoding()
    # Elsewhere use the filesystem encoding, defaulting to UTF-8
    return sys.getfilesystemencoding() or 'utf-8'
2285
2286
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """

    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    # Python 2 on other platforms: encode to the subprocess/filesystem encoding
    return s.encode(get_subprocess_encoding(), 'ignore')
2309
2310
def decodeFilename(b, for_subprocess=False):
    """Inverse of encodeFilename(): decode a byte filename to text on
    Python 2; anything else passes through unchanged."""
    if sys.version_info >= (3, 0) or not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')
2320
2321
def encodeArgument(s):
    """Encode a command-line argument for passing to a subprocess."""
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)
2329
2330
def decodeArgument(b):
    """Decode a subprocess argument (inverse of encodeArgument)."""
    return decodeFilename(b, for_subprocess=True)
2333
2334
def decodeOption(optval):
    """Decode a command-line option value to text; None passes through."""
    if optval is None:
        return optval

    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())
    assert isinstance(optval, compat_str)
    return optval
2343
2344
def formatSeconds(secs, delim=':', msec=False):
    """Format a duration in seconds as [H<delim>M<delim>]S.

    @param secs   Duration in seconds (may be fractional)
    @param delim  Separator between the hour/minute/second fields
    @param msec   Append the millisecond remainder as '.NNN'
    """
    if secs > 3600:
        ret = '%d%s%02d%s%02d' % (secs // 3600, delim, (secs % 3600) // 60, delim, secs % 60)
    elif secs > 60:
        ret = '%d%s%02d' % (secs // 60, delim, secs % 60)
    else:
        ret = '%d' % secs
    # secs % 1 is the fractional part; scale it to whole milliseconds before
    # the %d conversion. (The previous code passed the raw 0..1 fraction to
    # %03d, which truncates to 0 and always printed '.000'.)
    return '%s.%03d' % (ret, secs % 1 * 1000) if msec else ret
2353
2354
def _ssl_load_windows_store_certs(ssl_context, storename):
    """Load server-auth certificates from a Windows certificate store into
    ssl_context, silently skipping unreadable stores and bad certs.

    Code adapted from _load_windows_store_certs in
    https://github.com/python/cpython/blob/main/Lib/ssl.py
    """
    try:
        store_contents = ssl.enum_certificates(storename)
    except PermissionError:
        return
    for cert, encoding, trust in store_contents:
        # Only x509 certs that are trusted for server authentication
        if encoding != 'x509_asn':
            continue
        if trust is not True and ssl.Purpose.SERVER_AUTH.oid not in trust:
            continue
        try:
            ssl_context.load_verify_locations(cadata=cert)
        except ssl.SSLError:
            pass
2368
2369
def make_HTTPS_handler(params, **kwargs):
    """Create a YoutubeDLHTTPSHandler honouring the 'nocheckcertificate' option."""
    check_certificate = not params.get('nocheckcertificate')
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = check_certificate
    if check_certificate:
        context.verify_mode = ssl.CERT_REQUIRED
        # Work around the issue in load_default_certs when there are bad
        # certificates by loading the Windows stores manually first. See:
        # https://github.com/yt-dlp/yt-dlp/issues/1060,
        # https://bugs.python.org/issue35665, https://bugs.python.org/issue4531
        if sys.platform == 'win32':
            for storename in ('CA', 'ROOT'):
                _ssl_load_windows_store_certs(context, storename)
        context.set_default_verify_paths()
    else:
        context.verify_mode = ssl.CERT_NONE
    return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
2384
2385
def bug_reports_message(before=';'):
    """Return the standard bug-report request text, joined to `before` with
    punctuation-aware capitalization."""
    update_cmd = (
        'type yt-dlp -U to update' if ytdl_is_updateable()
        else 'see https://github.com/yt-dlp/yt-dlp on how to update')
    msg = (
        'please report this issue on https://github.com/yt-dlp/yt-dlp .'
        ' Make sure you are using the latest version; %s.'
        ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'
    ) % update_cmd

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        # We are starting a fresh sentence: capitalize the first word
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg
2400
2401
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors."""
2405
2406
# Exceptions that indicate a (possibly transient) network problem rather than
# a bug; ExtractorError marks errors raised while one is in flight as expected
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    # Not present on every supported Python version
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
2411
2412
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """
        # Errors raised while a network exception is being handled are
        # treated as expected (not a bug)
        if sys.exc_info()[0] in network_exceptions:
            expected = True

        self.msg = str(msg)
        self.traceback = tb
        self.expected = expected
        self.cause = cause
        self.video_id = video_id
        self.ie = ie
        self.exc_info = sys.exc_info()  # preserve original exception

        message = ''.join((
            format_field(ie, template='[%s] '),
            format_field(video_id, template='%s: '),
            self.msg,
            format_field(cause, template=' (caused by %r)'),
            '' if expected else bug_reports_message()))
        super(ExtractorError, self).__init__(message)

    def format_traceback(self):
        """Return the formatted original traceback, or None if none was given."""
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
2442
2443
class UnsupportedError(ExtractorError):
    """Raised when no extractor supports the given URL."""

    def __init__(self, url):
        self.url = url
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
2449
2450
class RegexNotFoundError(ExtractorError):
    """Raised when a regex pattern did not match anything."""
2454
2455
class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        # Optional list of country codes associated with the restriction
        self.countries = countries
2467
2468
class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        self.exc_info = exc_info
        super(DownloadError, self).__init__(msg)
2481
2482
class EntryNotInPlaylist(YoutubeDLError):
    """Thrown by YoutubeDL when a requested entry cannot be found in the
    playlist's info_dict."""
2490
2491
class SameFileError(YoutubeDLError):
    """Thrown by FileDownloader objects when they detect that multiple
    files would have to be downloaded to the same file on disk."""
2499
2500
class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    May be raised by a PostProcessor's .run() method to indicate an error
    in the postprocessing task.
    """

    def __init__(self, msg):
        self.msg = msg
        super(PostProcessingError, self).__init__(msg)
2511
2512
# NOTE(review): the docstring previously read "--max-downloads limit has been
# reached", copy-pasted from MaxDownloadsReached below; corrected per the
# class name.
class ExistingVideoReached(YoutubeDLError):
    """ --break-on-existing triggered: an already-downloaded video was encountered. """
    pass
2516
2517
# NOTE(review): the docstring previously read "--max-downloads limit has been
# reached", copy-pasted from MaxDownloadsReached below; corrected per the
# class name.
class RejectedVideoReached(YoutubeDLError):
    """ --break-on-reject triggered: a rejected/filtered video was encountered. """
    pass
2521
2522
class ThrottledDownload(YoutubeDLError):
    """Raised when the download speed falls below --throttled-rate."""
2526
2527
class MaxDownloadsReached(YoutubeDLError):
    """Raised once the --max-downloads limit has been reached."""
2531
2532
class UnavailableVideoError(YoutubeDLError):
    """Thrown when a video is requested in a format that is not available
    for that video."""
2540
2541
class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    May be raised by FileDownloader objects when a downloaded file is
    smaller than what the server announced, indicating the connection was
    probably interrupted.
    """

    def __init__(self, downloaded, expected):
        # Both sizes are in bytes
        self.downloaded = downloaded
        self.expected = expected
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
2557
2558
class XAttrMetadataError(YoutubeDLError):
    """Raised when writing extended file attributes fails.

    `reason` classifies the failure as 'NO_SPACE', 'VALUE_TOO_LONG' or
    'NOT_SUPPORTED', derived from the error code/message.
    """

    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Classify the failure from the errno and/or message text
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'
2573
2574
class XAttrUnavailableError(YoutubeDLError):
    """Raised when no usable xattr implementation is available."""
2577
2578
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    """Create an http(s) connection of http_class, honouring the handler's
    'source_address' param by binding the outgoing socket to it."""
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            # Keep only addresses in the same family (v4/v6) as source_address
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            # Try each candidate address until one connects
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            def _hc_connect(self, *args, **kwargs):
                # Manual connect for http clients without source_address support
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
2642
2643
def handle_youtubedl_headers(headers):
    """Translate the internal 'Youtubedl-no-compression' pseudo-header.

    When the marker is present, return a new dict with both the marker and
    any Accept-Encoding header (case-insensitive) removed; otherwise the
    original mapping is returned untouched.
    """
    if 'Youtubedl-no-compression' not in headers:
        return headers

    result = {}
    for key, value in headers.items():
        if key.lower() != 'accept-encoding':
            result[key] = value
    # The marker itself must not reach the wire either
    del result['Youtubedl-no-compression']
    return result
2652
2653
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params  # yt-dlp options dict (read by _create_http_connection)

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        # The internal Ytdl-socks-proxy pseudo-header routes the request
        # through a SOCKS-capable connection class; it never hits the wire
        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        # Handles both raw deflate streams (negative wbits) and
        # zlib-wrapped ones, as servers disagree on which to send
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk at the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                # Retry decompression with up to 1023 trailing bytes removed
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    https_request = http_request
    https_response = http_response
2777
2778
def make_socks_conn_class(base_class, socks_proxy):
    """
    Derive a connection class from `base_class` (an HTTP(S)Connection
    subclass) that tunnels through the SOCKS proxy described by the
    `socks_proxy` URL. Supported schemes: socks5, socks4a, socks4
    (plain 'socks' is treated as SOCKS4).

    Raises ValueError for an unsupported proxy scheme.
    """
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)
    scheme = url_components.scheme.lower()
    if scheme == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif scheme in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif scheme == 'socks4a':
        socks_type = ProxyType.SOCKS4A
    else:
        # Previously an unknown scheme fell through and triggered an obscure
        # NameError below; fail early with a meaningful message instead
        raise ValueError('Unsupported SOCKS proxy scheme: %s' % url_components.scheme)

    def unquote_if_non_empty(s):
        # Percent-decode credentials, but preserve None/'' untouched
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            # For HTTPS, wrap the proxied socket in TLS against the real host
            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
2820
2821
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    """HTTPS handler supporting SOCKS proxies (via the internal
    Ytdl-socks-proxy pseudo-header) and a configurable connection class."""

    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        if not https_conn_class:
            https_conn_class = compat_http_client.HTTPSConnection
        self._https_conn_class = https_conn_class
        self._params = params

    def https_open(self, req):
        connection_class = self._https_conn_class
        open_kwargs = {}

        # Forward the SSL context / hostname checking options when the
        # underlying handler exposes them (python version dependent)
        if hasattr(self, '_context'):  # python > 2.6
            open_kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            open_kwargs['check_hostname'] = self._check_hostname

        proxy_url = req.headers.get('Ytdl-socks-proxy')
        if proxy_url:
            connection_class = make_socks_conn_class(connection_class, proxy_url)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, connection_class, True),
            req, **open_kwargs)
2845
2846
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    # Prefix used by curl/Netscape to mark HttpOnly cookies in the file
    _HTTPONLY_PREFIX = '#HttpOnly_'
    # Number of tab-separated fields in a valid cookie file entry
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.

'''
    # Named view over one tab-separated cookie file line
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            # Strip the HttpOnly prefix so the base parser accepts the entry,
            # then validate field count and numeric expiry before passing on
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    # Malformed entries are skipped with a warning, not fatal
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True
2963
2964
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    """Cookie processor wrapper; the Python 2 Set-Cookie escaping workaround
    it used to apply is kept below, commented out, for reference."""

    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)

    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)

    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
2987
2988
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler

    The code is based on HTTPRedirectHandler implementation from CPython [1].

    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]

    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """

    # Route every redirect-capable status code through the stock 302 handler
    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302

    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a
        redirection response is received. If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect. Otherwise, raise HTTPError if no-one
        else should try to handle this url. Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case). In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.

        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)

        # Be conciliant with URIs containing a space. This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')

        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)
3044
3045
def extract_timezone(date_str):
    """Split a date string into (utc_offset, remainder).

    Returns a datetime.timedelta for the UTC offset expressed by a trailing
    timezone designator (zero for 'Z' or when no designator is found), and
    the date string with that designator stripped.
    """
    m = re.search(
        r'''(?x)
            ^.{8,}?                                              # >=8 char non-TZ prefix, if present
            (?P<tz>Z|                                            # just the UTC Z, or
                (?:(?<=.\b\d{4}|\b\d{2}:\d\d)|                   # preceded by 4 digits or hh:mm or
                   (?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d))     # not preceded by 3 alpha word or >= 4 alpha or 2 digits
            [ ]?                                                 # optional space
            (?P<sign>\+|-)                                       # +/-
            (?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})           # hh[:]mm
            $)
        ''', date_str)
    if not m:
        return datetime.timedelta(), date_str

    date_str = date_str[:-len(m.group('tz'))]
    sign = m.group('sign')
    if not sign:
        # Matched a bare 'Z': UTC, nothing further to compute
        return datetime.timedelta(), date_str

    direction = 1 if sign == '+' else -1
    offset = datetime.timedelta(
        hours=direction * int(m.group('hours')),
        minutes=direction * int(m.group('minutes')))
    return offset, date_str
3070
3071
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """
    if date_str is None:
        return None

    # strptime has no lenient fractional-seconds support; drop them
    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    try:
        dt = datetime.datetime.strptime(
            date_str, '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)) - timezone
    except ValueError:
        # Unparseable dates yield None rather than raising
        return None
    return calendar.timegm(dt.timetuple())
3089
3090
def date_formats(day_first=True):
    """Return the candidate date format strings, day-first or month-first."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
3093
3094
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""
    if date_str is None:
        return None

    # Normalize: commas become spaces, AM/PM plus trailing timezone names
    # are dropped, and any numeric UTC offset is stripped
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str.replace(',', ' '))
    _, date_str = extract_timezone(date_str)

    upload_date = None
    # NB: intentionally no break on success -- when several formats match,
    # the last matching one wins
    for expression in date_formats(day_first):
        try:
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
        except ValueError:
            pass

    if upload_date is None:
        # Fall back to RFC 2822 style dates
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            try:
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
            except ValueError:
                pass

    if upload_date is not None:
        return compat_str(upload_date)
3121
3122
def unified_timestamp(date_str, day_first=True):
    """Return a POSIX timestamp parsed from a free-form date string,
    or None when nothing can be parsed."""
    if date_str is None:
        return None

    date_str = re.sub(r'[,|]', '', date_str)

    # A "PM" marker shifts the parsed time by 12 hours
    hour_shift = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)

    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)

    # Remove unrecognized timezones from ISO 8601 alike timestamps
    m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if m:
        date_str = date_str[:-len(m.group('tz'))]

    # Python only supports microseconds, so remove nanoseconds
    m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if m:
        date_str = m.group(1)

    # First matching format wins
    for expression in date_formats(day_first):
        try:
            dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=hour_shift)
        except ValueError:
            continue
        return calendar.timegm(dt.timetuple())

    # Fall back to RFC 2822 style dates
    timetuple = email.utils.parsedate_tz(date_str)
    if timetuple:
        return calendar.timegm(timetuple) + hour_shift * 3600
3154
3155
def determine_ext(url, default_ext='unknown_video'):
    """Guess a file extension from a URL, falling back to `default_ext`."""
    if url is None or '.' not in url:
        return default_ext
    # Take everything after the last '.' of the pre-query part
    guess = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', guess):
        return guess
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    stripped = guess.rstrip('/')
    if stripped in KNOWN_EXTENSIONS:
        return stripped
    return default_ext
3167
3168
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    """Build the output filename of a subtitle track: <name>.<lang>.<format>."""
    subtitle_ext = '%s.%s' % (sub_lang, sub_format)
    return replace_extension(filename, subtitle_ext, expected_real_ext)
3171
3172
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
                auto|microsecond|second|minute|hour|day.
                auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        # 'auto' means: keep full precision unless an offset unit is found
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.now(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if match is not None:
        # Recursively resolve the base date, then apply the signed offset
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            # Months/years are not fixed-length, so use calendar arithmetic
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            # Round to the unit the offset was expressed in
            return datetime_round(new_date, unit)
        return new_date

    return datetime_round(datetime.datetime.strptime(date_str, format), precision)
3213
3214
def date_from_str(date_str, format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?

    format: string date format used to return datetime object from
    """
    dt = datetime_from_str(date_str, precision='microsecond', format=format)
    return dt.date()
3223
3224
def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    # Work with a zero-based month index so the year carry is plain
    # integer arithmetic (negative offsets included)
    months_total = dt.month - 1 + months
    new_year = dt.year + months_total // 12
    new_month = months_total % 12 + 1
    # Clamp the day so e.g. Jan 31 + 1 month lands on the last day of February
    new_day = min(dt.day, calendar.monthrange(new_year, new_month)[1])
    return dt.replace(new_year, new_month, new_day)
3232
3233
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt

    unit_seconds = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }
    step = unit_seconds[precision]
    timestamp = calendar.timegm(dt.timetuple())
    # Round half-up to the nearest multiple of `step`
    rounded = ((timestamp + step / 2) // step) * step
    return datetime.datetime.utcfromtimestamp(rounded)
3250
3251
def hyphenate_date(date_str):
    """Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format.

    Strings not in 'YYYYMMDD' form are returned unchanged.
    """
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is None:
        return date_str
    return '-'.join(match.groups())
3260
3261
class DateRange(object):
    """Represents a time interval between two dates"""

    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Missing bounds default to the widest representable dates
        self.start = date_from_str(start) if start is not None else datetime.datetime.min.date()
        self.end = date_from_str(end) if end is not None else datetime.datetime.max.date()
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)

    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)

    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end

    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
3291
3292
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    # On python 2 this may come back as bytes; decode to text
    if isinstance(name, bytes):
        name = name.decode(preferredencoding())

    assert isinstance(name, compat_str)
    return name
3301
3302
def get_windows_version():
    ''' Get Windows version. None if it's not running on Windows '''
    if compat_os_name != 'nt':
        return None
    return version_tuple(platform.win32_ver()[1])
3309
3310
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    # Map stdout/stderr file descriptors to Win32 STD_*_HANDLE ids
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        # A handle is only a usable console if it is a (non-remote)
        # character device and GetConsoleMode succeeds on it
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual Plane,
        # or len(s) when there is none
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    # Write in chunks: BMP runs of up to 1024 chars, non-BMP chars one at a
    # time (they occupy two UTF-16 code units)
    while s:
        count = min(next_nonbmp_pos(s), 1024)

        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
3384
3385
def write_string(s, out=None, encoding=None):
    """Write text `s` to stream `out` (default: sys.stderr), working around
    the encoding quirks of python 2/3 streams and Windows consoles."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        # Prefer the Windows console fast path (WriteConsoleW) when available
        if _windows_write_string(s, out):
            return

    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        # Binary stream: encode ourselves, dropping unencodable characters
        byt = s.encode(encoding or preferredencoding(), 'ignore')
        out.write(byt)
    elif hasattr(out, 'buffer'):
        # Text stream with an underlying binary buffer: bypass the text
        # layer so unencodable characters are dropped instead of raising
        enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
        byt = s.encode(enc, 'ignore')
        out.buffer.write(byt)
    else:
        out.write(s)
    out.flush()
3406
3407
def bytes_to_intlist(bs):
    """Convert a bytes-like (or py2 str) sequence into a list of byte values."""
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3
        return list(bs)
    # Python 2 str: elements are single-character strings
    return [ord(c) for c in bs]
3415
3416
def intlist_to_bytes(xs):
    """Pack a list of integer byte values back into a bytes object."""
    return compat_struct_pack('%dB' % len(xs), *xs) if xs else b''
3421
3422
# Cross-platform file locking
# Defines _lock_file(f, exclusive) and _unlock_file(f) with a platform-
# appropriate implementation (Win32 LockFileEx, POSIX fcntl, or a stub).
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED structure passed to
        # LockFileEx/UnlockFileEx
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Byte range covering (practically) the whole file
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the OVERLAPPED alive on the file object until unlock
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        # dwFlags 0x2 requests an exclusive lock, 0x0 a shared one
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())

else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl

        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        # Fallback stubs: locking simply isn't available here
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'

        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)

        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
3496
3497
class locked_file(object):
    """Context manager wrapping a file with an OS-level advisory lock.

    Read mode takes a shared lock; write/append modes take an exclusive
    one. The file is closed on exit (or if locking fails on entry).
    """

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        # Writers need exclusivity; readers can share the lock
        try:
            _lock_file(self.f, self.mode != 'r')
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
3527
3528
def get_filesystem_encoding():
    """Return the filesystem encoding, defaulting to utf-8 when unknown."""
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        return 'utf-8'
    return encoding
3532
3533
def shell_quote(args):
    """Return a single shell-escaped command line built from `args`."""
    encoding = get_filesystem_encoding()

    def _as_text(arg):
        # We may get a filename encoded with 'encodeFilename'
        return arg.decode(encoding) if isinstance(arg, bytes) else arg

    return ' '.join(compat_shlex_quote(_as_text(arg)) for arg in args)
3543
3544
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    # Merge with any data already smuggled into the URL
    url, existing = unsmuggle_url(url, {})
    data.update(existing)
    smuggled = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + smuggled
3553
3554
def unsmuggle_url(smug_url, default=None):
    """Extract data smuggled into a URL by smuggle_url().

    Returns (clean_url, data), or (smug_url, default) when nothing was
    smuggled."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, sdata = smug_url.rpartition('#')
    payload = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
3562
3563
def format_bytes(bytes):
    """Return *bytes* as a human-readable string, e.g. '1.50KiB'.

    Accepts ints, floats or numeric strings; returns 'N/A' for None.
    """
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    SUFFIXES = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    if bytes == 0.0:
        exponent = 0
    else:
        # Clamp the exponent so values below 1 byte don't index with a
        # negative exponent and values >= 1024**9 don't raise IndexError
        exponent = min(max(int(math.log(bytes, 1024.0)), 0), len(SUFFIXES) - 1)
    converted = float(bytes) / float(1024 ** exponent)
    return '%.2f%s' % (converted, SUFFIXES[exponent])
3576
3577
def lookup_unit_table(unit_table, s):
    """Parse a leading '<number> <unit>' from *s* using *unit_table*
    (a unit -> multiplier mapping) and return the value as an int,
    or None when *s* doesn't start with a recognizable quantity."""
    units_re = '|'.join(map(re.escape, unit_table))
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if m is None:
        return None
    # A decimal comma is treated the same as a decimal point
    value = float(m.group('num').replace(',', '.'))
    return int(value * unit_table[m.group('unit')])
3587
3588
def parse_filesize(s):
    """Parse a human-readable file size ('5 MiB', '1,5GB', ...) into an
    int number of bytes, or None."""
    if s is None:
        return None

    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
    }
    _PREFIXES = [
        ('K', 'kilo', 'kibi'),
        ('M', 'mega', 'mebi'),
        ('G', 'giga', 'gibi'),
        ('T', 'tera', 'tebi'),
        ('P', 'peta', 'pebi'),
        ('E', 'exa', 'exbi'),
        ('Z', 'zetta', 'zebi'),
        ('Y', 'yotta', 'yobi'),
    ]
    for exp, (letter, dec_name, bin_name) in enumerate(_PREFIXES, start=1):
        decimal, binary = 1000 ** exp, 1024 ** exp
        _UNIT_TABLE.update({
            letter + 'iB': binary,           # KiB, MiB, ...
            letter + 'B': decimal,           # KB, MB, ...
            letter.lower() + 'B': binary,    # kB, mB, ... (common misuse)
            letter + 'b': decimal,           # Kb, Mb, ...
            letter.lower() + 'b': decimal,   # kb, mb, ...
            dec_name + 'bytes': decimal,     # kilobytes, megabytes, ...
            bin_name + 'bytes': binary,      # kibibytes, mebibytes, ...
        })

    return lookup_unit_table(_UNIT_TABLE, s)
3658
3659
def parse_count(s):
    """Parse a view/like count like '1.2M' or '1,234' into an int, or None."""
    if s is None:
        return None

    s = s.strip()

    # Plain numbers, possibly with thousand separators
    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)

    suffix_multipliers = dict.fromkeys(('k', 'K'), 1000)
    suffix_multipliers.update(dict.fromkeys(('m', 'M', 'kk', 'KK'), 1000 ** 2))
    return lookup_unit_table(suffix_multipliers, s)
3679
3680
def parse_resolution(s):
    """Extract video dimensions from a string like '1920x1080', '720p'
    or '4K'; returns a dict with 'width'/'height' keys (possibly empty)."""
    if s is None:
        return {}

    # "<width> x <height>"
    m = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
    if m:
        return {'width': int(m.group('w')), 'height': int(m.group('h'))}

    # "<height>p" / "<height>i"
    m = re.search(r'\b(\d+)[pPiI]\b', s)
    if m:
        return {'height': int(m.group(1))}

    # "4k"/"8k" shorthand (4k == 2160 lines, 8k == 4320)
    m = re.search(r'\b([48])[kK]\b', s)
    if m:
        return {'height': int(m.group(1)) * 540}

    return {}
3701
3702
def parse_bitrate(s):
    """Extract a bitrate in kbps from a string like '128 kbps', or None."""
    if not isinstance(s, compat_str):
        return
    m = re.search(r'\b(\d+)\s*kbps', s)
    return int(m.group(1)) if m else None
3709
3710
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
    if name not in names:
        return None
    return names.index(name) + 1
3720
3721
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """

    abbreviations = [full[:3] for full in ENGLISH_MONTH_NAMES]
    try:
        return abbreviations.index(abbrev) + 1
    except ValueError:
        return None
3730
3731
def fix_xml_ampersands(xml_str):
    """Escape bare '&' characters in *xml_str* as '&amp;', leaving
    already-valid entity references untouched."""
    bare_ampersand = r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)'
    return re.sub(bare_ampersand, '&amp;', xml_str)
3738
3739
def setproctitle(title):
    """Best-effort: set the process name via glibc prctl(PR_SET_NAME).

    Silently does nothing on platforms without a usable libc."""
    assert isinstance(title, compat_str)

    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return

    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only accepts bytestrings,
        # which unicode_literals prevents us from passing
        return
    encoded = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(encoded))
    buf.value = encoded
    try:
        libc.prctl(15, buf, 0, 0, 0)  # 15 == PR_SET_NAME
    except AttributeError:
        return  # Strange libc, just skip this
3764
3765
def remove_start(s, start):
    """Strip *start* from the beginning of *s* if present (None-safe)."""
    if s is None or not s.startswith(start):
        return s
    return s[len(start):]
3768
3769
def remove_end(s, end):
    """Strip *end* from the end of *s* if present (None-safe)."""
    if s is None or not s.endswith(end):
        return s
    return s[:-len(end)]
3772
3773
def remove_quotes(s):
    """Drop one matching pair of surrounding single or double quotes."""
    if s is None or len(s) < 2:
        return s
    if s[0] == s[-1] and s[0] in ('"', "'"):
        return s[1:-1]
    return s
3781
3782
def get_domain(url):
    """Return the 'domain.tld' part of *url* with scheme and leading
    'www.' stripped, or None if no domain-like part is found."""
    m = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    if m is None:
        return None
    return m.group('domain')
3786
3787
def url_basename(url):
    """Return the last path component of *url* (query/fragment excluded)."""
    path = compat_urlparse.urlparse(url).path
    return path.strip('/').rpartition('/')[2]
3791
3792
def base_url(url):
    """Return *url* truncated just after the last '/' that precedes any
    query/fragment. Raises AttributeError when no such prefix exists."""
    m = re.match(r'https?://[^?#&]+/', url)
    return m.group()
3795
3796
def urljoin(base, path):
    """Join *base* and *path* like urlparse.urljoin, but tolerate bytes
    input and return None instead of raising on unusable arguments."""
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not path or not isinstance(path, compat_str):
        return None
    # Absolute (or protocol-relative) paths are returned unchanged
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
3810
3811
class HEADRequest(compat_urllib_request.Request):
    """Request subclass whose HTTP method is always HEAD."""
    def get_method(self):
        return 'HEAD'
3815
3816
class PUTRequest(compat_urllib_request.Request):
    """Request subclass whose HTTP method is always PUT."""
    def get_method(self):
        return 'PUT'
3820
3821
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce *v* (or the attribute *get_attr* of it) to an int scaled by
    invscale/scale; return *default* on failure or empty input."""
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if v is None or v == '':
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        return default
3834
3835
def str_or_none(v, default=None):
    """Stringify *v*, returning *default* when it is None."""
    return compat_str(v) if v is not None else default
3838
3839
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    if isinstance(int_str, compat_str):
        # Strip common thousand separators and '+' suffixes ('1,234+')
        int_str = re.sub(r'[,\.\+]', '', int_str)
    return int_or_none(int_str)
3847
3848
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce *v* to a float scaled by invscale/scale; *default* on failure."""
    if v is None:
        return default
    try:
        return float(v) * invscale / scale
    except (ValueError, TypeError):
        return default
3856
3857
def bool_or_none(v, default=None):
    """Return *v* only when it is a genuine bool, else *default*."""
    if isinstance(v, bool):
        return v
    return default
3860
3861
def strip_or_none(v, default=None):
    """Return *v* stripped of whitespace when it is a string, else *default*."""
    if isinstance(v, compat_str):
        return v.strip()
    return default
3864
3865
def url_or_none(url):
    """Return *url* stripped if it looks like a URL with a supported
    scheme (http, rtmp, mms, ftp, ...), else None."""
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url):
        return url
    return None
3871
3872
def strftime_or_none(timestamp, date_format, default=None):
    """Format a unix timestamp (number) or 'YYYYMMDD' string using
    *date_format*; return *default* when the input can't be interpreted."""
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            dt = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            dt = datetime.datetime.strptime(timestamp, '%Y%m%d')
        else:
            dt = None
        # dt is None for unsupported types -> AttributeError -> default
        return dt.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
3883
3884
def parse_duration(s):
    """Parse a duration ('1:23:45', ISO-8601-ish 'PT1H2M3S', '90 min', ...)
    into a number of seconds, or None when unrecognized."""
    if not isinstance(s, compat_basestring):
        return None

    s = s.strip()

    days = hours = mins = secs = ms = None
    # [[[DD:]HH:]MM:]SS[.ms]
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        # Free-form / ISO-8601-style '1d 2h 3m 4.5s'
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            # '1.5 hours' / '90 min'
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if not m:
                return None
            hours, mins = m.groups()

    duration = 0
    for value, unit_seconds in ((secs, 1), (mins, 60), (hours, 60 * 60), (days, 24 * 60 * 60)):
        if value:
            duration += float(value) * unit_seconds
    if ms:
        duration += float(ms)
    return duration
3941
3942
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the existing extension: 'a.mp4' -> 'a.temp.mp4'.

    If *expected_real_ext* is given but doesn't match the actual
    extension, *ext* is appended to the full filename instead."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
3949
3950
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the file's extension with *ext*.

    If *expected_real_ext* is given but doesn't match the actual
    extension, *ext* is appended instead of replacing."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        name = filename
    return '{0}.{1}'.format(name, ext)
3956
3957
def check_executable(exe, args=None):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    # args=None (not a mutable default list); None means "no extra args"
    try:
        process_communicate_or_kill(subprocess.Popen(
            [exe] + (args or []), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
    except OSError:
        # Executable missing from PATH or not runnable
        return False
    return exe
3967
3968
def get_exe_version(exe, args=None,
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    if args is None:  # avoid a mutable default argument
        args = ['--version']
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        out, _ = process_communicate_or_kill(subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
3986
3987
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Extract a version string from *output* using *version_re*
    (default: 'version <X>'); return *unrecognized* when nothing matches."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
3997
3998
class LazyList(collections.abc.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''

    class IndexError(IndexError):
        # Subclass of the builtin so callers can catch either, while the
        # LazyList-specific variant remains distinguishable
        pass

    def __init__(self, iterable):
        # Not-yet-consumed remainder of the source iterable
        self.__iterable = iter(iterable)
        # Items consumed so far, always stored in forward order
        self.__cache = []
        # When True, indices/iteration are interpreted from the end
        self.__reversed = False

    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item

    def __exhaust(self):
        # Pull everything remaining from the iterable into the cache
        self.__cache.extend(self.__iterable)
        return self.__cache

    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]

    @staticmethod
    def __reverse_index(x):
        # Map a forward index to the equivalent negative index from the end
        return None if x is None else -(x + 1)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            if self.__reversed:
                idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
            start, stop, step = idx.start, idx.stop, idx.step or 1
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start, stop, step = idx, idx, 0
        else:
            raise TypeError('indices must be integers or slices')
        if ((start or 0) < 0 or (stop or 0) < 0
                or (start is None and step < 0)
                or (stop is None and step > 0)):
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            self.__exhaust()
            try:
                return self.__cache[idx]
            except IndexError as e:
                raise self.IndexError(e) from e
        # Consume just enough items to satisfy the requested index/slice
        n = max(start or 0, stop or 0) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        try:
            return self.__cache[idx]
        except IndexError as e:
            raise self.IndexError(e) from e

    def __bool__(self):
        # Truthy iff at least one item exists; probe the cheapest index
        try:
            self[-1] if self.__reversed else self[0]
        except self.IndexError:
            return False
        return True

    def __len__(self):
        self.__exhaust()
        return len(self.__cache)

    def reverse(self):
        # Flips iteration order lazily, in place; returns self for chaining
        self.__reversed = not self.__reversed
        return self

    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())

    def __str__(self):
        return repr(self.exhaust())
4083
4084
class PagedList:
    """Base class for lazily-fetched paginated listings.

    Subclasses implement _getslice(start, end) yielding entries."""

    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        self._cache = {}

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def getpage(self, pagenum):
        """Fetch (and optionally cache) the entries of one page."""
        # NOTE: a cached empty page is refetched (falsy -> `or` re-evaluates)
        page_results = self._cache.get(pagenum) or list(self._pagefunc(pagenum))
        if self._use_cache:
            self._cache[pagenum] = page_results
        return page_results

    def getslice(self, start=0, end=None):
        """Return entries [start:end] as a plain list."""
        return list(self._getslice(start, end))

    def _getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')

    def __getitem__(self, idx):
        # NOTE: cache must be enabled if this is used
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        return entries[0] if entries else None
4114
4115
class OnDemandPagedList(PagedList):
    """PagedList that pulls pages one by one as the slice is consumed."""
    def _getslice(self, start, end):
        for pagenum in itertools.count(start // self._pagesize):
            # First/next absolute entry index covered by this page
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            # Offsets of the requested range within this page
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            page_results = self.getpage(pagenum)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            yield from page_results

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
4149
4150
class InAdvancePagedList(PagedList):
    """PagedList for listings whose total page count is known up front."""
    def __init__(self, pagefunc, pagecount, pagesize):
        # Total number of pages available
        self._pagecount = pagecount
        PagedList.__init__(self, pagefunc, pagesize, True)

    def _getslice(self, start, end):
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        # Entries to drop from the first fetched page
        skip_elems = start - start_page * self._pagesize
        # Entries still wanted in total (None = unbounded)
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page_results = self.getpage(pagenum)
            if skip_elems:
                page_results = page_results[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page_results) < only_more:
                    only_more -= len(page_results)
                else:
                    # This page satisfies the remainder of the request
                    yield from page_results[:only_more]
                    break
            yield from page_results
4174
4175
def uppercase_escape(s):
    """Decode '\\UXXXXXXXX' escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decode(m.group(0))[0],
        s)
4182
4183
def lowercase_escape(s):
    """Decode '\\uXXXX' escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decode(m.group(0))[0],
        s)
4190
4191
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Python 2 quote() can't handle unicode; pre-encode to UTF-8 bytes
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
4197
4198
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parts = compat_urllib_parse_urlparse(url)
    return parts._replace(
        # Non-ASCII hostnames are punycoded
        netloc=parts.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(parts.path),
        params=escape_rfc3986(parts.params),
        query=escape_rfc3986(parts.query),
        fragment=escape_rfc3986(parts.fragment)
    ).geturl()
4209
4210
def parse_qs(url):
    """Return the query string of *url* parsed into a dict of value lists."""
    query = compat_urllib_parse_urlparse(url).query
    return compat_parse_qs(query)
4213
4214
def read_batch_urls(batch_fd):
    """Read a batch-file object and return its cleaned list of URLs.

    Blank lines and comment lines starting with '#', ';' or ']' are
    skipped; the file object is closed afterwards."""
    def fixup(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        # Strip a BOM, whether it survived as raw bytes or was decoded
        for bom in ('\xef\xbb\xbf', '\ufeff'):
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.lstrip()
        if not url or url.startswith(('#', ';', ']')):
            return False
        # "#" cannot be stripped out since it is part of the URI
        # However, it can safely be stripped out if following a whitespace
        return re.split(r'\s#', url, 1)[0].rstrip()

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]
4232
4233
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
4236
4237
def update_url_query(url, query):
    """Return *url* with the *query* dict merged into its query string."""
    if not query:
        return url
    parsed = compat_urlparse.urlparse(url)
    merged = compat_parse_qs(parsed.query)
    merged.update(query)
    return compat_urlparse.urlunparse(parsed._replace(
        query=compat_urllib_parse_urlencode(merged, True)))
4246
4247
def update_Request(req, url=None, data=None, headers={}, query={}):
    """Clone *req*, optionally replacing its URL/data and merging in extra
    headers/query parameters; the HTTP method and timeout are preserved."""
    new_headers = req.headers.copy()
    new_headers.update(headers)
    # Preserve non-standard methods via the dedicated Request subclasses
    req_type = {
        'HEAD': HEADRequest,
        'PUT': PUTRequest,
    }.get(req.get_method(), compat_urllib_request.Request)
    new_req = req_type(
        update_url_query(url or req.get_full_url(), query),
        data=data or req.data, headers=new_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
4266
4267
def _multipart_encode_impl(data, boundary):
    """Encode *data* (dict of str/bytes keys and values) as a
    multipart/form-data body with the given *boundary*.

    Returns (body_bytes, content_type); raises ValueError when the
    boundary occurs inside any encoded field."""
    content_type = 'multipart/form-data; boundary=%s' % boundary
    boundary_bytes = boundary.encode('ascii')

    # Collect parts in a list and join once; repeated bytes += is O(n^2)
    parts = []
    for k, v in data.items():
        parts.append(b'--' + boundary_bytes + b'\r\n')
        if isinstance(k, compat_str):
            k = k.encode('utf-8')
        if isinstance(v, compat_str):
            v = v.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
        if boundary_bytes in content:
            raise ValueError('Boundary overlaps with data')
        parts.append(content)

    parts.append(b'--' + boundary_bytes + b'--\r\n')

    return b''.join(parts), content_type
4288
4289
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    if boundary is not None:
        # Caller chose the boundary: a collision with the data is an error
        return _multipart_encode_impl(data, boundary)

    while True:
        candidate = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
        try:
            return _multipart_encode_impl(data, candidate)
        except ValueError:
            # Random boundary collided with the payload; try another one
            continue
4318
4319
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up the first usable key of *key_or_keys* in dict *d*.

    Values that are None (or falsy, when *skip_false_values*) are skipped."""
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for key in key_or_keys:
        value = d.get(key)
        if value is None:
            continue
        if skip_false_values and not value:
            continue
        return value
    return default
4328
4329
def try_get(src, getter, expected_type=None):
    """Apply each getter in *getter* to *src* and return the first result
    that neither raises nor fails the *expected_type* check."""
    for get in variadic(getter):
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            continue
        if expected_type is None or isinstance(v, expected_type):
            return v
4339
4340
def merge_dicts(*dicts):
    """Merge dicts left-to-right; earlier dicts win, except that an empty
    string may be replaced by a later non-empty string."""
    merged = {}
    for a_dict in dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            upgrades_empty_str = (
                isinstance(v, compat_str) and v
                and isinstance(merged.get(k), compat_str)
                and not merged[k])
            if k not in merged or upgrades_empty_str:
                merged[k] = v
    return merged
4353
4354
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    """Coerce *string* to compat_str, decoding bytes with *encoding*."""
    if isinstance(string, compat_str):
        return string
    return compat_str(string, encoding, errors)
4357
4358
# MPAA-style US content ratings mapped to a minimum viewer age
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}


# US TV Parental Guidelines ratings mapped to a minimum viewer age
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}
4376
4377
def parse_age_limit(s):
    """Normalize an age-limit value ('18', '18+', 'PG-13', 'TV-MA', or an
    int in [0, 21]) to an int age, or None when unrecognized."""
    if type(s) == int:
        return s if 0 <= s <= 21 else None
    if not isinstance(s, compat_basestring):
        return None
    # Bare age, optionally with a trailing '+'
    m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
    if m:
        return int(m.group('age'))
    s = s.upper()
    if s in US_RATINGS:
        return US_RATINGS[s]
    # 'TV-14', 'TV_14' or 'TV14' style TV ratings
    m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
    return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)] if m else None
4393
4394
def strip_jsonp(code):
    """Strip a JSONP wrapper ('callback({...});') down to the raw payload."""
    jsonp_re = r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$'''
    return re.sub(jsonp_re, r'\g<callback_data>', code)
4403
4404
def js_to_json(code, vars={}):
    """Convert a JavaScript object/value literal to valid JSON text.

    vars is a dict of var, val pairs to substitute for bare identifiers.
    """
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    # Hex and octal integer keys/values (JSON only allows decimal)
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )

    def fix_kv(m):
        # Rewrites one matched token (string, identifier, number, comment...)
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v in ('undefined', 'void 0'):
            return 'null'
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            # Comments, '!'-prefixes and dangling commas are dropped
            return ""

        if v[0] in ("'", '"'):
            # Normalize quoting/escapes inside string literals
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    # Numeric object keys must become quoted strings
                    return '"%d":' % i if v.endswith(':') else '%d' % i

        if v in vars:
            return vars[v]

        # Bare identifiers (e.g. unquoted object keys) get quoted
        return '"%s"' % v

    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
4451
4452
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def lookup(qid):
        # Position in the list is the rank; unknown ids rank lowest (-1)
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return lookup
4461
4462
# Default output filename templates (keyed by template type)
DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
# Known output-template types; values look like default filename infixes
# for the corresponding auxiliary files (None = no infix) — confirm against
# the template-handling code before relying on this
OUTTMPL_TYPES = {
    'chapter': None,
    'subtitle': None,
    'thumbnail': None,
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}

# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
# Template regex for %-style specifiers; format with ({0}=key pattern,
# {1}=allowed conversion-type characters) before use
STR_FORMAT_RE_TMPL = r'''(?x)
    (?<!%)(?P<prefix>(?:%%)*)
    %
    (?P<has_key>\((?P<key>{0})\))?
    (?P<format>
        (?P<conversion>[#0\-+ ]+)?
        (?P<min_width>\d+)?
        (?P<precision>\.\d+)?
        (?P<len_mod>[hlL])? # unused in python
        {1} # conversion type
    )
    '''


# Conversion-type characters accepted by %-formatting
STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
4497
4498
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) <= length:
        return s
    return s[:length - len(ELLIPSES)] + ELLIPSES
4507
4508
def version_tuple(v):
    """Split a version string like '1.2-3' into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
4511
4512
def is_outdated_version(version, limit, assume_new=True):
    """Compare dotted version strings; on missing/unparsable input, trust
    *assume_new* (True -> treat as up to date)."""
    fallback = not assume_new
    if not version:
        return fallback
    try:
        return version_tuple(version) < version_tuple(limit)
    except ValueError:
        return fallback
4520
4521
def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """

    # NOTE(review): imported inside the function rather than at module
    # level — presumably to avoid a circular import; confirm before moving
    from .update import is_non_updateable

    return not is_non_updateable()
4528
4529
def args_to_str(args):
    # Get a short string representation for a subprocess command
    return ' '.join(map(compat_shlex_quote, args))
4533
4534
def error_to_compat_str(err):
    """Stringify an exception; on Python 2 the byte message is decoded
    with the preferred encoding instead of ASCII."""
    msg = str(err)
    if sys.version_info[0] < 3:
        msg = msg.decode(preferredencoding())
    return msg
4542
4543
def mimetype2ext(mt):
    """Map a MIME type (e.g. 'video/x-flv') to a file extension ('flv').

    Falls back to the bare subtype when no explicit mapping exists."""
    if mt is None:
        return None

    # Drop any parameters ('; charset=...')
    mt = mt.partition(';')[0].strip()

    FULL_MAP = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
        'audio/wav': 'wav',
        'audio/wave': 'wav',
    }
    if mt in FULL_MAP:
        return FULL_MAP[mt]

    SUBTYPE_MAP = {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
        'x-wav': 'wav',
        'filmstrip+json': 'fs',
        'svg+xml': 'svg',
    }
    subtype = mt.rpartition('/')[2]
    if subtype.lower() in SUBTYPE_MAP:
        return SUBTYPE_MAP[subtype.lower()]

    SUFFIX_MAP = {
        'json': 'json',
        'xml': 'xml',
        'zip': 'zip',
        'gzip': 'gz',
    }
    # '+suffix' structured syntax (e.g. 'vnd.foo+json')
    suffix = subtype.partition('+')[2]
    if suffix in SUFFIX_MAP:
        return SUFFIX_MAP[suffix]

    return subtype.replace('+', '.')
4606
4607
def parse_codecs(codecs_str):
    """Split an RFC 6381 codecs string into {'vcodec': ..., 'acodec': ...}.

    Returns {} when the string is empty or can't be classified."""
    # http://tools.ietf.org/html/rfc6381
    if not codecs_str:
        return {}
    split_codecs = list(filter(None, map(
        str.strip, codecs_str.strip().strip(',').split(','))))
    VIDEO_CODECS = ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora')
    AUDIO_CODECS = ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl')
    vcodec = acodec = None
    for full_codec in split_codecs:
        base = full_codec.split('.')[0]
        if base in VIDEO_CODECS:
            vcodec = vcodec or full_codec
        elif base in AUDIO_CODECS:
            acodec = acodec or full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if vcodec or acodec:
        return {
            'vcodec': vcodec or 'none',
            'acodec': acodec or 'none',
        }
    # Nothing recognized: with exactly two entries, assume video+audio order
    if len(split_codecs) == 2:
        return {
            'vcodec': split_codecs[0],
            'acodec': split_codecs[1],
        }
    return {}
4637
4638
def urlhandle_detect_ext(url_handle):
    """Guess a file extension for a response, preferring the filename in
    Content-Disposition over the Content-Type."""
    headers = url_handle.headers

    cd = headers.get('Content-Disposition')
    if cd:
        m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
        if m:
            ext = determine_ext(m.group('filename'), default_ext=None)
            if ext:
                return ext

    return mimetype2ext(headers.get('Content-Type'))
4651
4652
def encode_data_uri(data, mime_type):
    """Wrap *data* (bytes) in a base64 'data:' URI with *mime_type*."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, payload)
4655
4656
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None or content_limit is None:
        # No viewer limit set, or content is available to everyone
        return False
    return age_limit < content_limit
4665
4666
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    # Order matters: the 4-byte UTF-32 BOMs must be tried before the
    # 2-byte UTF-16 BOMs that are prefixes of them.
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = None
    for bom, encoding in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(encoding, 'replace')
            break
    if decoded is None:
        # No BOM found: assume UTF-8
        decoded = first_bytes.decode('utf-8', 'replace')

    # HTML iff the text starts (after whitespace) with '<'
    return re.match(r'^\s*<', decoded)
4685
4686
def determine_protocol(info_dict):
    """Return the download protocol for *info_dict*, inferring it from the URL
    when the 'protocol' field is unset."""
    protocol = info_dict.get('protocol')
    if protocol is not None:
        return protocol

    url = info_dict['url']
    # Scheme families that map directly onto a protocol name
    for family in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(family):
            return family

    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext

    return compat_urllib_parse_urlparse(url).scheme
4707
4708
def render_table(header_row, data, delim=False, extraGap=0, hideEmpty=False):
    """ Render a list of rows, each as a list of values """

    def column_widths(rows):
        # Width of each column = longest stringified cell in it
        return [max(len(compat_str(cell)) for cell in column) for column in zip(*rows)]

    def keep_columns(row, mask):
        # Drop cells whose mask entry is falsy (zero-width columns)
        return [cell for keep, cell in zip(mask, row) if keep]

    if hideEmpty:
        widths = column_widths(data)
        header_row = keep_columns(header_row, widths)
        data = [keep_columns(row, widths) for row in data]

    table = [header_row] + data
    widths = column_widths(table)
    if delim:
        # Insert a dashed separator line under the header
        table = [header_row] + [['-' * width for width in widths]] + data
    # Left-align every column except the last, which is unpadded
    format_str = ' '.join('%-' + compat_str(width + extraGap) + 's' for width in widths[:-1]) + ' %s'
    return '\n'.join(format_str % tuple(row) for row in table)
4729
4730
def _match_one(filter_part, dct, incomplete):
    """Evaluate a single filter clause (e.g. 'duration > 60', 'title~=foo',
    '!is_live') against the dict *dct*.

    When *incomplete* is true, comparisons on fields missing from *dct* pass.
    Raises ValueError for unparsable clauses or invalid comparison values.
    """
    # TODO: Generalize code with YoutubeDL._build_format_filter
    STRING_OPERATORS = {
        '*=': operator.contains,
        '^=': lambda attr, value: attr.startswith(value),
        '$=': lambda attr, value: attr.endswith(value),
        '~=': lambda attr, value: re.search(value, attr),
    }
    COMPARISON_OPERATORS = {
        **STRING_OPERATORS,
        '<=': operator.le,  # "<=" must be defined above "<"
        '<': operator.lt,
        '>=': operator.ge,
        '>': operator.gt,
        '=': operator.eq,
    }

    # Binary clause: <key> [!]<op>[?] <value>
    # '?' (none_inclusive) makes the clause pass when the field is missing.
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
            (?P<strval>.+?)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        unnegated_op = COMPARISON_OPERATORS[m.group('op')]
        if m.group('negation'):
            op = lambda attr, value: not unnegated_op(attr, value)
        else:
            op = unnegated_op
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None
                or m.group('strval') is not None
                # If the original field is a string and matching comparisonvalue is
                # a number we should respect the origin of the original field
                # and process comparison value as a string (see
                # https://github.com/ytdl-org/youtube-dl/issues/11082).
                or actual_value is not None and m.group('intval') is not None
                and isinstance(actual_value, compat_str)):
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Unescape \" or \' inside a quoted value
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            if m.group('op') in STRING_OPERATORS:
                raise ValueError('Operator %s only supports string values!' % m.group('op'))
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Not a plain integer: try parsing as a filesize ('500K', then '500KB')
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            return incomplete or m.group('none_inclusive')
        return op(actual_value, comparison_value)

    # Unary clause: '<key>' (field is set / True) or '!<key>' (unset / False)
    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if incomplete and actual_value is None:
            return True
        return op(actual_value)

    raise ValueError('Invalid filter part %r' % filter_part)
4812
4813
def match_str(filter_str, dct, incomplete=False):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
    When incomplete, all conditions passes on missing fields
    """
    # Clauses are '&'-separated; a literal '&' is escaped as '\&'
    for clause in re.split(r'(?<!\\)&', filter_str):
        if not _match_one(clause.replace(r'\&', '&'), dct, incomplete):
            return False
    return True
4821
4822
def match_filter_func(filter_str):
    """Build a --match-filter callback: it returns None when the video passes
    *filter_str*, otherwise a human-readable skip message."""
    def _match_func(info_dict, *args, **kwargs):
        if match_str(filter_str, info_dict, *args, **kwargs):
            return None
        title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (title, filter_str)
    return _match_func
4831
4832
def parse_dfxp_time_expr(time_expr):
    """Parse a DFXP/TTML time expression into seconds (float).

    Returns None for empty or unrecognized expressions."""
    if not time_expr:
        return None

    # Plain offset, e.g. '12.3' or '12.3s'
    m = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if m:
        return float(m.group('time_offset'))

    # Clock time h:mm:ss[.frac]; some encoders separate the fraction with ':'
    m = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if m:
        hours, minutes = int(m.group(1)), int(m.group(2))
        seconds = float(m.group(3).replace(':', '.'))
        return 3600 * hours + 60 * minutes + seconds
    return None
4844
4845
def srt_subtitles_timecode(seconds):
    """Format a duration in seconds as an SRT timecode: HH:MM:SS,mmm."""
    hours = seconds / 3600
    minutes = (seconds % 3600) / 60
    secs = seconds % 60
    millis = (seconds % 1) * 1000
    # '%d' truncates the float components, matching integer division
    return '%02d:%02d:%02d,%03d' % (hours, minutes, secs, millis)
4848
4849
def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    # Older TTML namespace URIs, normalized (as raw bytes, pre-parse) to the
    # current ones so a single set of XPath expressions works for all inputs.
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    # TTML styling attributes that are translated into SRT markup below
    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    # style id -> resolved style dict; filled by the resolution loop below
    styles = {}
    # style inherited from the body/div elements, applied to every paragraph
    default_style = {}

    class TTMLPElementParser(object):
        # Streaming XML target that converts one <p> subtree to SRT markup.
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        # Skip properties already emitted by an enclosing element
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                # Close this element's tags in reverse order of opening
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        # Re-serialize the subtree and feed it through the streaming converter
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    # Resolve style inheritance; re-run while a style's parent has not been
    # seen yet (styles may reference parents defined later in the document).
    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
5012
5013
def cli_option(params, command_option, param):
    """Return [command_option, value] when params[param] is set, else [].

    The value is always stringified so the result is safe to pass to
    subprocess.  Previously, falsy non-None values (e.g. 0) skipped the
    compat_str conversion and a raw non-string ended up in the argv list.
    """
    param = params.get(param)
    if param is None:
        return []
    return [command_option, compat_str(param)]
5019
5020
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Translate a boolean param into CLI args.

    Returns [] when unset; ['opt', 'true'/'false'] normally, or a single
    joined 'opt<separator>value' token when *separator* is given."""
    value = params.get(param)
    if value is None:
        return []
    assert isinstance(value, bool)
    rendered = true_value if value else false_value
    if separator:
        return ['%s%s%s' % (command_option, separator, rendered)]
    return [command_option, rendered]
5029
5030
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Return [command_option] when params[param] equals expected_value, else []."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
5034
5035
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    """Look up extra CLI arguments in *argdict* using the first matching entry
    of *keys* (each entry may be a single key or a tuple of keys whose hits
    are concatenated).  Returns *default* when nothing matches."""
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)
    assert isinstance(keys, (list, tuple))

    for key_list in keys:
        hits = [argdict.get(key.lower()) for key in variadic(key_list)]
        hits = [args for args in hits if args is not None]
        if hits:
            # Flatten the matched argument lists in order
            return [arg for args in hits for arg in args]
    return default
5054
5055
def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
    """Resolve configuration args for *exe* under *main_key*, building the
    lookup-key list and delegating to cli_configuration_args."""
    main_key = main_key.lower()
    exe = exe.lower()
    root_key = exe if main_key == exe else '%s+%s' % (main_key, exe)
    lookup_keys = ['%s%s' % (root_key, suffix) for suffix in (keys or [''])]
    if root_key in lookup_keys:
        # Bare root key requested: also consult the split form and 'default'
        if main_key != exe:
            lookup_keys.append((main_key, exe))
        lookup_keys.append('default')
    else:
        use_compat = False
    return cli_configuration_args(argdict, lookup_keys, default, use_compat)
5067
5068
class ISO639Utils(object):
    """Conversions between ISO 639-1 (2-letter) and ISO 639-2/T (3-letter)
    language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'iw': 'heb',  # Replaced by he in 1989 revision
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'in': 'ind',  # Replaced by id in 1989 revision
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'ji': 'yid',  # Replaced by yi in 1989 revision
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }

    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters matter, so region suffixes
        # (e.g. 'en-US') are tolerated; returns None for unknown codes
        return cls._lang_map.get(code[:2])

    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Linear reverse lookup; returns None (implicitly) for unknown codes
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
5272
5273
class ISO3166Utils(object):
    """Mapping from ISO 3166-1 alpha-2 country codes to full country names."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }

    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Case-insensitive lookup; returns None for unknown codes
        return cls._country_map.get(code.upper())
5532
5533
class GeoUtils(object):
    """Helpers for generating IP addresses geolocated in a given country,
    used for geo-restriction bypass."""
    # Major IPv4 address blocks per country
    _country_ip_map = {
        'AD': '46.172.224.0/19',
        'AE': '94.200.0.0/13',
        'AF': '149.54.0.0/17',
        'AG': '209.59.64.0/18',
        'AI': '204.14.248.0/21',
        'AL': '46.99.0.0/16',
        'AM': '46.70.0.0/15',
        'AO': '105.168.0.0/13',
        'AP': '182.50.184.0/21',
        'AQ': '23.154.160.0/24',
        'AR': '181.0.0.0/12',
        'AS': '202.70.112.0/20',
        'AT': '77.116.0.0/14',
        'AU': '1.128.0.0/11',
        'AW': '181.41.0.0/18',
        'AX': '185.217.4.0/22',
        'AZ': '5.197.0.0/16',
        'BA': '31.176.128.0/17',
        'BB': '65.48.128.0/17',
        'BD': '114.130.0.0/16',
        'BE': '57.0.0.0/8',
        'BF': '102.178.0.0/15',
        'BG': '95.42.0.0/15',
        'BH': '37.131.0.0/17',
        'BI': '154.117.192.0/18',
        'BJ': '137.255.0.0/16',
        'BL': '185.212.72.0/23',
        'BM': '196.12.64.0/18',
        'BN': '156.31.0.0/16',
        'BO': '161.56.0.0/16',
        'BQ': '161.0.80.0/20',
        'BR': '191.128.0.0/12',
        'BS': '24.51.64.0/18',
        'BT': '119.2.96.0/19',
        'BW': '168.167.0.0/16',
        'BY': '178.120.0.0/13',
        'BZ': '179.42.192.0/18',
        'CA': '99.224.0.0/11',
        'CD': '41.243.0.0/16',
        'CF': '197.242.176.0/21',
        'CG': '160.113.0.0/16',
        'CH': '85.0.0.0/13',
        'CI': '102.136.0.0/14',
        'CK': '202.65.32.0/19',
        'CL': '152.172.0.0/14',
        'CM': '102.244.0.0/14',
        'CN': '36.128.0.0/10',
        'CO': '181.240.0.0/12',
        'CR': '201.192.0.0/12',
        'CU': '152.206.0.0/15',
        'CV': '165.90.96.0/19',
        'CW': '190.88.128.0/17',
        'CY': '31.153.0.0/16',
        'CZ': '88.100.0.0/14',
        'DE': '53.0.0.0/8',
        'DJ': '197.241.0.0/17',
        'DK': '87.48.0.0/12',
        'DM': '192.243.48.0/20',
        'DO': '152.166.0.0/15',
        'DZ': '41.96.0.0/12',
        'EC': '186.68.0.0/15',
        'EE': '90.190.0.0/15',
        'EG': '156.160.0.0/11',
        'ER': '196.200.96.0/20',
        'ES': '88.0.0.0/11',
        'ET': '196.188.0.0/14',
        'EU': '2.16.0.0/13',
        'FI': '91.152.0.0/13',
        'FJ': '144.120.0.0/16',
        'FK': '80.73.208.0/21',
        'FM': '119.252.112.0/20',
        'FO': '88.85.32.0/19',
        'FR': '90.0.0.0/9',
        'GA': '41.158.0.0/15',
        'GB': '25.0.0.0/8',
        'GD': '74.122.88.0/21',
        'GE': '31.146.0.0/16',
        'GF': '161.22.64.0/18',
        'GG': '62.68.160.0/19',
        'GH': '154.160.0.0/12',
        'GI': '95.164.0.0/16',
        'GL': '88.83.0.0/19',
        'GM': '160.182.0.0/15',
        'GN': '197.149.192.0/18',
        'GP': '104.250.0.0/19',
        'GQ': '105.235.224.0/20',
        'GR': '94.64.0.0/13',
        'GT': '168.234.0.0/16',
        'GU': '168.123.0.0/16',
        'GW': '197.214.80.0/20',
        'GY': '181.41.64.0/18',
        'HK': '113.252.0.0/14',
        'HN': '181.210.0.0/16',
        'HR': '93.136.0.0/13',
        'HT': '148.102.128.0/17',
        'HU': '84.0.0.0/14',
        'ID': '39.192.0.0/10',
        'IE': '87.32.0.0/12',
        'IL': '79.176.0.0/13',
        'IM': '5.62.80.0/20',
        'IN': '117.192.0.0/10',
        'IO': '203.83.48.0/21',
        'IQ': '37.236.0.0/14',
        'IR': '2.176.0.0/12',
        'IS': '82.221.0.0/16',
        'IT': '79.0.0.0/10',
        'JE': '87.244.64.0/18',
        'JM': '72.27.0.0/17',
        'JO': '176.29.0.0/16',
        'JP': '133.0.0.0/8',
        'KE': '105.48.0.0/12',
        'KG': '158.181.128.0/17',
        'KH': '36.37.128.0/17',
        'KI': '103.25.140.0/22',
        'KM': '197.255.224.0/20',
        'KN': '198.167.192.0/19',
        'KP': '175.45.176.0/22',
        'KR': '175.192.0.0/10',
        'KW': '37.36.0.0/14',
        'KY': '64.96.0.0/15',
        'KZ': '2.72.0.0/13',
        'LA': '115.84.64.0/18',
        'LB': '178.135.0.0/16',
        'LC': '24.92.144.0/20',
        'LI': '82.117.0.0/19',
        'LK': '112.134.0.0/15',
        'LR': '102.183.0.0/16',
        'LS': '129.232.0.0/17',
        'LT': '78.56.0.0/13',
        'LU': '188.42.0.0/16',
        'LV': '46.109.0.0/16',
        'LY': '41.252.0.0/14',
        'MA': '105.128.0.0/11',
        'MC': '88.209.64.0/18',
        'MD': '37.246.0.0/16',
        'ME': '178.175.0.0/17',
        'MF': '74.112.232.0/21',
        'MG': '154.126.0.0/17',
        'MH': '117.103.88.0/21',
        'MK': '77.28.0.0/15',
        'ML': '154.118.128.0/18',
        'MM': '37.111.0.0/17',
        'MN': '49.0.128.0/17',
        'MO': '60.246.0.0/16',
        'MP': '202.88.64.0/20',
        'MQ': '109.203.224.0/19',
        'MR': '41.188.64.0/18',
        'MS': '208.90.112.0/22',
        'MT': '46.11.0.0/16',
        'MU': '105.16.0.0/12',
        'MV': '27.114.128.0/18',
        'MW': '102.70.0.0/15',
        'MX': '187.192.0.0/11',
        'MY': '175.136.0.0/13',
        'MZ': '197.218.0.0/15',
        'NA': '41.182.0.0/16',
        'NC': '101.101.0.0/18',
        'NE': '197.214.0.0/18',
        'NF': '203.17.240.0/22',
        'NG': '105.112.0.0/12',
        'NI': '186.76.0.0/15',
        'NL': '145.96.0.0/11',
        'NO': '84.208.0.0/13',
        'NP': '36.252.0.0/15',
        'NR': '203.98.224.0/19',
        'NU': '49.156.48.0/22',
        'NZ': '49.224.0.0/14',
        'OM': '5.36.0.0/15',
        'PA': '186.72.0.0/15',
        'PE': '186.160.0.0/14',
        'PF': '123.50.64.0/18',
        'PG': '124.240.192.0/19',
        'PH': '49.144.0.0/13',
        'PK': '39.32.0.0/11',
        'PL': '83.0.0.0/11',
        'PM': '70.36.0.0/20',
        'PR': '66.50.0.0/16',
        'PS': '188.161.0.0/16',
        'PT': '85.240.0.0/13',
        'PW': '202.124.224.0/20',
        'PY': '181.120.0.0/14',
        'QA': '37.210.0.0/15',
        'RE': '102.35.0.0/16',
        'RO': '79.112.0.0/13',
        'RS': '93.86.0.0/15',
        'RU': '5.136.0.0/13',
        'RW': '41.186.0.0/16',
        'SA': '188.48.0.0/13',
        'SB': '202.1.160.0/19',
        'SC': '154.192.0.0/11',
        'SD': '102.120.0.0/13',
        'SE': '78.64.0.0/12',
        'SG': '8.128.0.0/10',
        'SI': '188.196.0.0/14',
        'SK': '78.98.0.0/15',
        'SL': '102.143.0.0/17',
        'SM': '89.186.32.0/19',
        'SN': '41.82.0.0/15',
        'SO': '154.115.192.0/18',
        'SR': '186.179.128.0/17',
        'SS': '105.235.208.0/21',
        'ST': '197.159.160.0/19',
        'SV': '168.243.0.0/16',
        'SX': '190.102.0.0/20',
        'SY': '5.0.0.0/16',
        'SZ': '41.84.224.0/19',
        'TC': '65.255.48.0/20',
        'TD': '154.68.128.0/19',
        'TG': '196.168.0.0/14',
        'TH': '171.96.0.0/13',
        'TJ': '85.9.128.0/18',
        'TK': '27.96.24.0/21',
        'TL': '180.189.160.0/20',
        'TM': '95.85.96.0/19',
        'TN': '197.0.0.0/11',
        'TO': '175.176.144.0/21',
        'TR': '78.160.0.0/11',
        'TT': '186.44.0.0/15',
        'TV': '202.2.96.0/19',
        'TW': '120.96.0.0/11',
        'TZ': '156.156.0.0/14',
        'UA': '37.52.0.0/14',
        'UG': '102.80.0.0/13',
        'US': '6.0.0.0/8',
        'UY': '167.56.0.0/13',
        'UZ': '84.54.64.0/18',
        'VA': '212.77.0.0/19',
        'VC': '207.191.240.0/21',
        'VE': '186.88.0.0/13',
        'VG': '66.81.192.0/20',
        'VI': '146.226.0.0/16',
        'VN': '14.160.0.0/11',
        'VU': '202.80.32.0/20',
        'WF': '117.20.32.0/21',
        'WS': '202.4.32.0/19',
        'YE': '134.35.0.0/16',
        'YT': '41.242.116.0/22',
        'ZA': '41.0.0.0/11',
        'ZM': '102.144.0.0/13',
        'ZW': '102.177.192.0/18',
    }

    @classmethod
    def random_ipv4(cls, code_or_block):
        """Return a random IPv4 address (as a string) inside the given CIDR
        block, or inside the mapped block for a 2-letter country code.
        Returns None for an unknown country code."""
        if len(code_or_block) == 2:
            block = cls._country_ip_map.get(code_or_block.upper())
            if not block:
                return None
        else:
            block = code_or_block
        addr, preflen = block.split('/')
        # Min address = network base; max = base with all host bits set
        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
5792
5793
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler that lets each request override the global proxy via the
    'Ytdl-request-proxy' header, and hands socks proxies off to the
    http/https handlers via the 'Ytdl-socks-proxy' header."""

    def __init__(self, proxies=None):
        # Set default handlers
        # Install http_open/https_open even when no proxy is configured for
        # that scheme, so proxy_open always runs and can honour per-request
        # proxies.  The lambda defaults bind type/meth at definition time.
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                    meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        # Per-request override; the internal header must not leak to the wire
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers do wrapping the socket with socks
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
5817
5818
5819 # Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
5820 # released into Public Domain
5821 # https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
5822
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    n = int(n)
    if n < 0:
        # Mirror the historical behaviour: negative input produced b'\000'
        # (same output as n == 0) in the original chunk-based implementation
        n = 0
    # Minimum number of bytes needed to represent n (1 byte for n == 0,
    # matching the original's special case)
    length = max(1, (n.bit_length() + 7) // 8)
    if blocksize > 0 and length % blocksize:
        # Front-pad with zeros up to the next multiple of blocksize
        length += blocksize - length % blocksize
    return n.to_bytes(length, 'big')
5851
5852
5853 def bytes_to_long(s):
5854 """bytes_to_long(string) : long
5855 Convert a byte string to a long integer.
5856
5857 This is (essentially) the inverse of long_to_bytes().
5858 """
5859 acc = 0
5860 length = len(s)
5861 if length % 4:
5862 extra = (4 - length % 4)
5863 s = b'\000' * extra + s
5864 length = length + extra
5865 for i in range(0, length, 4):
5866 acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
5867 return acc
5868
5869
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # The payload interprets the data with the byte order reversed
    # (i.e. as a little-endian integer)
    reversed_hex = binascii.hexlify(data[::-1])
    payload = int(reversed_hex, 16)
    encrypted = pow(payload, exponent, modulus)
    return format(encrypted, 'x')
5885
5886
def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data input data
    @param {int} length target length
    @returns {int[]} padded data
    @raises ValueError if data does not fit in length (needs 11 bytes overhead)
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # RFC 8017 (PKCS #1 v2.2, EME-PKCS1-v1_5) requires the padding string PS
    # to consist of *nonzero* pseudo-random octets; a zero byte would make the
    # receiver find the 0x00 separator too early and truncate the data.
    # (The previous randint(0, 254) could emit zeros.)
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
5900
5901
def encode_base_n(num, n, table=None):
    """Encode the non-negative integer num as a string in base n, using
    table as the digit alphabet (defaults to 0-9a-zA-Z truncated to n)."""
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))

    if num == 0:
        return table[0]

    digits = []
    while num:
        num, remainder = divmod(num, n)
        digits.append(table[remainder])
    return ''.join(reversed(digits))
5918
5919
def decode_packed_codes(code):
    """Decode JavaScript obfuscated with the common P.A.C.K.E.R. scheme:
    rebuild the symbol table and substitute every token back into the code."""
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    symbols = symbols.split('|')

    # Map each base-n encoded index to its original symbol; an empty
    # replacement means the token stands for itself
    symbol_table = {}
    for idx in reversed(range(int(count))):
        token = encode_base_n(idx, base)
        symbol_table[token] = symbols[idx] or token

    return re.sub(
        r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
        obfuscated_code)
5936
5937
def caesar(s, alphabet, shift):
    """Apply a Caesar rotation of `shift` positions over `alphabet` to s;
    characters outside the alphabet pass through unchanged."""
    if shift == 0:
        return s
    size = len(alphabet)
    return ''.join(
        c if c not in alphabet else alphabet[(alphabet.index(c) + shift) % size]
        for c in s)
5945
5946
def rot47(s):
    """Apply the ROT47 cipher: rotate every printable ASCII character
    ('!' through '~') by 47 positions."""
    ROT47_ALPHABET = r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~'''
    return caesar(s, ROT47_ALPHABET, 47)
5949
5950
def parse_m3u8_attributes(attrib):
    """Parse an M3U8 attribute list (KEY=value,KEY="quoted value",...) into
    a dict, stripping the surrounding quotes from quoted values."""
    info = {}
    for key, val in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
        # Quoted values may contain commas; drop the quotes themselves
        info[key] = val[1:-1] if val.startswith('"') else val
    return info
5958
5959
def urshift(val, n):
    """Unsigned (logical) 32-bit right shift, like JavaScript's `>>>`."""
    if val < 0:
        # Re-interpret the negative value as its unsigned 32-bit equivalent
        val += 0x100000000
    return val >> n
5962
5963
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    """Decode a PNG image into (width, height, pixels).

    pixels is a list of rows, each row a flat list of channel bytes with the
    per-scanline PNG filters (Sub/Up/Average/Paeth) undone.

    NOTE(review): assumes 8-bit RGB data (3 bytes per pixel, no alpha, no
    interlacing) -- other color types/bit depths are not handled; confirm
    against callers.

    @param png_data  raw bytes of a complete PNG file
    @raises IOError  if the signature/IHDR is invalid or no IDAT data exists
    """
    # Reference: https://www.w3.org/TR/PNG/
    header = png_data[8:]

    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    # Read a big-endian unsigned int of 1, 2 or 4 bytes
    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    chunks = []

    # Split the file into chunks: <length><type><data><crc>
    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    # IHDR is the first chunk (validated by the signature check above)
    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    # Concatenate all IDAT chunks into one zlib-compressed stream
    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    # 3 channel bytes per pixel (RGB) per scanline
    stride = width * 3
    pixels = []

    # Look up an already-decoded channel byte by its flat index
    def _get_pixel(idx):
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    # Each scanline is prefixed by one filter-type byte (0-4)
    for y in range(height):
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            # 'left' is the same channel of the previous pixel, 3 bytes back
            # (undefined for the first pixel of a row, hence x > 2)
            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                # 'c' is the matching channel of the upper-left neighbour
                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                # Paeth predictor: choose the neighbour closest to p
                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
6069
6070
def write_xattr(path, key, value):
    """Set the extended attribute `key` to the bytes `value` on the file at
    `path`.

    Tries, in order: the 'pyxattr'/'xattr' python modules; NTFS Alternate
    Data Streams on Windows; the 'setfattr' or 'xattr' command-line tools.

    @raises XAttrUnavailableError  when no usable implementation is found
    @raises XAttrMetadataError     when setting the attribute fails
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:

                # The CLI tools take the value as a text argument
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = process_communicate_or_kill(p)
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")
6153
6154
def random_birthday(year_field, month_field, day_field):
    """Generate a random birth date between 1950-01-01 and 1995-12-31 and
    return it as a dict keyed by the given field names, values as strings."""
    start_date = datetime.date(1950, 1, 1)
    end_date = datetime.date(1995, 12, 31)
    span_days = (end_date - start_date).days
    random_date = start_date + datetime.timedelta(random.randint(0, span_days))
    return {
        year_field: str(random_date.year),
        month_field: str(random_date.month),
        day_field: str(random_date.day),
    }
6165
6166
# Templates for internet shortcut files, which are plain text files.

# Windows '.url' internet shortcut (INI-style)
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

# macOS '.webloc' shortcut (an XML property list)
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

# freedesktop '.desktop' link entry (Linux)
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
6192
6193
def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.

    @raises ValueError for IPv6 (bracketed) netlocs, which are unsupported
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    # NOTE(review): port 80 is dropped unconditionally, even for non-http
    # schemes (e.g. https) where 80 is not the default -- confirm intended.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
6236
6237
def to_high_limit_path(path):
    r"""On Windows/Cygwin, return the absolute path prefixed with '\\?\' to
    work around the MAX_PATH limitation; return the path unchanged elsewhere.

    Note: individual path segments may still have a limited maximum length.
    """
    if sys.platform in ['win32', 'cygwin']:
        # A raw string cannot end with a backslash, so spell it out escaped
        return '\\\\?\\' + os.path.abspath(path)
    return path
6244
6245
def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
    """Format obj (or obj[field] when field is given) with template.

    Values found in `ignore` (before or after applying `func`) yield
    `default` instead of being formatted.
    """
    if field is None:
        val = default if obj is None else obj
    else:
        val = obj.get(field, default)
    if func and val not in ignore:
        val = func(val)
    # func may map the value into the ignore set, so check again
    if val in ignore:
        return default
    return template % val
6254
6255
def clean_podcast_url(url):
    """Strip well-known podcast tracking/analytics redirect prefixes
    (podtrac, blubrry, chartable, acast, podcorn, podsights) from url."""
    tracker_re = r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/'''
    return re.sub(tracker_re, '', url)
6271
6272
# Hexadecimal digits used for random UUID characters
_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    """Return a random UUID string in the version-4 layout.

    NOTE(review): the variant nibble ('y' position) is drawn from the full
    hex range instead of 8-b, so the result is not strictly RFC 4122
    conformant -- confirm whether callers care.
    """
    pick_hex = lambda _match: _HEX_TABLE[random.randint(0, 15)]
    return re.sub(r'[xy]', pick_hex, 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
6278
6279
def make_dir(path, to_screen=None):
    """Create the parent directory of `path` if it does not exist.

    @param path       file path whose parent directory should exist
    @param to_screen  optional callable used to report failures
    @returns True on success (or nothing to do), False on failure
    """
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        # BUGFIX: the original tested `callable(to_screen) is not None`,
        # which is always True (callable() returns a bool), so a None
        # to_screen was called and raised TypeError inside this handler
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False
6290
6291
def get_executable_path():
    """Return the absolute base directory yt-dlp is running from, taking
    PyInstaller bundles and zip execution into account."""
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):
        # Running from PyInstaller
        base = os.path.dirname(sys.executable)
    elif isinstance(globals().get('__loader__'), zipimporter):
        # Running from ZIP
        base = os.path.join(os.path.dirname(__file__), '../..')
    else:
        base = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(base)
6301
6302
def load_plugins(name, suffix, namespace):
    """Load classes whose names end in `suffix` from the module `name` inside
    the 'ytdlp_plugins' directory next to the executable, and inject them into
    `namespace`. Missing plugin modules are silently ignored.

    NOTE(review): uses the long-deprecated `imp` module -- consider importlib.

    @returns dict of the newly added {class_name: class} entries
    """
    plugin_info = [None]
    classes = {}
    try:
        plugin_info = imp.find_module(
            name, [os.path.join(get_executable_path(), 'ytdlp_plugins')])
        plugins = imp.load_module(name, *plugin_info)
        # NB: this loop rebinds `name`; the parameter is not used below it
        for name in dir(plugins):
            if name in namespace:
                # Never overwrite existing entries
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes[name] = namespace[name] = klass
    except ImportError:
        pass
    finally:
        # imp.find_module returns an open file object that must be closed
        if plugin_info[0] is not None:
            plugin_info[0].close()
    return classes
6323
6324
def traverse_obj(
        obj, *path_list, default=None, expected_type=None, get_all=True,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple
    @param path_list        A list of paths which are checked one by one.
                            Each path is a list of keys where each key is a string,
                            a tuple of strings or "...". When a tuple is given,
                            all the keys given in the tuple are traversed, and
                            "..." traverses all the keys in the object
    @param default          Default value to return
    @param expected_type    Only accept final value of this type (Can also be any callable)
    @param get_all          Return all the values obtained from a path or only the first one
    @param casesense        Whether to consider dictionary keys as case sensitive
    @param is_user_input    Whether the keys are generated from user input. If True,
                            strings are converted to int/slice if necessary
    @param traverse_string  Whether to traverse inside strings. If True, any
                            non-compatible object will also be converted into a string
    # TODO: Write tests
    '''
    if not casesense:
        # Normalize all string keys to lowercase up front; _lower is also
        # used below when matching dict keys case-insensitively
        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
        path_list = (map(_lower, variadic(path)) for path in path_list)

    def _traverse_obj(obj, path, _current_depth=0):
        # `depth` (nonlocal) records how many branching levels ("..." or
        # tuple keys) were entered, so the caller knows how far to flatten
        nonlocal depth
        if obj is None:
            return None
        path = tuple(variadic(path))
        for i, key in enumerate(path):
            if isinstance(key, (list, tuple)):
                # Alternative keys: traverse each one, then treat the
                # resulting list like a "..." expansion
                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
                key = ...
            if key is ...:
                # Branch into every value of the current object
                obj = (obj.values() if isinstance(obj, dict)
                       else obj if isinstance(obj, (list, tuple, LazyList))
                       else str(obj) if traverse_string else [])
                _current_depth += 1
                depth = max(depth, _current_depth)
                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
                # Plain dict lookup; falls back to a linear case-insensitive
                # scan when casesense is off and the exact key is absent
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    # Convert user-supplied strings to int indices or slices
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if key == slice(None):
                        # "[:]" is equivalent to "..."
                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
                if not isinstance(key, (int, slice)):
                    return None
                if not isinstance(obj, (list, tuple, LazyList)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    # Build the final type filter from expected_type
    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    elif expected_type is not None:
        type_test = expected_type
    else:
        type_test = lambda val: val

    for path in path_list:
        depth = 0
        val = _traverse_obj(obj, path)
        if val is not None:
            if depth:
                # Branching occurred: flatten the nested result lists
                # (depth - 1 levels) and drop Nones, then type-filter
                for _ in range(depth - 1):
                    val = itertools.chain.from_iterable(v for v in val if v is not None)
                val = [v for v in map(type_test, val) if v is not None]
                if val:
                    return val if get_all else val[0]
            else:
                val = type_test(val)
                if val is not None:
                    return val
    return default
6407
6408
def traverse_dict(dictn, keys, casesense=True):
    ''' For backward compatibility. Do not use '''
    # Thin deprecated wrapper around traverse_obj()
    return traverse_obj(
        dictn, keys, casesense=casesense,
        is_user_input=True, traverse_string=True)
6413
6414
def variadic(x, allowed_types=(str, bytes)):
    """Return x unchanged when it is an iterable container, otherwise wrap it
    in a 1-tuple. Types in allowed_types (strings/bytes by default) count as
    atoms even though they are iterable."""
    is_container = isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types)
    return x if is_container else (x,)
6417
6418
# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers=None):
    """Create a JWS token (header.payload.signature) signed with HMAC-SHA256.

    @param payload_data  JSON-serializable claims
    @param key           shared secret (str)
    @param headers       optional extra/overriding header fields
    @returns bytes token in JWS Compact Serialization

    NOTE(review): uses standard base64 with padding rather than the
    base64url-without-padding encoding RFC 7515 specifies -- kept as-is for
    compatibility with existing consumers; confirm before changing.
    """
    header_data = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    # `headers={}` mutable-default replaced with None; behavior is unchanged
    # since an empty dict was falsy here anyway
    if headers:
        header_data.update(headers)
    header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
    payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
    h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
    signature_b64 = base64.b64encode(h.digest())
    return header_b64 + b'.' + payload_b64 + b'.' + signature_b64