[ie/crunchyroll] Fix auth and remove cookies support (#9749)

diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index 56366760019dc394159ebf54a93e69bd07e86577..9f730d03846b05180eca59f58d214a5b108946b5 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
@@ -1,7 +1,7 @@
 import collections
 import contextlib
 import copy
-import datetime
+import datetime as dt
 import errno
 import fileinput
 import http.cookiejar
@@ -25,7 +25,7 @@
 
 from .cache import Cache
 from .compat import functools, urllib  # isort: split
-from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
+from .compat import compat_os_name, urllib_req_to_req
 from .cookies import LenientSimpleCookie, load_cookies
 from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
 from .downloader.rtmp import rtmpdump_version
     UserNotLive,
     YoutubeDLError,
     age_restricted,
-    args_to_str,
     bug_reports_message,
     date_from_str,
     deprecation_warning,
     sanitize_filename,
     sanitize_path,
     sanitize_url,
+    shell_quote,
     str_or_none,
     strftime_or_none,
     subtitles_filename,
     supports_terminal_sequences,
     system_identifier,
+    filesize_from_tbr,
     timetuple_from_msec,
     to_high_limit_path,
     traverse_obj,
@@ -480,7 +481,7 @@ class YoutubeDL:
     nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
     max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
     continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
-    external_downloader_args, concurrent_fragment_downloads.
+    external_downloader_args, concurrent_fragment_downloads, progress_delta.
 
     The following options are used by the post processors:
     ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
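The only functional change in this hunk is that `progress_delta` joins the documented downloader options. A minimal usage sketch through the embedding API, assuming the key maps to the `--progress-delta` option and gives the number of seconds to wait between progress outputs:

```python
# Hypothetical sketch; 'progress_delta' is assumed to be the minimum number of
# seconds between progress output updates for the downloader.
from yt_dlp import YoutubeDL

ydl_opts = {
    'progress_delta': 1.0,  # assumption: emit progress at most about once per second
}
with YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://example.com/video'])
```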
@@ -822,7 +823,7 @@ def warn_if_short_id(self, argv):
             self.report_warning(
                 'Long argument string detected. '
                 'Use -- to separate parameters and URLs, like this:\n%s' %
-                args_to_str(correct_argv))
+                shell_quote(correct_argv))
 
     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
@@ -1354,7 +1355,7 @@ def create_key(outer_mobj):
                 value, fmt = escapeHTML(str(value)), str_fmt
             elif fmt[-1] == 'q':  # quoted
                 value = map(str, variadic(value) if '#' in flags else [value])
-                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
+                value, fmt = shell_quote(value, shell=True), str_fmt
             elif fmt[-1] == 'B':  # bytes
                 value = f'%{str_fmt}'.encode() % str(value).encode()
                 value, fmt = value.decode('utf-8', 'ignore'), 's'
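For the `%(...)q` output-template conversion this is intended to be behaviour-preserving on POSIX: each value is shell-quoted and the results are joined with spaces, with the `#` flag splitting list values into separate arguments. A small sketch of exercising the conversion through the public template API, assuming `evaluate_outtmpl` applies the same engine as the code above:

```python
# Hypothetical demonstration of the 'q' (quoted) template conversion.
from yt_dlp import YoutubeDL

ydl = YoutubeDL()
info = {'id': 'x', 'ext': 'mp4', 'title': "it's a test"}
print(ydl.evaluate_outtmpl('%(title)q', info))
# expected on POSIX: 'it'"'"'s a test'
```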
@@ -2628,7 +2629,7 @@ def _fill_common_fields(self, info_dict, final=True):
                 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                 # see http://bugs.python.org/issue1646728)
                 with contextlib.suppress(ValueError, OverflowError, OSError):
-                    upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
+                    upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
                     info_dict[date_key] = upload_date.strftime('%Y%m%d')
 
         if not info_dict.get('release_year'):
@@ -2782,7 +2783,7 @@ def sanitize_numeric_fields(info):
 
         get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
         if not get_from_start:
-            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+            info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
         if info_dict.get('is_live') and formats:
             formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
             if get_from_start and not formats:
@@ -2826,9 +2827,8 @@ def is_wellformed(f):
                 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
             # For fragmented formats, "tbr" is often max bitrate and not average
             if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
-                    and info_dict.get('duration') and format.get('tbr')
                     and not format.get('filesize') and not format.get('filesize_approx')):
-                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
+                format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
             format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)
 
         # Safeguard against old/insecure infojson when using --load-info-json
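The inline `duration * tbr * (1024 / 8)` estimate is factored out into `filesize_from_tbr`, which also tolerates missing inputs by returning `None` instead of needing the explicit `duration`/`tbr` guard. A minimal sketch of the assumed semantics (the real helper in `yt_dlp.utils` may use a slightly different kbit/s-to-bytes factor):

```python
# Assumed behaviour of filesize_from_tbr(): tbr in Kbit/s, duration in seconds,
# result is an approximate size in bytes, or None if either input is missing.
# The 1024/8 factor mirrors the inline calculation this change replaces.
def filesize_from_tbr_sketch(tbr, duration):
    if tbr is None or duration is None:
        return None
    return int(duration * tbr * (1024 / 8))
```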
@@ -3878,8 +3878,8 @@ def simplified_codec(f, field):
                 delim, (
                     format_field(f, 'filesize', ' \t%s', func=format_bytes)
                     or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
-                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
-                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
+                    or format_field(filesize_from_tbr(f.get('tbr'), info_dict.get('duration')), None,
+                                    self._format_out('~\t%s', self.Styles.SUPPRESS), func=format_bytes)),
                 format_field(f, 'tbr', '\t%dk', func=round),
                 shorten_protocol_name(f.get('protocol', '')),
                 delim,
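In the `-F` format table the same helper now feeds the size column through `format_field(..., func=format_bytes)`, so the estimate is rendered like the exact sizes but in the suppressed style. A rough illustration of the resulting fallback chain, assuming `format_bytes` turns a byte count into a human-readable string:

```python
# Hypothetical illustration of the FILESIZE column: exact size, then reported
# approximation, then a '~' estimate derived from tbr and duration.
from yt_dlp.utils import filesize_from_tbr, format_bytes

def filesize_cell(fmt, info_dict):
    if fmt.get('filesize'):
        return format_bytes(fmt['filesize'])
    if fmt.get('filesize_approx'):
        return '≈' + format_bytes(fmt['filesize_approx'])
    approx = filesize_from_tbr(fmt.get('tbr'), info_dict.get('duration'))
    return f'~{format_bytes(approx)}' if approx else ''
```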