]> jfr.im git - yt-dlp.git/commitdiff
Release 2021.03.01 2021.03.01
authorpukkandan <redacted>
Mon, 1 Mar 2021 00:09:50 +0000 (05:39 +0530)
committerpukkandan <redacted>
Mon, 1 Mar 2021 00:09:50 +0000 (05:39 +0530)
CONTRIBUTORS
Changelog.md
README.md
yt_dlp/YoutubeDL.py
yt_dlp/downloader/common.py
yt_dlp/downloader/fragment.py
yt_dlp/downloader/niconico.py
yt_dlp/extractor/common.py
yt_dlp/extractor/youtube.py

index 1457f2246515c8492a5a5905f36fd114923a8bb1..7f4f25e56e2f2a907fe737162b428fa80d892dec 100644 (file)
@@ -23,3 +23,5 @@ tsukumi
 bbepis
 Pccode66
 Ashish
+RobinD42
+hseg
\ No newline at end of file
index e21cc7cc29370d5569e552b32d7ea9fcfdbf04cf..07b34eafb2b4298ff522fbf9b950466ff92782e4 100644 (file)
@@ -17,6 +17,27 @@ # Instructions for creating release
 -->
 
 
+### 2021.03.01
+* Allow specifying path in `--external-downloader`
+* Add option `--sleep-requests` to sleep between requests
+* Add option `--extractor-retries` to retry on known extractor errors
+* Extract comments only when needed
+* `--get-comments` doesn't imply `--write-info-json` if `-J`, `-j` or `--print-json` are used
+* [youtube] Retry on more known errors than just HTTP-5xx
+* [tennistv] Fix format sorting
+* [readthedocs] Improvements by [shirt](https://github.com/shirt-dev)
+* [hls] Fix bug with m3u8 format extraction
+* [bilibiliaudio] Recognize the file as audio-only
+* [hrfernsehen] Fix wrong import
+* [youtube] Fix inconsistent `webpage_url`
+* [hls] Enable `--hls-use-mpegts` by default when downloading live-streams
+* [viki] Fix viki play pass authentication by [RobinD42](https://github.com/RobinD42)
+* [embedthumbnail] Fix bug with deleting original thumbnail
+* [build] Fix completion paths, zsh pip completion install by [hseg](https://github.com/hseg)
+* [ci] Disable download tests unless specifically invoked
+* Cleanup some code and fix typos
+
+
 ### 2021.02.24
 * Moved project to an organization [yt-dlp](https://github.com/yt-dlp)
 * **Completely changed project name to yt-dlp** by [Pccode66](https://github.com/Pccode66) and [pukkandan](https://github.com/pukkandan)
index 0cc763491ce95e87662dcaafad498d4b85a9ef77..22125cd6bec9d95da94c2ac71b35a98d3e4c5f05 100644 (file)
--- a/README.md
+++ b/README.md
@@ -697,6 +697,8 @@ ## SponSkrub (SponsorBlock) Options:
                                      directory
 
 ## Extractor Options:
+    --extractor-retries RETRIES      Number of retries for known extractor
+                                     errors (default is 10), or "infinite"
     --allow-dynamic-mpd              Process dynamic DASH manifests (default)
                                      (Alias: --no-ignore-dynamic-mpd)
     --ignore-dynamic-mpd             Do not process dynamic DASH manifests
index d1f365814d2c7e7ee1d33cc4ea02fce70c2d47a7..e58f7a32f87902641dd20b08dd2639fff6cf7a2d 100644 (file)
@@ -2958,7 +2958,7 @@ def _write_thumbnails(self, info_dict, filename):  # return the extensions
                 self.to_screen('[%s] %s: Thumbnail %sis already present' %
                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
             else:
-                self.to_screen('[%s] %s: Downloading thumbnail %s...' %
+                self.to_screen('[%s] %s: Downloading thumbnail %s ...' %
                                (info_dict['extractor'], info_dict['id'], thumb_display_id))
                 try:
                     uf = self.urlopen(t['url'])
index 7f72969157a713988d53f4df0f95fcb6c9992453..2a9a62df49b163cc389ce607f2bffb7307b15a22 100644 (file)
@@ -312,7 +312,7 @@ def report_resuming_byte(self, resume_len):
     def report_retry(self, err, count, retries):
         """Report retry in case of HTTP error 5xx"""
         self.to_screen(
-            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
+            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
             % (error_to_compat_str(err), count, self.format_retries(retries)))
 
     def report_file_already_downloaded(self, file_name):
@@ -359,7 +359,7 @@ def download(self, filename, info_dict, subtitle=False):
                 max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
                 sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
                 self.to_screen(
-                    '[download] Sleeping %s seconds...' % (
+                    '[download] Sleeping %s seconds ...' % (
                         int(sleep_interval) if sleep_interval.is_integer()
                         else '%.2f' % sleep_interval))
                 time.sleep(sleep_interval)
@@ -369,7 +369,7 @@ def download(self, filename, info_dict, subtitle=False):
                 sleep_interval_sub = self.params.get('sleep_interval_subtitles')
             if sleep_interval_sub > 0:
                 self.to_screen(
-                    '[download] Sleeping %s seconds...' % (
+                    '[download] Sleeping %s seconds ...' % (
                         sleep_interval_sub))
                 time.sleep(sleep_interval_sub)
         return self.real_download(filename, info_dict), True
index 44beed06644a5e6ccdb221b1d4e15d85b28c1b90..a0c1d13ac2a3149cac7eac2bd907e89c3bd9da12 100644 (file)
@@ -55,11 +55,11 @@ class FragmentFD(FileDownloader):
 
     def report_retry_fragment(self, err, frag_index, count, retries):
         self.to_screen(
-            '[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s)...'
+            '[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
             % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
 
     def report_skip_fragment(self, frag_index):
-        self.to_screen('[download] Skipping fragment %d...' % frag_index)
+        self.to_screen('[download] Skipping fragment %d ...' % frag_index)
 
     def _prepare_url(self, info_dict, url):
         headers = info_dict.get('http_headers')
@@ -174,7 +174,7 @@ def _prepare_frag_download(self, ctx):
                         '.ytdl file is corrupt' if is_corrupt else
                         'Inconsistent state of incomplete fragment download')
                     self.report_warning(
-                        '%s. Restarting from the beginning...' % message)
+                        '%s. Restarting from the beginning ...' % message)
                     ctx['fragment_index'] = resume_len = 0
                     if 'ytdl_corrupt' in ctx:
                         del ctx['ytdl_corrupt']
index 38476783f67528a645149d41dc6dad73e4b95cf5..dc49dff585cb1e2bcd995aa90b1f2aecc06d1c0d 100644 (file)
@@ -29,7 +29,7 @@ def real_download(self, filename, info_dict):
         heartbeat_url = heartbeat_info_dict['url']
         heartbeat_data = heartbeat_info_dict['data']
         heartbeat_interval = heartbeat_info_dict.get('interval', 30)
-        self.to_screen('[%s] Heartbeat with %s second interval...' % (self.FD_NAME, heartbeat_interval))
+        self.to_screen('[%s] Heartbeat with %s second interval ...' % (self.FD_NAME, heartbeat_interval))
 
         def heartbeat():
             try:
index 3326d436bb817821b9cec6e1f88718cbaef00e84..45bc229ff2c2ce3b3e864b9d19afc3b62d62ca86 100644 (file)
@@ -617,7 +617,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
         if not self._downloader._first_webpage_request:
             sleep_interval = float_or_none(self._downloader.params.get('sleep_interval_requests')) or 0
             if sleep_interval > 0:
-                self.to_screen('Sleeping %s seconds...' % sleep_interval)
+                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                 time.sleep(sleep_interval)
         else:
             self._downloader._first_webpage_request = False
index 2e4ce4c128a5a2c4f1373ffe41d57ad4ad00cbb5..8b0d12bb5cb802b60f3af50f988a3ba2fb040394 100644 (file)
@@ -3020,7 +3020,8 @@ def _real_extract(self, url):
             # See: https://github.com/yt-dlp/yt-dlp/issues/116
             if count:
                 self.report_warning('Incomplete yt initial data recieved. Retrying ...')
-            webpage = self._download_webpage(url, item_id,
+            webpage = self._download_webpage(
+                url, item_id,
                 'Downloading webpage%s' % ' (retry #%d)' % count if count else '')
             identity_token = self._extract_identity_token(webpage, item_id)
             data = self._extract_yt_initial_data(item_id, webpage)