jfr.im git - yt-dlp.git/commitdiff
[cleanup] Misc fixes
author: pukkandan <redacted>
Sun, 27 Mar 2022 02:20:43 +0000 (07:50 +0530)
committer: pukkandan <redacted>
Tue, 5 Apr 2022 12:42:18 +0000 (18:12 +0530)
Closes https://github.com/yt-dlp/yt-dlp/pull/3213, Closes https://github.com/yt-dlp/yt-dlp/pull/3117

Related: https://github.com/yt-dlp/yt-dlp/issues/3146#issuecomment-1077323114, https://github.com/yt-dlp/yt-dlp/pull/3277#discussion_r841019671, https://github.com/yt-dlp/yt-dlp/commit/a825ffbffa0bea322e3ccb44c6f8e01d8d9572fb#commitcomment-68538986, https://github.com/yt-dlp/yt-dlp/issues/2360, https://github.com/yt-dlp/yt-dlp/commit/5fa3c9a88f597625296981a4a26be723e65d4842#r70393519, https://github.com/yt-dlp/yt-dlp/commit/5fa3c9a88f597625296981a4a26be723e65d4842#r70393254

33 files changed:
.github/ISSUE_TEMPLATE/1_broken_site.yml
.github/ISSUE_TEMPLATE/2_site_support_request.yml
.github/ISSUE_TEMPLATE/3_site_feature_request.yml
.github/ISSUE_TEMPLATE/4_bug_report.yml
.github/ISSUE_TEMPLATE/5_feature_request.yml
.github/ISSUE_TEMPLATE/6_question.yml
.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml
.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml
.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml
.github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml
.github/ISSUE_TEMPLATE_tmpl/6_question.yml
.gitignore
README.md
yt_dlp/YoutubeDL.py
yt_dlp/__init__.py
yt_dlp/cookies.py
yt_dlp/downloader/fragment.py
yt_dlp/downloader/http.py
yt_dlp/extractor/bilibili.py
yt_dlp/extractor/canvas.py
yt_dlp/extractor/common.py
yt_dlp/extractor/dropout.py
yt_dlp/extractor/facebook.py
yt_dlp/extractor/generic.py
yt_dlp/extractor/limelight.py
yt_dlp/extractor/niconico.py
yt_dlp/extractor/tiktok.py
yt_dlp/extractor/yandexvideo.py
yt_dlp/extractor/youtube.py
yt_dlp/options.py
yt_dlp/postprocessor/ffmpeg.py
yt_dlp/utils.py

index 4d91871434bd1658135c813518fb6bac3c05c22a..c671a19105d44bbde50e6bf0d07074305d761977 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a broken site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index cff73b555a983aee5358705dff491e3e936f332a..5ff022a040b2626ca50a37ebd17525c286d7e2b4 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index 44012044a0455906b28873864669aac8137f880a..acdfeb0384536a2c66acfa4bfeb94cda706e8f1e 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a site feature request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index d93380725cf34e71b2239806737eeafac2477b71..a4a038fc8941ca98a01bb01e1c77a2db8dd12f46 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index 51987d5336e8760b8866b1e2aba411937e6fc894..1bdafc441925422603e1d01a5573e80f7fd45d9d 100644 (file)
@@ -13,7 +13,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.03.08.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **2022.03.08.1** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
           required: true
@@ -30,3 +30,24 @@ body:
       placeholder: WRITE DESCRIPTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your feature request involves an existing yt-dlp command, provide the complete verbose output of that command.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
index 061158ed3667e6785137f6b1236f0d269a4a1867..030d2cfe7812ce1eccf091b7c90cde93016e2eb8 100644 (file)
@@ -35,7 +35,7 @@ body:
     attributes:
       label: Verbose log
       description: |
-        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        If your question involves a yt-dlp command, provide the complete verbose output of that command.
         Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
         It should look similar to this:
       placeholder: |
index fd6435ba6093365d790a6b118c98cb3433ec9a81..422af9c7205ce1d3a61156ff0f88a58953c98ad3 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a broken site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index f380c13312fc118d31927d39b20cc43bf0f842cb..fec50559a4e455359c89f8840a659a8809a75d5b 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index 88b1f1217e7b2764403782dbebbbd708c33577cb..266408c19906d61447b49b7056d3b4bab4586eaf 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a site feature request
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index 03a6ba5519dc833b405140e738887081a67e590b..8b49b63851fdd8f512e363fb21ad0af474217e78 100644 (file)
@@ -11,7 +11,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are alive and playable in a browser
           required: true
index eb5d3d63475e5bcd5bfdafbe34321544b8fcab9a..1f33f09dc94d1a192128d05157de85b9a848a454 100644 (file)
@@ -13,7 +13,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **%(version)s**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
+        - label: I've verified that I'm running yt-dlp version **%(version)s** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
           required: true
@@ -30,3 +30,24 @@ body:
       placeholder: WRITE DESCRIPTION HERE
     validations:
       required: true
+  - type: textarea
+    id: log
+    attributes:
+      label: Verbose log
+      description: |
+        If your feature request involves an existing yt-dlp command, provide the complete verbose output of that command.
+        Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
+        It should look similar to this:
+      placeholder: |
+        [debug] Command-line config: ['-vU', 'http://www.youtube.com/watch?v=BaW_jenozKc']
+        [debug] Portable config file: yt-dlp.conf
+        [debug] Portable config: ['-i']
+        [debug] Encodings: locale cp1252, fs utf-8, stdout utf-8, stderr utf-8, pref cp1252
+        [debug] yt-dlp version 2021.12.01 (exe)
+        [debug] Python version 3.8.8 (CPython 64bit) - Windows-10-10.0.19041-SP0
+        [debug] exe versions: ffmpeg 3.0.1, ffprobe 3.0.1
+        [debug] Optional libraries: Cryptodome, keyring, mutagen, sqlite, websockets
+        [debug] Proxy map: {}
+        yt-dlp is up to date (2021.12.01)
+        <more lines>
+      render: shell
index 061158ed3667e6785137f6b1236f0d269a4a1867..030d2cfe7812ce1eccf091b7c90cde93016e2eb8 100644 (file)
@@ -35,7 +35,7 @@ body:
     attributes:
       label: Verbose log
       description: |
-        If your question involes a yt-dlp command, provide the complete verbose output of that command.
+        If your question involves a yt-dlp command, provide the complete verbose output of that command.
         Add the `-vU` flag to **your** command line you run yt-dlp with (`yt-dlp -vU <your command line>`), copy the WHOLE output and insert it below.
         It should look similar to this:
       placeholder: |
index fd51ad66eb9fdcead3c4f2cdf2847bb85dcd30e9..c815538e82537f15bc478afb8cc745826c0cca43 100644 (file)
@@ -116,3 +116,6 @@ yt-dlp.zip
 ytdlp_plugins/extractor/*
 !ytdlp_plugins/extractor/__init__.py
 !ytdlp_plugins/extractor/sample.py
+ytdlp_plugins/postprocessor/*
+!ytdlp_plugins/postprocessor/__init__.py
+!ytdlp_plugins/postprocessor/sample.py
index a75441e355d89e11c8bb6fbf0ab8f0ec80cd0626..6b4f39b9e817705a53ea89bf27247f1b948a5b67 100644 (file)
--- a/README.md
+++ b/README.md
@@ -125,6 +125,7 @@ ### Differences in default behavior
 
 * The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
 * `avconv` is not supported as an alternative to `ffmpeg`
+* yt-dlp stores config files in slightly different locations to youtube-dl. See [configuration](#configuration) for a list of correct locations
 * The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
 * The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
 * The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
@@ -431,24 +432,24 @@ ## Video Selection:
     --dateafter DATE                 Download only videos uploaded on or after
                                      this date. The date formats accepted is the
                                      same as --date
-    --match-filter FILTER            Generic video filter. Any field (see
+    --match-filters FILTER           Generic video filter. Any field (see
                                      "OUTPUT TEMPLATE") can be compared with a
                                      number or a string using the operators
                                      defined in "Filtering formats". You can
                                      also simply specify a field to match if the
-                                     field is present and "!field" to check if
-                                     the field is not present. In addition,
-                                     Python style regular expression matching
-                                     can be done using "~=", and multiple
-                                     filters can be checked with "&". Use a "\"
-                                     to escape "&" or quotes if needed. Eg:
-                                     --match-filter "!is_live & like_count>?100
-                                     description~='(?i)\bcats \& dogs\b'"
-                                     matches only videos that are not live, has
-                                     a like count more than 100 (or the like
-                                     field is not available), and also has a
-                                     description that contains the phrase "cats
-                                     & dogs" (ignoring case)
+                                     field is present, use "!field" to check if
+                                     the field is not present, and "&" to check
+                                     multiple conditions. Use a "\" to escape
+                                     "&" or quotes if needed. If used multiple
+                                     times, the filter matches if at least one
+                                     of the conditions is met. Eg: --match-filter
+                                     !is_live --match-filter "like_count>?100 &
+                                     description~='(?i)\bcats \& dogs\b'"
+                                     matches only videos that are not live OR
+                                     those that have a like count more than 100
+                                     (or the like field is not available) and
+                                     also has a description that contains the
+                                     phrase "cats & dogs" (ignoring case)
     --no-match-filter                Do not use generic video filter (default)
     --no-playlist                    Download only the video, if the URL refers
                                      to a video and a playlist
@@ -840,15 +841,17 @@ ## Post-Processing Options:
                                      (requires ffmpeg and ffprobe)
     --audio-format FORMAT            Specify audio format to convert the audio
                                      to when -x is used. Currently supported
-                                     formats are: best (default) or one of
-                                     best|aac|flac|mp3|m4a|opus|vorbis|wav|alac
-    --audio-quality QUALITY          Specify ffmpeg audio quality, insert a
+                                     formats are: best (default) or one of aac,
+                                     flac, mp3, m4a, opus, vorbis, wav, alac
+    --audio-quality QUALITY          Specify ffmpeg audio quality to use when
+                                     converting the audio with -x. Insert a
                                      value between 0 (best) and 10 (worst) for
                                      VBR or a specific bitrate like 128K
                                      (default 5)
     --remux-video FORMAT             Remux the video into another container if
-                                     necessary (currently supported: mp4|mkv|flv
-                                     |webm|mov|avi|mp3|mka|m4a|ogg|opus). If
+                                     necessary (currently supported: mp4, mkv,
+                                     flv, webm, mov, avi, mka, ogg, aac, flac,
+                                     mp3, m4a, opus, vorbis, wav, alac). If
                                      target container does not support the
                                      video/audio codec, remuxing will fail. You
                                      can specify multiple rules; Eg.
@@ -948,10 +951,10 @@ ## Post-Processing Options:
                                      option can be used multiple times
     --no-exec                        Remove any previously defined --exec
     --convert-subs FORMAT            Convert the subtitles to another format
-                                     (currently supported: srt|vtt|ass|lrc)
+                                     (currently supported: srt, vtt, ass, lrc)
                                      (Alias: --convert-subtitles)
     --convert-thumbnails FORMAT      Convert the thumbnails to another format
-                                     (currently supported: jpg|png|webp)
+                                     (currently supported: jpg, png, webp)
     --split-chapters                 Split video into multiple files based on
                                      internal chapters. The "chapter:" prefix
                                      can be used with "--paths" and "--output"
@@ -1638,7 +1641,11 @@ # Regex example
 # Set title as "Series name S01E05"
 $ yt-dlp --parse-metadata "%(series)s S%(season_number)02dE%(episode_number)02d:%(title)s"
 
-# Set "comment" field in video metadata using description instead of webpage_url
+# Prioritize uploader as the "artist" field in video metadata
+$ yt-dlp --parse-metadata "%(uploader|)s:%(meta_artist)s" --add-metadata
+
+# Set "comment" field in video metadata using description instead of webpage_url,
+# handling multiple lines correctly
 $ yt-dlp --parse-metadata "description:(?s)(?P<meta_comment>.+)" --add-metadata
 
 # Remove "formats" field from the infojson by setting it to an empty string
@@ -1651,7 +1658,7 @@ # Replace all spaces and "_" in title and uploader with a `-`
 
 # EXTRACTOR ARGUMENTS
 
-Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. Eg: `--extractor-args "youtube:player-client=android_agegate,web;include_live_dash" --extractor-args "funimation:version=uncut"`
+Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. Eg: `--extractor-args "youtube:player-client=android_embedded,web;include_live_dash" --extractor-args "funimation:version=uncut"`
 
 The following extractors use this feature:
 
@@ -1661,10 +1668,8 @@ #### youtube
 * `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
 * `include_live_dash`: Include live dash formats even without `--live-from-start` (These formats don't download properly)
 * `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
-* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`.
-    * E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total.
-* `max_comment_depth` Maximum depth for nested comments. YouTube supports depths 1 or 2 (default)
-    * **Deprecated**: Set `max-replies` to `0` or `all` in `max_comments` instead (e.g. `max_comments=all,all,0` to get no replies) 
+* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`
+    * E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total
 
 #### youtubetab (YouTube playlists, channels, feeds, etc.)
 * `skip`: One or more of `webpage` (skip initial webpage download), `authcheck` (allow the download of playlists requiring authentication when no initial webpage is downloaded. This may cause unwanted behavior, see [#1122](https://github.com/yt-dlp/yt-dlp/pull/1122) for more details)
@@ -1743,7 +1748,7 @@ # EMBEDDING YT-DLP
     ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
 ```
 
-Most likely, you'll want to use various options. For a list of options available, have a look at [`yt_dlp/YoutubeDL.py`](yt_dlp/YoutubeDL.py#L191).
+Most likely, you'll want to use various options. For a list of options available, have a look at [`yt_dlp/YoutubeDL.py`](yt_dlp/YoutubeDL.py#L195).
 
 Here's a more complete example demonstrating various functionality:
 
index 51d83bde06d9b8619a3880ebaea98992b758f12f..d03229d864cd0897ef116ce5ca96214df3f7586c 100644 (file)
@@ -517,7 +517,7 @@ class YoutubeDL(object):
 
     _format_fields = {
         # NB: Keep in sync with the docstring of extractor/common.py
-        'url', 'manifest_url', 'ext', 'format', 'format_id', 'format_note',
+        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
         'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
         'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
         'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
@@ -938,7 +938,7 @@ def report_warning(self, message, only_once=False):
 
     def deprecation_warning(self, message):
         if self.params.get('logger') is not None:
-            self.params['logger'].warning('DeprecationWarning: {message}')
+            self.params['logger'].warning(f'DeprecationWarning: {message}')
         else:
             self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
 
@@ -2478,8 +2478,9 @@ def sanitize_numeric_fields(info):
         if info_dict.get('is_live') and formats:
             formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
             if get_from_start and not formats:
-                self.raise_no_formats(info_dict, msg='--live-from-start is passed, but there are no formats that can be downloaded from the start. '
-                                                     'If you want to download from the current time, pass --no-live-from-start')
+                self.raise_no_formats(info_dict, msg=(
+                    '--live-from-start is passed, but there are no formats that can be downloaded from the start. '
+                    'If you want to download from the current time, use --no-live-from-start'))
 
         if not formats:
             self.raise_no_formats(info_dict)
index a445d862128dcbe1e5d359858728309f5c9cc53d..ebf2d227a3349704bbfae433e4ac2beb5d5a59b5 100644 (file)
@@ -379,7 +379,7 @@ def metadataparser_actions(f):
             'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
             'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))
 
-    # --(post-processor/downloader)-args without name
+    # --(postprocessor/downloader)-args without name
     def report_args_compat(name, value, key1, key2=None):
         if key1 in value and key2 not in value:
             warnings.append(f'{name} arguments given without specifying name. The arguments will be given to all {name}s')
index 7265cad815b262ebadadf25e54964be7696a7ceb..1f08a3664d3290a721aed92020638763decfa091 100644 (file)
@@ -21,6 +21,7 @@
     compat_cookiejar_Cookie,
 )
 from .utils import (
+    error_to_str,
     expand_path,
     Popen,
     YoutubeDLCookieJar,
@@ -721,7 +722,7 @@ def _get_kwallet_network_wallet(logger):
             network_wallet = stdout.decode('utf-8').strip()
             logger.debug('NetworkWallet = "{}"'.format(network_wallet))
             return network_wallet
-    except BaseException as e:
+    except Exception as e:
         logger.warning('exception while obtaining NetworkWallet: {}'.format(e))
         return default_wallet
 
@@ -766,8 +767,8 @@ def _get_kwallet_password(browser_keyring_name, logger):
                 if stdout[-1:] == b'\n':
                     stdout = stdout[:-1]
                 return stdout
-    except BaseException as e:
-        logger.warning(f'exception running kwallet-query: {type(e).__name__}({e})')
+    except Exception as e:
+        logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
         return b''
 
 
@@ -823,8 +824,8 @@ def _get_mac_keyring_password(browser_keyring_name, logger):
         if stdout[-1:] == b'\n':
             stdout = stdout[:-1]
         return stdout
-    except BaseException as e:
-        logger.warning(f'exception running find-generic-password: {type(e).__name__}({e})')
+    except Exception as e:
+        logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
         return None
 
 
index 6b75dfc622cec9c180e6abf7f27c53b9da25cc96..c45a8a4767ce436a22e1fe7e3b633b3706361bfe 100644 (file)
@@ -403,7 +403,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
                 pass
 
         if compat_os_name == 'nt':
-            def bindoj_result(future):
+            def future_result(future):
                 while True:
                     try:
                         return future.result(0.1)
@@ -412,7 +412,7 @@ def bindoj_result(future):
                     except concurrent.futures.TimeoutError:
                         continue
         else:
-            def bindoj_result(future):
+            def future_result(future):
                 return future.result()
 
         def interrupt_trigger_iter(fg):
@@ -430,7 +430,7 @@ def interrupt_trigger_iter(fg):
         result = True
         for tpe, job in spins:
             try:
-                result = result and bindoj_result(job)
+                result = result and future_result(job)
             except KeyboardInterrupt:
                 interrupt_trigger[0] = False
             finally:
@@ -494,16 +494,14 @@ def download_fragment(fragment, ctx):
                 self.report_error('Giving up after %s fragment retries' % fragment_retries)
 
         def append_fragment(frag_content, frag_index, ctx):
-            if not frag_content:
-                if not is_fatal(frag_index - 1):
-                    self.report_skip_fragment(frag_index, 'fragment not found')
-                    return True
-                else:
-                    ctx['dest_stream'].close()
-                    self.report_error(
-                        'fragment %s not found, unable to continue' % frag_index)
-                    return False
-            self._append_fragment(ctx, pack_func(frag_content, frag_index))
+            if frag_content:
+                self._append_fragment(ctx, pack_func(frag_content, frag_index))
+            elif not is_fatal(frag_index - 1):
+                self.report_skip_fragment(frag_index, 'fragment not found')
+            else:
+                ctx['dest_stream'].close()
+                self.report_error(f'fragment {frag_index} not found, unable to continue')
+                return False
             return True
 
         decrypt_fragment = self.decrypter(info_dict)
index cabf401a7b197aaec6fc34202971a207f8f1acb7..591a9b08dca172c9b4c1937ad149ed3177bf4911 100644 (file)
@@ -7,7 +7,6 @@
 
 from .common import FileDownloader
 from ..compat import (
-    compat_str,
     compat_urllib_error,
     compat_http_client
 )
@@ -58,8 +57,6 @@ class DownloadContext(dict):
         ctx.resume_len = 0
         ctx.block_size = self.params.get('buffersize', 1024)
         ctx.start_time = time.time()
-        ctx.chunk_size = None
-        throttle_start = None
 
         # parse given Range
         req_start, req_end, _ = parse_http_range(headers.get('Range'))
@@ -85,12 +82,6 @@ def __init__(self, source_error):
         class NextFragment(Exception):
             pass
 
-        def set_range(req, start, end):
-            range_header = 'bytes=%d-' % start
-            if end:
-                range_header += compat_str(end)
-            req.add_header('Range', range_header)
-
         def establish_connection():
             ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
                               if not is_test and chunk_size else chunk_size)
@@ -131,7 +122,7 @@ def establish_connection():
             request = sanitized_Request(url, request_data, headers)
             has_range = range_start is not None
             if has_range:
-                set_range(request, range_start, range_end)
+                request.add_header('Range', f'bytes={int(range_start)}-{int_or_none(range_end) or ""}')
             # Establish connection
             try:
                 ctx.data = self.ydl.urlopen(request)
@@ -214,7 +205,6 @@ def establish_connection():
                 raise RetryDownload(err)
 
         def download():
-            nonlocal throttle_start
             data_len = ctx.data.info().get('Content-length', None)
 
             # Range HTTP header may be ignored/unsupported by a webserver
@@ -329,14 +319,14 @@ def retry(e):
                 if speed and speed < (self.params.get('throttledratelimit') or 0):
                     # The speed must stay below the limit for 3 seconds
                     # This prevents raising error when the speed temporarily goes down
-                    if throttle_start is None:
-                        throttle_start = now
-                    elif now - throttle_start > 3:
+                    if ctx.throttle_start is None:
+                        ctx.throttle_start = now
+                    elif now - ctx.throttle_start > 3:
                         if ctx.stream is not None and ctx.tmpfilename != '-':
                             ctx.stream.close()
                         raise ThrottledDownload()
                 elif speed:
-                    throttle_start = None
+                    ctx.throttle_start = None
 
             if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
                 ctx.resume_len = byte_counter
index dd1ff512ebe8b7e5b92464ddd9804f6f42ef377c..3212f33285ead6264fc6408b4907d2471b4daa80 100644 (file)
@@ -926,9 +926,9 @@ def _real_extract(self, url):
         if season_id and not video_data:
             # Non-Bstation layout, read through episode list
             season_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id)
-            video_data = next(
-                episode for episode in traverse_obj(season_json, ('sections', ..., 'episodes', ...), expected_type=dict)
-                if str(episode.get('episode_id')) == ep_id)
+            video_data = traverse_obj(season_json,
+                                      ('sections', ..., 'episodes', lambda _, v: str(v['episode_id']) == ep_id),
+                                      expected_type=dict, get_all=False)
         return self._extract_video_info(video_data, ep_id=ep_id, aid=aid)
 
 
index 31e7d7de6794afd5b3af17346a13e024f242cf71..8b9903774d7f81643ae4ea7ff05823ae0a90f74b 100644 (file)
@@ -245,10 +245,6 @@ class VrtNUIE(GigyaBaseIE):
             'upload_date': '20200727',
         },
         'skip': 'This video is only available for registered users',
-        'params': {
-            'username': '<snip>',
-            'password': '<snip>',
-        },
         'expected_warnings': ['is not a supported codec'],
     }, {
         # Only available via new API endpoint
@@ -264,10 +260,6 @@ class VrtNUIE(GigyaBaseIE):
             'episode_number': 5,
         },
         'skip': 'This video is only available for registered users',
-        'params': {
-            'username': '<snip>',
-            'password': '<snip>',
-        },
         'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
     }]
     _NETRC_MACHINE = 'vrtnu'
index 81688eb547d228dd7d45ac4b6903f9a0e814bc88..e2605c1f4530f3b226131c10149c8a72b1bc7f38 100644 (file)
@@ -139,6 +139,8 @@ class InfoExtractor(object):
                                    for HDS - URL of the F4M manifest,
                                    for DASH - URL of the MPD manifest,
                                    for MSS - URL of the ISM manifest.
+                    * manifest_stream_number  (For internal use only)
+                                 The index of the stream in the manifest file
                     * ext        Will be calculated from URL if missing
                     * format     A human-readable description of the format
                                  ("mp4 container with h264/opus").
@@ -215,7 +217,7 @@ class InfoExtractor(object):
                                  (HTTP or RTMP) download. Boolean.
                     * has_drm    The format has DRM and cannot be downloaded. Boolean
                     * downloader_options  A dictionary of downloader options as
-                                 described in FileDownloader
+                                 described in FileDownloader (For internal use only)
                     RTMP formats can also have the additional fields: page_url,
                     app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
                     rtmp_protocol, rtmp_real_time
@@ -3684,9 +3686,9 @@ def _get_comments(self, *args, **kwargs):
     def _merge_subtitle_items(subtitle_list1, subtitle_list2):
         """ Merge subtitle items for one language. Items with duplicated URLs/data
         will be dropped. """
-        list1_data = set([item.get('url') or item['data'] for item in subtitle_list1])
+        list1_data = set((item.get('url'), item.get('data')) for item in subtitle_list1)
         ret = list(subtitle_list1)
-        ret.extend([item for item in subtitle_list2 if (item.get('url') or item['data']) not in list1_data])
+        ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
         return ret
 
     @classmethod
index a7442d8f0d5b689c0756ea7f0c5f039ddb6ebe94..2fa61950c203b0aa76640a6af88b4961e802bdf3 100644 (file)
@@ -123,7 +123,7 @@ def _real_extract(self, url):
             self._login(display_id)
             webpage = self._download_webpage(url, display_id, note='Downloading video webpage')
         finally:
-            self._download_webpage('https://www.dropout.tv/logout', display_id, note='Logging out')
+            self._download_webpage('https://www.dropout.tv/logout', display_id, note='Logging out', fatal=False)
 
         embed_url = self._search_regex(r'embed_url:\s*["\'](.+?)["\']', webpage, 'embed url')
         thumbnail = self._og_search_thumbnail(webpage)
@@ -139,7 +139,7 @@ def _real_extract(self, url):
             '_type': 'url_transparent',
             'ie_key': VHXEmbedIE.ie_key(),
             'url': embed_url,
-            'id': self._search_regex(r'embed.vhx.tv/videos/(.+?)\?', embed_url, 'id'),
+            'id': self._search_regex(r'embed\.vhx\.tv/videos/(.+?)\?', embed_url, 'id'),
             'display_id': display_id,
             'title': title,
             'description': self._html_search_meta('description', webpage, fatal=False),
index 2deed585f1f684e6f2c0de6a19e3f97754dd798e..5e0e2facf3dce5858416ca72106e5c81d9345591 100644 (file)
@@ -397,8 +397,10 @@ def extract_metadata(webpage):
                 r'handleWithCustomApplyEach\(\s*ScheduledApplyEach\s*,\s*(\{.+?\})\s*\);', webpage)]
             post = traverse_obj(post_data, (
                 ..., 'require', ..., ..., ..., '__bbox', 'result', 'data'), expected_type=dict) or []
-            media = [m for m in traverse_obj(post, (..., 'attachments', ..., 'media'), expected_type=dict) or []
-                     if str(m.get('id')) == video_id and m.get('__typename') == 'Video']
+            media = traverse_obj(
+                post,
+                (..., 'attachments', ..., 'media', lambda _, m: str(m['id']) == video_id and m['__typename'] == 'Video'),
+                expected_type=dict)
             title = get_first(media, ('title', 'text'))
             description = get_first(media, ('creation_story', 'comet_sections', 'message', 'story', 'message', 'text'))
             uploader_data = get_first(media, 'owner') or get_first(post, ('node', 'actors', ...)) or {}
index 2c503e58176bd351958ce4ce305fe2cc92aa7bae..bd56ad289512294845f468a5042594c1509ecd42 100644 (file)
@@ -2523,7 +2523,7 @@ class GenericIE(InfoExtractor):
                 'title': 'Riku ja Tunna lähtevät peurajahtiin tv:stä tutun biologin kanssa – metsästysreissu huipentuu kasvissyöjän painajaiseen!',
                 'thumbnail': r're:^https?://.+\.jpg$',
                 'duration': 108,
-                'series' : 'Madventures Suomi',
+                'series': 'Madventures Suomi',
                 'description': 'md5:aa55b44bd06a1e337a6f1d0b46507381',
                 'categories': ['Matkailu', 'Elämäntyyli'],
                 'age_limit': 0,
@@ -3886,8 +3886,8 @@ def check_video(vurl):
             if RtmpIE.suitable(vurl):
                 return True
             vpath = compat_urlparse.urlparse(vurl).path
-            vext = determine_ext(vpath)
-            return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
+            vext = determine_ext(vpath, None)
+            return vext not in (None, 'swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
 
         def filter_video(urls):
             return list(filter(check_video, urls))
index 369141d6797b967dcb3ffdacd569697f094c5f28..b20681ad117d39520df6a0ff8d36cf9e6d4eec83 100644 (file)
@@ -194,7 +194,7 @@ def _extract_info(self, pc, mobile, i, referer):
                     cc_url = cc.get('webvttFileUrl')
                     if not cc_url:
                         continue
-                    lang = cc.get('languageCode') or self._search_regex(r'/[a-z]{2}\.vtt', cc_url, 'lang', default='en')
+                    lang = cc.get('languageCode') or self._search_regex(r'/([a-z]{2})\.vtt', cc_url, 'lang', default='en')
                     subtitles.setdefault(lang, []).append({
                         'url': cc_url,
                     })
index a5a1a01e09a17f3c64a6ce2e0000319d7a2987e1..4eb6ed070ad493f939c2dceb9cbf0dfa7586fa71 100644 (file)
@@ -469,7 +469,7 @@ def _get_subtitles(self, video_id, api_data, session_api_data):
         comment_user_key = traverse_obj(api_data, ('comment', 'keys', 'userKey'))
         user_id_str = session_api_data.get('serviceUserId')
 
-        thread_ids = [x for x in traverse_obj(api_data, ('comment', 'threads')) or [] if x['isActive']]
+        thread_ids = traverse_obj(api_data, ('comment', 'threads', lambda _, v: v['isActive']))
         raw_danmaku = self._extract_all_comments(video_id, thread_ids, user_id_str, comment_user_key)
         if not raw_danmaku:
             self.report_warning(f'Failed to get comments. {bug_reports_message()}')
index 6f8c32882e51d04cca243fc5981610bbfd046b09..c1d6c547725efd7ee2fdf1c6418386bcd9a84f87 100644 (file)
@@ -264,7 +264,7 @@ def extract_addr(addr, add_meta={}):
         return {
             'id': aweme_id,
             'title': aweme_detail.get('desc'),
-            'description': aweme_detail['desc'],
+            'description': aweme_detail.get('desc'),
             'view_count': int_or_none(stats_info.get('play_count')),
             'like_count': int_or_none(stats_info.get('digg_count')),
             'repost_count': int_or_none(stats_info.get('share_count')),
@@ -387,6 +387,9 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'artist': 'Ysrbeats',
+            'album': 'Lehanga',
+            'track': 'Lehanga',
         }
     }, {
         'url': 'https://www.tiktok.com/@patroxofficial/video/6742501081818877190?langCountry=en',
@@ -410,6 +413,8 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'artist': 'Evan Todd, Jessica Keenan Wynn, Alice Lee, Barrett Wilbert Weed & Jon Eidson',
+            'track': 'Big Fun',
         }
     }, {
         # Banned audio, only available on the app
@@ -463,7 +468,7 @@ class TikTokIE(TikTokBaseIE):
         'info_dict': {
             'id': '7059698374567611694',
             'ext': 'mp4',
-            'title': 'N/A',
+            'title': 'tiktok video #7059698374567611694',
             'description': '',
             'uploader': 'pokemonlife22',
             'creator': 'Pokemon',
@@ -480,7 +485,7 @@ class TikTokIE(TikTokBaseIE):
             'repost_count': int,
             'comment_count': int,
         },
-        'expected_warnings': ['Video not available']
+        'expected_warnings': ['Video not available', 'Creating a generic title']
     }, {
         # Auto-captions available
         'url': 'https://www.tiktok.com/@hankgreen1/video/7047596209028074758',
index a101af67e7f9c53d31da12afa9850ee7a4579f25..7d3966bf1a7435027fb0358e004559d8ac8ad953 100644 (file)
@@ -163,7 +163,6 @@ class YandexVideoPreviewIE(InfoExtractor):
             'thumbnail': 'https://i.mycdn.me/videoPreview?id=544866765315&type=37&idx=13&tkn=TY5qjLYZHxpmcnK8U2LgzYkgmaU&fn=external_8',
             'uploader_id': '481054701571',
             'title': 'LOFT - summer, summer, summer HD',
-            'manifest_stream_number': 0,
             'uploader': 'АРТЁМ КУДРОВ',
         },
     }, {  # youtube
index 485849ba99ec161b4254259e7a7574a1b0cd0526..017554c88be6acef3b83c9b2a128162a683b9fe1 100644 (file)
@@ -837,17 +837,20 @@ def _extract_video(self, renderer):
 
         uploader = self._get_text(renderer, 'ownerText', 'shortBylineText')
         channel_id = traverse_obj(
-            renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'), expected_type=str, get_all=False)
+            renderer, ('shortBylineText', 'runs', ..., 'navigationEndpoint', 'browseEndpoint', 'browseId'),
+            expected_type=str, get_all=False)
         timestamp, time_text = self._extract_time_text(renderer, 'publishedTimeText')
         scheduled_timestamp = str_to_int(traverse_obj(renderer, ('upcomingEventData', 'startTime'), get_all=False))
         overlay_style = traverse_obj(
-            renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'), get_all=False, expected_type=str)
+            renderer, ('thumbnailOverlays', ..., 'thumbnailOverlayTimeStatusRenderer', 'style'),
+            get_all=False, expected_type=str)
         badges = self._extract_badges(renderer)
         thumbnails = self._extract_thumbnails(renderer, 'thumbnail')
         navigation_url = urljoin('https://www.youtube.com/', traverse_obj(
-            renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'), expected_type=str))
+            renderer, ('navigationEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
+            expected_type=str)) or ''
         url = f'https://www.youtube.com/watch?v={video_id}'
-        if overlay_style == 'SHORTS' or (navigation_url and '/shorts/' in navigation_url):
+        if overlay_style == 'SHORTS' or '/shorts/' in navigation_url:
             url = f'https://www.youtube.com/shorts/{video_id}'
 
         return {
@@ -862,7 +865,9 @@ def _extract_video(self, renderer):
             'uploader': uploader,
             'channel_id': channel_id,
             'thumbnails': thumbnails,
-            'upload_date': strftime_or_none(timestamp, '%Y%m%d') if self._configuration_arg('approximate_date', ie_key='youtubetab') else None,
+            'upload_date': (strftime_or_none(timestamp, '%Y%m%d')
+                            if self._configuration_arg('approximate_date', ie_key='youtubetab')
+                            else None),
             'live_status': ('is_upcoming' if scheduled_timestamp is not None
                             else 'was_live' if 'streamed' in time_text.lower()
                             else 'is_live' if overlay_style is not None and overlay_style == 'LIVE' or 'live now' in badges
index eb306898ab1c08eaf7e2c2e7e44fd812bf1d5fc6..06c613262b95cada4b82b1754b323a76760043ab 100644 (file)
@@ -163,6 +163,8 @@ def _set_from_options_callback(
         values = [process(value)] if delim is None else list(map(process, value.split(delim)[::-1]))
         while values:
             actual_val = val = values.pop()
+            if not val:
+                raise optparse.OptionValueError(f'Invalid {option.metavar} for {opt_str}: {value}')
             if val == 'all':
                 current.update(allowed_values)
             elif val == '-all':
@@ -1311,7 +1313,7 @@ def _dict_from_options_callback(
         '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
         help=(
             'Specify audio format to convert the audio to when -x is used. Currently supported formats are: '
-            'best (default) or one of %s' % '|'.join(FFmpegExtractAudioPP.SUPPORTED_EXTS)))
+            'best (default) or one of %s' % ''.join(FFmpegExtractAudioPP.SUPPORTED_EXTS)))
     postproc.add_option(
         '--audio-quality', metavar='QUALITY',
         dest='audioquality', default='5',
@@ -1323,7 +1325,7 @@ def _dict_from_options_callback(
             'Remux the video into another container if necessary (currently supported: %s). '
             'If target container does not support the video/audio codec, remuxing will fail. '
             'You can specify multiple rules; Eg. "aac>m4a/mov>mp4/mkv" will remux aac to m4a, mov to mp4 '
-            'and anything else to mkv.' % '|'.join(FFmpegVideoRemuxerPP.SUPPORTED_EXTS)))
+            'and anything else to mkv.' % ''.join(FFmpegVideoRemuxerPP.SUPPORTED_EXTS)))
     postproc.add_option(
         '--recode-video',
         metavar='FORMAT', dest='recodevideo', default=None,
@@ -1438,7 +1440,7 @@ def _dict_from_options_callback(
             '"multi_video" (default; only when the videos form a single show). '
             'All the video files must have same codecs and number of streams to be concatable. '
             'The "pl_video:" prefix can be used with "--paths" and "--output" to '
-            'set the output filename for the split files. See "OUTPUT TEMPLATE" for details'))
+            'set the output filename for the concatenated files. See "OUTPUT TEMPLATE" for details'))
     postproc.add_option(
         '--fixup',
         metavar='POLICY', dest='fixup', default=None,
@@ -1486,20 +1488,20 @@ def _dict_from_options_callback(
         help=optparse.SUPPRESS_HELP)
     postproc.add_option(
         '--no-exec-before-download',
-        action='store_const', dest='exec_before_dl_cmd', const=[],
+        action='store_const', dest='exec_before_dl_cmd', const=None,
         help=optparse.SUPPRESS_HELP)
     postproc.add_option(
         '--convert-subs', '--convert-sub', '--convert-subtitles',
         metavar='FORMAT', dest='convertsubtitles', default=None,
         help=(
             'Convert the subtitles to another format (currently supported: %s) '
-            '(Alias: --convert-subtitles)' % '|'.join(FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS)))
             '(Alias: --convert-subtitles)' % '|'.join(FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS)))
     postproc.add_option(
         '--convert-thumbnails',
         metavar='FORMAT', dest='convertthumbnails', default=None,
         help=(
             'Convert the thumbnails to another format '
-            '(currently supported: %s) ' % '|'.join(FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS)))
             '(currently supported: %s) ' % '|'.join(FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS)))
     postproc.add_option(
         '--split-chapters', '--split-tracks',
         dest='split_chapters', action='store_true', default=False,
index 5216acbfb652a8253924d76690cc30abef9b2f09..64329028600e70fc9b7a6f91db060a36b0e56298 100644 (file)
@@ -500,6 +500,9 @@ def run(self, information):
         temp_path = new_path = prefix + sep + extension
 
         if new_path == path:
+            if acodec == 'copy':
+                self.to_screen(f'File is already in target format {self._preferredcodec}, skipping')
+                return [], information
             orig_path = prepend_extension(path, 'orig')
             temp_path = prepend_extension(path, 'temp')
         if (self._nopostoverwrites and os.path.exists(encodeFilename(new_path))
@@ -1122,6 +1125,11 @@ def __init__(self, downloader, only_multi_video=False):
         self._only_multi_video = only_multi_video
         super().__init__(downloader)
 
+    def _get_codecs(self, file):
+        codecs = traverse_obj(self.get_metadata_object(file), ('streams', ..., 'codec_name'))
+        self.write_debug(f'Codecs = {", ".join(codecs)}')
+        return tuple(codecs)
+
     def concat_files(self, in_files, out_file):
         if not self._downloader._ensure_dir_exists(out_file):
             return
@@ -1131,8 +1139,7 @@ def concat_files(self, in_files, out_file):
             os.replace(in_files[0], out_file)
             return []
 
-        codecs = [traverse_obj(self.get_metadata_object(file), ('streams', ..., 'codec_name')) for file in in_files]
-        if len(set(map(tuple, codecs))) > 1:
+        if len(set(map(self._get_codecs, in_files))) > 1:
             raise PostProcessingError(
                 'The files have different streams/codecs and cannot be concatenated. '
                 'Either select different formats or --recode-video them to a common format')
@@ -1146,7 +1153,7 @@ def run(self, info):
         entries = info.get('entries') or []
         if not any(entries) or (self._only_multi_video and info['_type'] != 'multi_video'):
             return [], info
-        elif any(len(entry) > 1 for entry in traverse_obj(entries, (..., 'requested_downloads')) or []):
+        elif traverse_obj(entries, (..., 'requested_downloads', lambda _, v: len(v) > 1)):
             raise PostProcessingError('Concatenation is not supported when downloading multiple separate formats')
 
         in_files = traverse_obj(entries, (..., 'requested_downloads', 0, 'filepath')) or []
index ce918750d4285e491bbe69b905e4fa8816b3abd1..6663583fcfe3d316fd27fdab62a2aa1917653d99 100644 (file)
@@ -1040,7 +1040,7 @@ def make_HTTPS_handler(params, **kwargs):
 
 
 def bug_reports_message(before=';'):
-    msg = ('please report this issue on  https://github.com/yt-dlp/yt-dlp , '
+    msg = ('please report this issue on  https://github.com/yt-dlp/yt-dlp/issues?q= , '
            'filling out the appropriate issue template. '
            'Confirm you are on the latest version using  yt-dlp -U')
 
@@ -2883,6 +2883,7 @@ def __getitem__(self, idx):
 
 
 class OnDemandPagedList(PagedList):
+    """Download pages until a page with less than maximum results"""
     def _getslice(self, start, end):
         for pagenum in itertools.count(start // self._pagesize):
             firstid = pagenum * self._pagesize
@@ -2922,6 +2923,7 @@ def _getslice(self, start, end):
 
 
 class InAdvancePagedList(PagedList):
+    """PagedList with total number of pages known in advance"""
     def __init__(self, pagefunc, pagecount, pagesize):
         PagedList.__init__(self, pagefunc, pagesize, True)
         self._pagecount = pagecount
@@ -3090,13 +3092,10 @@ def multipart_encode(data, boundary=None):
 
 
 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
-    if isinstance(key_or_keys, (list, tuple)):
-        for key in key_or_keys:
-            if key not in d or d[key] is None or skip_false_values and not d[key]:
-                continue
-            return d[key]
-        return default
-    return d.get(key_or_keys, default)
+    for val in map(d.get, variadic(key_or_keys)):
+        if val is not None and (val or not skip_false_values):
+            return val
+    return default
 
 
 def try_call(*funcs, expected_type=None, args=[], kwargs={}):
@@ -3324,6 +3323,10 @@ def error_to_compat_str(err):
     return err_str
 
 
+def error_to_str(err):
+    return f'{type(err).__name__}: {err}'
+
+
 def mimetype2ext(mt):
     if mt is None:
         return None