[readme] Cleanup options
diff --git a/youtube_dlc/YoutubeDL.py b/youtube_dlc/YoutubeDL.py
index 2d3eacfebdbb29eea385c0e10cfb60a155355456..4242a5ef95724a31de04715587a3445dd301df1f 100644
@@ -58,6 +58,7 @@
     encode_compat_str,
     encodeFilename,
     error_to_compat_str,
+    ExistingVideoReached,
     expand_path,
     ExtractorError,
     format_bytes,
@@ -81,6 +82,7 @@
     register_socks_protocols,
     render_table,
     replace_extension,
+    RejectedVideoReached,
     SameFileError,
     sanitize_filename,
     sanitize_path,
@@ -230,9 +232,11 @@ class YoutubeDL(object):
     download_archive:  File name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded
                        again.
-    break_on_existing: Stop the download process after attempting to download a file that's
-                       in the archive.
-    cookiefile:        File name where cookies should be read from and dumped to.
+    break_on_existing: Stop the download process after attempting to download a
+                       file that is in the archive.
+    break_on_reject:   Stop the download process when encountering a video that
+                       has been filtered out.
+    cookiefile:        File name where cookies should be read from and dumped to.
     nocheckcertificate:Do not verify SSL certificates
     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                        At the moment, this is only supported by YouTube.
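
The new options documented above are plain keys in the params dict passed to YoutubeDL. Below is a minimal embedding sketch, assuming the same package-level entry point as youtube-dl (`youtube_dlc.YoutubeDL`); the file names and URL are placeholders.

```python
import youtube_dlc

params = {
    'download_archive': 'archive.txt',  # every finished download gets recorded here
    'break_on_existing': True,          # stop once a video from the archive is encountered
    'break_on_reject': True,            # stop once a video is rejected by a filter
    'cookiefile': 'cookies.txt',
}

with youtube_dlc.YoutubeDL(params) as ydl:
    # With the break_* options set, download() may raise instead of returning;
    # see the download() hunk at the end of this diff.
    ydl.download(['https://www.youtube.com/playlist?list=PLACEHOLDER'])
```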
@@ -366,6 +370,8 @@ class YoutubeDL(object):
     _pps = []
     _download_retcode = None
     _num_downloads = None
+    _playlist_level = 0
+    _playlist_urls = set()
     _screen_file = None
 
     def __init__(self, params=None, auto_init=True):
@@ -797,44 +803,53 @@ def prepare_filename(self, info_dict):
     def _match_entry(self, info_dict, incomplete):
         """ Returns None if the file should be downloaded """
 
-        video_title = info_dict.get('title', info_dict.get('id', 'video'))
-        if 'title' in info_dict:
-            # This can happen when we're just evaluating the playlist
-            title = info_dict['title']
-            matchtitle = self.params.get('matchtitle', False)
-            if matchtitle:
-                if not re.search(matchtitle, title, re.IGNORECASE):
-                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
-            rejecttitle = self.params.get('rejecttitle', False)
-            if rejecttitle:
-                if re.search(rejecttitle, title, re.IGNORECASE):
-                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
-        date = info_dict.get('upload_date')
-        if date is not None:
-            dateRange = self.params.get('daterange', DateRange())
-            if date not in dateRange:
-                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
-        view_count = info_dict.get('view_count')
-        if view_count is not None:
-            min_views = self.params.get('min_views')
-            if min_views is not None and view_count < min_views:
-                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
-            max_views = self.params.get('max_views')
-            if max_views is not None and view_count > max_views:
-                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
-        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
-            return 'Skipping "%s" because it is age restricted' % video_title
-        if self.in_download_archive(info_dict):
-            return '%s has already been recorded in archive' % video_title
-
-        if not incomplete:
-            match_filter = self.params.get('match_filter')
-            if match_filter is not None:
-                ret = match_filter(info_dict)
-                if ret is not None:
-                    return ret
-
-        return None
+        def check_filter():
+            video_title = info_dict.get('title', info_dict.get('id', 'video'))
+            if 'title' in info_dict:
+                # This can happen when we're just evaluating the playlist
+                title = info_dict['title']
+                matchtitle = self.params.get('matchtitle', False)
+                if matchtitle:
+                    if not re.search(matchtitle, title, re.IGNORECASE):
+                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
+                rejecttitle = self.params.get('rejecttitle', False)
+                if rejecttitle:
+                    if re.search(rejecttitle, title, re.IGNORECASE):
+                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+            date = info_dict.get('upload_date')
+            if date is not None:
+                dateRange = self.params.get('daterange', DateRange())
+                if date not in dateRange:
+                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+            view_count = info_dict.get('view_count')
+            if view_count is not None:
+                min_views = self.params.get('min_views')
+                if min_views is not None and view_count < min_views:
+                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
+                max_views = self.params.get('max_views')
+                if max_views is not None and view_count > max_views:
+                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
+            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
+                return 'Skipping "%s" because it is age restricted' % video_title
+            if self.in_download_archive(info_dict):
+                return '%s has already been recorded in archive' % video_title
+
+            if not incomplete:
+                match_filter = self.params.get('match_filter')
+                if match_filter is not None:
+                    ret = match_filter(info_dict)
+                    if ret is not None:
+                        return ret
+            return None
+
+        reason = check_filter()
+        if reason is not None:
+            self.to_screen('[download] ' + reason)
+            if reason.endswith('has already been recorded in archive') and self.params.get('break_on_existing', False):
+                raise ExistingVideoReached()
+            elif self.params.get('break_on_reject', False):
+                raise RejectedVideoReached()
+        return reason
 
     @staticmethod
     def add_extra_info(info_dict, extra_info):
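
The refactored _match_entry keeps the match_filter hook: per check_filter() above, the callable stored in params['match_filter'] receives the video's info_dict and returns None to accept it or a reason string to skip it. A minimal sketch of such a filter (the duration cut-off is purely illustrative):

```python
def skip_short_videos(info_dict):
    # Return None to download the video, or a human-readable reason to skip it.
    duration = info_dict.get('duration')
    if duration is not None and duration < 120:
        return '%s is shorter than two minutes, skipping' % info_dict.get(
            'title', info_dict.get('id', 'video'))
    return None

params = {
    'match_filter': skip_short_videos,
    'break_on_reject': True,  # turns the skip into a hard stop (RejectedVideoReached)
}
```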
@@ -895,7 +910,7 @@ def wrapper(self, *args, **kwargs):
                 self.report_error(msg)
             except ExtractorError as e:  # An error we somewhat expected
                 self.report_error(compat_str(e), e.format_traceback())
-            except MaxDownloadsReached:
+            except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                 raise
             except Exception as e:
                 if self.params.get('ignoreerrors', False):
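
The hunk above widens the except clause in __handle_extraction_exceptions so the new control-flow exceptions keep propagating. A simplified, self-contained sketch of that decorator pattern (the import path and the reduced set of handled errors are assumptions, not the project's exact helper):

```python
import functools

# Assumed import path, based on the utils import block at the top of this diff.
from youtube_dlc.utils import (
    ExistingVideoReached,
    MaxDownloadsReached,
    RejectedVideoReached,
)


def handle_extraction_exceptions(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
            raise  # control-flow signals must unwind through nested playlists
        except Exception as e:
            if self.params.get('ignoreerrors', False):
                self.report_error(str(e))
            else:
                raise
    return wrapper
```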
@@ -1000,119 +1015,23 @@ def process_ie_result(self, ie_result, download=True, extra_info={}):
             return self.process_ie_result(
                 new_result, download=download, extra_info=extra_info)
         elif result_type in ('playlist', 'multi_video'):
-            # We process each entry in the playlist
-            playlist = ie_result.get('title') or ie_result.get('id')
-            self.to_screen('[download] Downloading playlist: %s' % playlist)
-
-            playlist_results = []
-
-            playliststart = self.params.get('playliststart', 1) - 1
-            playlistend = self.params.get('playlistend')
-            # For backwards compatibility, interpret -1 as whole list
-            if playlistend == -1:
-                playlistend = None
-
-            playlistitems_str = self.params.get('playlist_items')
-            playlistitems = None
-            if playlistitems_str is not None:
-                def iter_playlistitems(format):
-                    for string_segment in format.split(','):
-                        if '-' in string_segment:
-                            start, end = string_segment.split('-')
-                            for item in range(int(start), int(end) + 1):
-                                yield int(item)
-                        else:
-                            yield int(string_segment)
-                playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
-
-            ie_entries = ie_result['entries']
-
-            def make_playlistitems_entries(list_ie_entries):
-                num_entries = len(list_ie_entries)
-                return [
-                    list_ie_entries[i - 1] for i in playlistitems
-                    if -num_entries <= i - 1 < num_entries]
-
-            def report_download(num_entries):
+            # Protect from infinite recursion due to recursively nested playlists
+            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
+            webpage_url = ie_result['webpage_url']
+            if webpage_url in self._playlist_urls:
                 self.to_screen(
-                    '[%s] playlist %s: Downloading %d videos' %
-                    (ie_result['extractor'], playlist, num_entries))
+                    '[download] Skipping already downloaded playlist: %s'
+                    % (ie_result.get('title') or ie_result.get('id')))
+                return
 
-            if isinstance(ie_entries, list):
-                n_all_entries = len(ie_entries)
-                if playlistitems:
-                    entries = make_playlistitems_entries(ie_entries)
-                else:
-                    entries = ie_entries[playliststart:playlistend]
-                n_entries = len(entries)
-                self.to_screen(
-                    '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
-                    (ie_result['extractor'], playlist, n_all_entries, n_entries))
-            elif isinstance(ie_entries, PagedList):
-                if playlistitems:
-                    entries = []
-                    for item in playlistitems:
-                        entries.extend(ie_entries.getslice(
-                            item - 1, item
-                        ))
-                else:
-                    entries = ie_entries.getslice(
-                        playliststart, playlistend)
-                n_entries = len(entries)
-                report_download(n_entries)
-            else:  # iterable
-                if playlistitems:
-                    entries = make_playlistitems_entries(list(itertools.islice(
-                        ie_entries, 0, max(playlistitems))))
-                else:
-                    entries = list(itertools.islice(
-                        ie_entries, playliststart, playlistend))
-                n_entries = len(entries)
-                report_download(n_entries)
-
-            if self.params.get('playlistreverse', False):
-                entries = entries[::-1]
-
-            if self.params.get('playlistrandom', False):
-                random.shuffle(entries)
-
-            x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
-
-            for i, entry in enumerate(entries, 1):
-                self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
-                # This __x_forwarded_for_ip thing is a bit ugly but requires
-                # minimal changes
-                if x_forwarded_for:
-                    entry['__x_forwarded_for_ip'] = x_forwarded_for
-                extra = {
-                    'n_entries': n_entries,
-                    'playlist': playlist,
-                    'playlist_id': ie_result.get('id'),
-                    'playlist_title': ie_result.get('title'),
-                    'playlist_uploader': ie_result.get('uploader'),
-                    'playlist_uploader_id': ie_result.get('uploader_id'),
-                    'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
-                    'extractor': ie_result['extractor'],
-                    'webpage_url': ie_result['webpage_url'],
-                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
-                    'extractor_key': ie_result['extractor_key'],
-                }
-
-                reason = self._match_entry(entry, incomplete=True)
-                if reason is not None:
-                    if reason.endswith('has already been recorded in the archive') and self.params.get('break_on_existing'):
-                        print('[download] tried downloading a file that\'s already in the archive, stopping since --break-on-existing is set.')
-                        break
-                    else:
-                        self.to_screen('[download] ' + reason)
-                        continue
-
-                entry_result = self.__process_iterable_entry(entry, download, extra)
-                # TODO: skip failed (empty) entries?
-                playlist_results.append(entry_result)
-            ie_result['entries'] = playlist_results
-            self.to_screen('[download] Finished downloading playlist: %s' % playlist)
-            return ie_result
+            self._playlist_level += 1
+            self._playlist_urls.add(webpage_url)
+            try:
+                return self.__process_playlist(ie_result, download)
+            finally:
+                self._playlist_level -= 1
+                if not self._playlist_level:
+                    self._playlist_urls.clear()
         elif result_type == 'compat_list':
             self.report_warning(
                 'Extractor %s returned a compat_list result. '
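
The recursion guard introduced above pairs a nesting counter (_playlist_level) with a set of playlist URLs (_playlist_urls) seen during the current top-level extraction: repeats are skipped, and the set is cleared only once the outermost playlist finishes, so both nested and sibling duplicates are caught. A distilled standalone illustration of that pattern (the class itself is just a sketch, not project code):

```python
class PlaylistRecursionGuard(object):
    def __init__(self):
        self._playlist_level = 0
        self._playlist_urls = set()

    def process(self, webpage_url, process_playlist):
        if webpage_url in self._playlist_urls:
            return None  # already seen in this extraction run: skip it
        self._playlist_level += 1
        self._playlist_urls.add(webpage_url)
        try:
            return process_playlist(webpage_url)
        finally:
            self._playlist_level -= 1
            if not self._playlist_level:
                self._playlist_urls.clear()
```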
@@ -1137,6 +1056,115 @@ def _fixup(r):
         else:
             raise Exception('Invalid result type: %s' % result_type)
 
+    def __process_playlist(self, ie_result, download):
+        # We process each entry in the playlist
+        playlist = ie_result.get('title') or ie_result.get('id')
+        self.to_screen('[download] Downloading playlist: %s' % playlist)
+
+        playlist_results = []
+
+        playliststart = self.params.get('playliststart', 1) - 1
+        playlistend = self.params.get('playlistend')
+        # For backwards compatibility, interpret -1 as whole list
+        if playlistend == -1:
+            playlistend = None
+
+        playlistitems_str = self.params.get('playlist_items')
+        playlistitems = None
+        if playlistitems_str is not None:
+            def iter_playlistitems(format):
+                for string_segment in format.split(','):
+                    if '-' in string_segment:
+                        start, end = string_segment.split('-')
+                        for item in range(int(start), int(end) + 1):
+                            yield int(item)
+                    else:
+                        yield int(string_segment)
+            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
+
+        ie_entries = ie_result['entries']
+
+        def make_playlistitems_entries(list_ie_entries):
+            num_entries = len(list_ie_entries)
+            return [
+                list_ie_entries[i - 1] for i in playlistitems
+                if -num_entries <= i - 1 < num_entries]
+
+        def report_download(num_entries):
+            self.to_screen(
+                '[%s] playlist %s: Downloading %d videos' %
+                (ie_result['extractor'], playlist, num_entries))
+
+        if isinstance(ie_entries, list):
+            n_all_entries = len(ie_entries)
+            if playlistitems:
+                entries = make_playlistitems_entries(ie_entries)
+            else:
+                entries = ie_entries[playliststart:playlistend]
+            n_entries = len(entries)
+            self.to_screen(
+                '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
+                (ie_result['extractor'], playlist, n_all_entries, n_entries))
+        elif isinstance(ie_entries, PagedList):
+            if playlistitems:
+                entries = []
+                for item in playlistitems:
+                    entries.extend(ie_entries.getslice(
+                        item - 1, item
+                    ))
+            else:
+                entries = ie_entries.getslice(
+                    playliststart, playlistend)
+            n_entries = len(entries)
+            report_download(n_entries)
+        else:  # iterable
+            if playlistitems:
+                entries = make_playlistitems_entries(list(itertools.islice(
+                    ie_entries, 0, max(playlistitems))))
+            else:
+                entries = list(itertools.islice(
+                    ie_entries, playliststart, playlistend))
+            n_entries = len(entries)
+            report_download(n_entries)
+
+        if self.params.get('playlistreverse', False):
+            entries = entries[::-1]
+
+        if self.params.get('playlistrandom', False):
+            random.shuffle(entries)
+
+        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
+
+        for i, entry in enumerate(entries, 1):
+            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
+            # This __x_forwarded_for_ip thing is a bit ugly but requires
+            # minimal changes
+            if x_forwarded_for:
+                entry['__x_forwarded_for_ip'] = x_forwarded_for
+            extra = {
+                'n_entries': n_entries,
+                'playlist': playlist,
+                'playlist_id': ie_result.get('id'),
+                'playlist_title': ie_result.get('title'),
+                'playlist_uploader': ie_result.get('uploader'),
+                'playlist_uploader_id': ie_result.get('uploader_id'),
+                'playlist_index': playlistitems[i - 1] if playlistitems else i + playliststart,
+                'extractor': ie_result['extractor'],
+                'webpage_url': ie_result['webpage_url'],
+                'webpage_url_basename': url_basename(ie_result['webpage_url']),
+                'extractor_key': ie_result['extractor_key'],
+            }
+
+            if self._match_entry(entry, incomplete=True) is not None:
+                continue
+
+            entry_result = self.__process_iterable_entry(entry, download, extra)
+            # TODO: skip failed (empty) entries?
+            playlist_results.append(entry_result)
+        ie_result['entries'] = playlist_results
+        self.to_screen('[download] Finished downloading playlist: %s' % playlist)
+        return ie_result
+
     @__handle_extraction_exceptions
     def __process_iterable_entry(self, entry, download, extra_info):
         return self.process_ie_result(
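
__process_playlist (moved wholesale out of process_ie_result above) parses the playlist_items spec as comma-separated indices with inclusive "start-end" ranges, de-duplicated while preserving first-occurrence order (the role orderedSet plays in the diff). A standalone sketch of that parsing, with a made-up helper name:

```python
def parse_playlist_items(spec):
    seen, items = set(), []
    for segment in spec.split(','):
        if '-' in segment:
            start, end = segment.split('-')
            indices = range(int(start), int(end) + 1)  # inclusive range
        else:
            indices = [int(segment)]
        for index in indices:
            if index not in seen:  # keep first occurrence only
                seen.add(index)
                items.append(index)
    return items


assert parse_playlist_items('1-3,7,3') == [1, 2, 3, 7]
```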
@@ -1870,9 +1898,7 @@ def process_info(self, info_dict):
         if 'format' not in info_dict:
             info_dict['format'] = info_dict['ext']
 
-        reason = self._match_entry(info_dict, incomplete=False)
-        if reason is not None:
-            self.to_screen('[download] ' + reason)
+        if self._match_entry(info_dict, incomplete=False) is not None:
             return
 
         self._num_downloads += 1
@@ -2260,7 +2286,13 @@ def download(self, url_list):
             except UnavailableVideoError:
                 self.report_error('unable to download video')
             except MaxDownloadsReached:
-                self.to_screen('[info] Maximum number of downloaded files reached.')
+                self.to_screen('[info] Maximum number of downloaded files reached')
+                raise
+            except ExistingVideoReached:
+                self.to_screen('[info] Encountered a file that is already in the archive, stopping due to --break-on-existing')
+                raise
+            except RejectedVideoReached:
+                self.to_screen('[info] Encountered a file that did not match filter, stopping due to --break-on-reject')
                 raise
             else:
                 if self.params.get('dump_single_json', False):
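
As the download() hunk above shows, --break-on-existing and --break-on-reject surface to embedding code as exceptions that are reported and then re-raised. A caller-side sketch of handling them (import path assumed from the utils import block at the top of this diff):

```python
from youtube_dlc.utils import ExistingVideoReached, RejectedVideoReached


def download_until_break(ydl, urls):
    try:
        ydl.download(urls)
    except ExistingVideoReached:
        print('Stopped: reached a video already recorded in the download archive')
    except RejectedVideoReached:
        print('Stopped: reached a video rejected by the configured filters')
```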