[utils] Improve `traverse_obj`
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index b4ac1f00adc4c55b4ec6d9318b2b726c9939e515..3dfab69b2dcf20843922ae1eff2bacbdd9979009 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
     str_or_none,
     strftime_or_none,
     subtitles_filename,
+    ThrottledDownload,
     to_high_limit_path,
     traverse_obj,
+    try_get,
     UnavailableVideoError,
     url_basename,
     version_tuple,
 )
 from .downloader.rtmp import rtmpdump_version
 from .postprocessor import (
+    get_postprocessor,
+    FFmpegFixupDurationPP,
     FFmpegFixupM3u8PP,
     FFmpegFixupM4aPP,
     FFmpegFixupStretchedPP,
+    FFmpegFixupTimestampPP,
     FFmpegMergerPP,
     FFmpegPostProcessor,
-    # FFmpegSubtitlesConvertorPP,
-    get_postprocessor,
     MoveFilesAfterDownloadPP,
 )
 from .version import __version__
@@ -206,6 +209,9 @@ class YoutubeDL(object):
                        into a single file
     allow_multiple_audio_streams:   Allow multiple audio streams to be merged
                        into a single file
+    check_formats:     Whether to test if the formats are downloadable.
+                       Can be True (check all), False (check none)
+                       or None (check only if requested by extractor)
     paths:             Dictionary of output paths. The allowed keys are 'home'
                        'temp' and the keys of OUTTMPL_TYPES (in utils.py)
     outtmpl:           Dictionary of templates for output names. Allowed keys
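
As a quick illustration of the new parameter, a minimal sketch of passing check_formats when embedding yt-dlp (the URL is a placeholder):

    from yt_dlp import YoutubeDL

    # Per the docstring above: True tests every format, False tests none,
    # and None tests only when the extractor asks for it.
    with YoutubeDL({'check_formats': True}) as ydl:
        info = ydl.extract_info('https://example.com/watch?v=xyz', download=False)
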
@@ -389,18 +395,15 @@ class YoutubeDL(object):
                        if True, otherwise use ffmpeg/avconv if False, otherwise
                        use downloader suggested by extractor if None.
     compat_opts:       Compatibility options. See "Differences in default behavior".
-                       Note that only format-sort, format-spec, no-live-chat,
-                       no-attach-info-json, playlist-index, list-formats,
-                       no-direct-merge, embed-thumbnail-atomicparsley,
-                       no-youtube-unavailable-videos, no-youtube-channel-redirect,
-                       works when used via the API
+                       The following options do not work when used through the API:
+                       filename, abort-on-error, multistreams, no-live-chat,
+                       no-playlist-metafiles. Refer to __init__.py for their implementation.
 
     The following parameters are not used by YoutubeDL itself, they are used by
     the downloader (see yt_dlp/downloader/common.py):
-    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
-    noresizebuffer, retries, continuedl, noprogress, consoletitle,
-    xattr_set_filesize, external_downloader_args, hls_use_mpegts,
-    http_chunk_size.
+    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
+    max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle,
+    xattr_set_filesize, external_downloader_args, hls_use_mpegts, http_chunk_size.
 
     The following options are used by the post processors:
     prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
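
For the compat_opts entry above: options outside the listed exceptions can still be passed through the API as a plain list. A hedged example using two option names taken from the list this revision removes:

    from yt_dlp import YoutubeDL

    # 'format-sort' and 'no-youtube-unavailable-videos' are not in the
    # "do not work through the API" list, so they take effect here.
    ydl = YoutubeDL({'compat_opts': ['format-sort', 'no-youtube-unavailable-videos']})
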
@@ -418,11 +421,16 @@ class YoutubeDL(object):
     dynamic_mpd:       Whether to process dynamic DASH manifests (default: True)
     hls_split_discontinuity: Split HLS playlists to different formats at
                        discontinuities such as ad breaks (default: False)
-    youtube_include_dash_manifest: If True (default), DASH manifests and related
+    extractor_args:    A dictionary of arguments to be passed to the extractors.
+                       See "EXTRACTOR ARGUMENTS" for details.
+                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
+    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
+                       If True (default), DASH manifests and related
                        data will be downloaded and processed by extractor.
                        You can reduce network I/O by disabling it if you don't
                        care about DASH. (only for youtube)
-    youtube_include_hls_manifest: If True (default), HLS manifests and related
+    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
+                       If True (default), HLS manifests and related
                        data will be downloaded and processed by extractor.
                        You can reduce network I/O by disabling it if you don't
                        care about HLS. (only for youtube)
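
A small sketch of migrating from the now-deprecated youtube_include_* flags to extractor_args, following the shape given in the docstring above:

    from yt_dlp import YoutubeDL

    # Old (deprecated): YoutubeDL({'youtube_include_dash_manifest': False})
    # New: skip DASH (and optionally HLS) through extractor_args
    ydl = YoutubeDL({'extractor_args': {'youtube': {'skip': ['dash']}}})
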
@@ -442,7 +450,7 @@ class YoutubeDL(object):
     params = None
     _ies = []
     _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
-    __prepare_filename_warned = False
+    _reported_warnings = set()
     _first_webpage_request = True
     _download_retcode = None
     _num_downloads = None
@@ -457,7 +465,7 @@ def __init__(self, params=None, auto_init=True):
         self._ies = []
         self._ies_instances = {}
         self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
-        self.__prepare_filename_warned = False
+        self._reported_warnings = set()
         self._first_webpage_request = True
         self._post_hooks = []
         self._progress_hooks = []
@@ -747,11 +755,15 @@ def to_screen(self, message, skip_eol=False):
         self.to_stdout(
             message, skip_eol, quiet=self.params.get('quiet', False))
 
-    def report_warning(self, message):
+    def report_warning(self, message, only_once=False):
         '''
         Print the message to stderr, it will be prefixed with 'WARNING:'
         If stderr is a tty file the 'WARNING:' will be colored
         '''
+        if only_once:
+            if message in self._reported_warnings:
+                return
+            self._reported_warnings.add(message)
         if self.params.get('logger') is not None:
             self.params['logger'].warning(message)
         else:
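
The new only_once flag deduplicates warnings by exact message text via _reported_warnings. A quick sketch of the effect, assuming ydl is a YoutubeDL instance:

    ydl.report_warning('Slow connection detected', only_once=True)  # printed
    ydl.report_warning('Slow connection detected', only_once=True)  # skipped, already reported
    ydl.report_warning('Slow connection detected')                  # printed; dedup only applies with only_once
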
@@ -1009,13 +1021,12 @@ def prepare_filename(self, info_dict, dir_type='', warn=False):
 
         filename = self._prepare_filename(info_dict, dir_type or 'default')
 
-        if warn and not self.__prepare_filename_warned:
+        if warn:
             if not self.params.get('paths'):
                 pass
             elif filename == '-':
-                self.report_warning('--paths is ignored when an outputting to stdout')
+                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
             elif os.path.isabs(filename):
-                self.report_warning('--paths is ignored since an absolute path is given in output template')
+                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
-            self.__prepare_filename_warned = True
         if filename == '-' or not filename:
             return filename
@@ -1144,6 +1156,10 @@ def wrapper(self, *args, **kwargs):
                 self.report_error(msg)
             except ExtractorError as e:  # An error we somewhat expected
                 self.report_error(compat_str(e), e.format_traceback())
+            except ThrottledDownload:
+                self.to_stderr('\r')
+                self.report_warning('The download speed is below throttle limit. Re-extracting data')
+                return wrapper(self, *args, **kwargs)
             except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
                 raise
             except Exception as e:
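
The wrapper now restarts the whole extraction when the downloader raises ThrottledDownload. A simplified, standalone sketch of that retry-on-exception pattern (it assumes it decorates a YoutubeDL method; the real wrapper handles several other exceptions, as shown above):

    import functools

    from yt_dlp.utils import ThrottledDownload  # raised when the speed stays below the throttle limit

    def retry_on_throttle(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except ThrottledDownload:
                self.report_warning('The download speed is below throttle limit. Re-extracting data')
                return wrapper(self, *args, **kwargs)  # re-extract and try again
        return wrapper
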
@@ -1171,13 +1187,17 @@ def __extract_info(self, url, ie, download, extra_info, process):
             return ie_result
 
     def add_default_extra_info(self, ie_result, ie, url):
-        self.add_extra_info(ie_result, {
-            'extractor': ie.IE_NAME,
-            'webpage_url': url,
-            'original_url': url,
-            'webpage_url_basename': url_basename(url),
-            'extractor_key': ie.ie_key(),
-        })
+        if url is not None:
+            self.add_extra_info(ie_result, {
+                'webpage_url': url,
+                'original_url': url,
+                'webpage_url_basename': url_basename(url),
+            })
+        if ie is not None:
+            self.add_extra_info(ie_result, {
+                'extractor': ie.IE_NAME,
+                'extractor_key': ie.ie_key(),
+            })
 
     def process_ie_result(self, ie_result, download=True, extra_info={}):
         """
@@ -1196,8 +1216,8 @@ def process_ie_result(self, ie_result, download=True, extra_info={}):
                     or extract_flat is True):
                 info_copy = ie_result.copy()
                 self.add_extra_info(info_copy, extra_info)
-                self.add_default_extra_info(
-                    info_copy, self.get_info_extractor(ie_result.get('ie_key')), ie_result['url'])
+                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
+                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                 return ie_result
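
The switch to try_get above means a missing or unknown ie_key simply produces ie=None instead of raising. Roughly, assuming ydl is the YoutubeDL instance and ie_result a flat playlist entry:

    from yt_dlp.utils import try_get

    # try_get(value, getter) returns getter(value), or None if the getter raises
    # AttributeError/KeyError/TypeError/IndexError - so add_default_extra_info can
    # skip the extractor fields when no extractor is known.
    ie = try_get(ie_result.get('ie_key'), ydl.get_info_extractor)
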
 
@@ -1347,13 +1367,18 @@ def iter_playlistitems(format):
         if not isinstance(ie_entries, (list, PagedList)):
             ie_entries = LazyList(ie_entries)
 
+        def get_entry(i):
+            return YoutubeDL.__handle_extraction_exceptions(
+                lambda self, i: ie_entries[i - 1]
+            )(self, i)
+
         entries = []
         for i in playlistitems or itertools.count(playliststart):
             if playlistitems is None and playlistend is not None and playlistend < i:
                 break
             entry = None
             try:
-                entry = ie_entries[i - 1]
+                entry = get_entry(i)
                 if entry is None:
                     raise EntryNotInPlaylist()
             except (IndexError, EntryNotInPlaylist):
@@ -1743,6 +1768,9 @@ def _merge(formats_pair):
             return new_dict
 
         def _check_formats(formats):
+            if not check_formats:
+                yield from formats
+                return
             for f in formats:
                 self.to_screen('[info] Testing format %s' % f['format_id'])
                 temp_file = tempfile.NamedTemporaryFile(
@@ -1750,16 +1778,16 @@ def _check_formats(formats):
                     dir=self.get_output_path('temp') or None)
                 temp_file.close()
                 try:
-                    dl, _ = self.dl(temp_file.name, f, test=True)
-                except (ExtractorError, IOError, OSError, ValueError) + network_exceptions:
-                    dl = False
+                    success, _ = self.dl(temp_file.name, f, test=True)
+                except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
+                    success = False
                 finally:
                     if os.path.exists(temp_file.name):
                         try:
                             os.remove(temp_file.name)
                         except OSError:
                             self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
-                if dl:
+                if success:
                     yield f
                 else:
                     self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
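
A condensed sketch of the _check_formats control flow after this change: with checking disabled it degenerates to a pass-through generator, otherwise every format is probed and only the downloadable ones are yielded. Names are simplified; this is not the literal method body:

    def downloadable_only(formats, enabled, probe):
        if not enabled:
            yield from formats       # pass everything through untouched
            return
        for f in formats:
            if probe(f):             # stands in for the test=True download to a temp file
                yield f
            # formats that fail the probe are dropped, as above
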
@@ -1770,8 +1798,7 @@ def _build_selector_function(selector):
 
                 def selector_function(ctx):
                     for f in fs:
-                        for format in f(ctx):
-                            yield format
+                        yield from f(ctx)
                 return selector_function
 
             elif selector.type == GROUP:  # ()
@@ -1787,22 +1814,24 @@ def selector_function(ctx):
                             return picked_formats
                     return []
 
+            elif selector.type == MERGE:  # +
+                selector_1, selector_2 = map(_build_selector_function, selector.selector)
+
+                def selector_function(ctx):
+                    for pair in itertools.product(
+                            selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
+                        yield _merge(pair)
+
             elif selector.type == SINGLE:  # atom
                 format_spec = selector.selector or 'best'
 
                 # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                 if format_spec == 'all':
                     def selector_function(ctx):
-                        formats = list(ctx['formats'])
-                        if check_formats:
-                            formats = _check_formats(formats)
-                        for f in formats:
-                            yield f
+                        yield from _check_formats(ctx['formats'])
                 elif format_spec == 'mergeall':
                     def selector_function(ctx):
-                        formats = ctx['formats']
-                        if check_formats:
-                            formats = list(_check_formats(formats))
+                        formats = list(_check_formats(ctx['formats']))
                         if not formats:
                             return
                         merged_format = formats[-1]
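
The relocated MERGE branch pairs every format produced by the left selector with every format from the right one; conceptually it is just itertools.product over the two result sets. A toy illustration with plain dicts:

    import itertools

    videos = [{'format_id': '137'}, {'format_id': '136'}]   # hypothetical video-only formats
    audios = [{'format_id': '140'}]                          # hypothetical audio-only format

    # 'bestvideo+bestaudio'-style selection considers every (video, audio) pair
    for video, audio in itertools.product(videos, audios):
        print(video['format_id'] + '+' + audio['format_id'])  # 137+140, 136+140
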
@@ -1840,29 +1869,17 @@ def selector_function(ctx):
 
                     def selector_function(ctx):
                         formats = list(ctx['formats'])
-                        if not formats:
-                            return
                         matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                         if format_fallback and ctx['incomplete_formats'] and not matches:
                             # for extractors with incomplete formats (audio only (soundcloud)
                             # or video only (imgur)) best/worst will fallback to
                             # best/worst {video,audio}-only format
                             matches = formats
-                        if format_reverse:
-                            matches = matches[::-1]
-                        if check_formats:
-                            matches = list(itertools.islice(_check_formats(matches), format_idx))
-                        n = len(matches)
-                        if -n <= format_idx - 1 < n:
+                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
+                        try:
                             yield matches[format_idx - 1]
-
-            elif selector.type == MERGE:        # +
-                selector_1, selector_2 = map(_build_selector_function, selector.selector)
-
-                def selector_function(ctx):
-                    for pair in itertools.product(
-                            selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
-                        yield _merge(pair)
+                        except IndexError:
+                            return
 
             filters = [self._build_format_filter(f) for f in selector.filters]
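
The rewritten SINGLE selector leans on LazyList: checked formats are produced lazily, indexing consumes only as many as needed, and an out-of-range index raises IndexError instead of needing an explicit bounds check. A small sketch of that behaviour:

    from yt_dlp.utils import LazyList

    def checked(values):
        for v in values:
            print('checking', v)   # stands in for the per-format test download
            yield v

    matches = LazyList(checked(['a', 'b', 'c']))
    print(matches[0])              # only 'a' is checked before it is returned
    try:
        print(matches[10])
    except IndexError:             # generator exhausted, like the except in selector_function
        print('no such format')
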
 
@@ -1939,15 +1956,27 @@ def _sanitize_thumbnails(self, info_dict):
                 t.get('id') if t.get('id') is not None else '',
                 t.get('url')))
 
-            def test_thumbnail(t):
-                self.to_screen('[info] Testing thumbnail %s' % t['id'])
-                try:
-                    self.urlopen(HEADRequest(t['url']))
-                except network_exceptions as err:
-                    self.to_screen('[info] Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
-                        t['id'], t['url'], error_to_compat_str(err)))
-                    return False
-                return True
+            def thumbnail_tester():
+                if self.params.get('check_formats'):
+                    test_all = True
+                    to_screen = lambda msg: self.to_screen(f'[info] {msg}')
+                else:
+                    test_all = False
+                    to_screen = self.write_debug
+
+                def test_thumbnail(t):
+                    if not test_all and not t.get('_test_url'):
+                        return True
+                    to_screen('Testing thumbnail %s' % t['id'])
+                    try:
+                        self.urlopen(HEADRequest(t['url']))
+                    except network_exceptions as err:
+                        to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
+                            t['id'], t['url'], error_to_compat_str(err)))
+                        return False
+                    return True
+
+                return test_thumbnail
 
             for i, t in enumerate(thumbnails):
                 if t.get('id') is None:
@@ -1955,8 +1984,11 @@ def test_thumbnail(t):
                 if t.get('width') and t.get('height'):
                     t['resolution'] = '%dx%d' % (t['width'], t['height'])
                 t['url'] = sanitize_url(t['url'])
-            if self.params.get('check_formats'):
-                info_dict['thumbnails'] = reversed(LazyList(filter(test_thumbnail, thumbnails[::-1])))
+
+            if self.params.get('check_formats') is not False:
+                info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
+            else:
+                info_dict['thumbnails'] = thumbnails
 
     def process_video_result(self, info_dict, download=True):
         assert info_dict.get('_type', 'video') == 'video'
@@ -1996,10 +2028,6 @@ def sanitize_numeric_fields(info):
 
         self._sanitize_thumbnails(info_dict)
 
-        if self.params.get('list_thumbnails'):
-            self.list_thumbnails(info_dict)
-            return
-
         thumbnail = info_dict.get('thumbnail')
         thumbnails = info_dict.get('thumbnails')
         if thumbnail:
@@ -2042,13 +2070,6 @@ def sanitize_numeric_fields(info):
         automatic_captions = info_dict.get('automatic_captions')
         subtitles = info_dict.get('subtitles')
 
-        if self.params.get('listsubtitles', False):
-            if 'automatic_captions' in info_dict:
-                self.list_subtitles(
-                    info_dict['id'], automatic_captions, 'automatic captions')
-            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
-            return
-
         info_dict['requested_subtitles'] = self.process_subtitles(
             info_dict['id'], subtitles, automatic_captions)
 
@@ -2136,10 +2157,20 @@ def is_wellformed(f):
 
         info_dict, _ = self.pre_process(info_dict)
 
-        if self.params.get('listformats'):
-            if not info_dict.get('formats'):
-                raise ExtractorError('No video formats found', expected=True)
-            self.list_formats(info_dict)
+        list_only = self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles')
+        if list_only:
+            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
+            if self.params.get('list_thumbnails'):
+                self.list_thumbnails(info_dict)
+            if self.params.get('listformats'):
+                if not info_dict.get('formats'):
+                    raise ExtractorError('No video formats found', expected=True)
+                self.list_formats(info_dict)
+            if self.params.get('listsubtitles'):
+                if 'automatic_captions' in info_dict:
+                    self.list_subtitles(
+                        info_dict['id'], automatic_captions, 'automatic captions')
+                self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
             return
 
         format_selector = self.format_selector
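
With the listing paths unified above, the list options can be combined in a single simulated run. A hedged usage sketch (the URL is a placeholder):

    from yt_dlp import YoutubeDL

    # All requested tables (thumbnails, formats, subtitles) are printed in one pass,
    # after which processing stops without downloading anything.
    opts = {'list_thumbnails': True, 'listformats': True, 'listsubtitles': True}
    with YoutubeDL(opts) as ydl:
        ydl.extract_info('https://example.com/watch?v=xyz', download=False)
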
@@ -2180,6 +2211,8 @@ def is_wellformed(f):
                 raise ExtractorError('Requested format is not available', expected=True)
             else:
                 self.report_warning('Requested format is not available')
+                # Process what we can, even without any available formats.
+                self.process_info(dict(info_dict))
         elif download:
             self.to_screen(
                 '[info] %s: Downloading %d format(s): %s' % (
@@ -2344,7 +2377,7 @@ def process_info(self, info_dict):
         # TODO: backward compatibility, to be removed
         info_dict['fulltitle'] = info_dict['title']
 
-        if 'format' not in info_dict:
+        if 'format' not in info_dict and 'ext' in info_dict:
             info_dict['format'] = info_dict['ext']
 
         if self._match_entry(info_dict) is not None:
@@ -2359,7 +2392,7 @@ def process_info(self, info_dict):
         files_to_move = {}
 
         # Forced printings
-        self.__forced_printings(info_dict, full_filename, incomplete=False)
+        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
 
         if self.params.get('simulate', False):
             if self.params.get('force_write_download_archive', False):
@@ -2584,17 +2617,10 @@ def compatible_formats(formats):
 
                     requested_formats = info_dict['requested_formats']
                     old_ext = info_dict['ext']
-                    if self.params.get('merge_output_format') is None:
-                        if not compatible_formats(requested_formats):
-                            info_dict['ext'] = 'mkv'
-                            self.report_warning(
-                                'Requested formats are incompatible for merge and will be merged into mkv.')
-                        if (info_dict['ext'] == 'webm'
-                                and self.params.get('writethumbnail', False)
-                                and info_dict.get('thumbnails')):
-                            info_dict['ext'] = 'mkv'
-                            self.report_warning(
-                                'webm doesn\'t support embedding a thumbnail, mkv will be used.')
+                    if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
+                        info_dict['ext'] = 'mkv'
+                        self.report_warning(
+                            'Requested formats are incompatible for merge and will be merged into mkv.')
 
                     def correct_ext(filename):
                         filename_real_ext = os.path.splitext(filename)[1][1:]
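
Since the webm-specific thumbnail special case is dropped here, the merge container is now decided only by format compatibility and by merge_output_format. A brief sketch of forcing the container explicitly:

    from yt_dlp import YoutubeDL

    # With merge_output_format set, the automatic "incompatible formats -> mkv"
    # fallback above is never consulted; the requested container is used as-is.
    ydl = YoutubeDL({'format': 'bestvideo+bestaudio', 'merge_output_format': 'mkv'})
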
@@ -2723,6 +2749,8 @@ def ffmpeg_fixup(cndn, msg, cls):
                     downloader = (get_suitable_downloader(info_dict, self.params).__name__
                                   if 'protocol' in info_dict else None)
                     ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', FFmpegFixupM3u8PP)
+                    ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', FFmpegFixupTimestampPP)
+                    ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed duration detected', FFmpegFixupDurationPP)
 
                 fixup()
                 try:
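
Both new fixups key off the downloader name. The body of the local ffmpeg_fixup helper lies outside this hunk, so the following is only an assumed, self-contained toy of the pattern it implements (warn and queue a fixup postprocessor when the condition holds):

    queued = []

    def ffmpeg_fixup(cndn, msg, cls):
        # Assumed behaviour: do nothing unless the condition holds, otherwise
        # warn and remember the postprocessor class so it runs on the file later.
        if not cndn:
            return
        print('WARNING: ' + msg)
        queued.append(cls)

    downloader = 'WebSocketFragmentFD'
    ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'malformed timestamps detected', 'FFmpegFixupTimestampPP')
    ffmpeg_fixup(downloader == 'HlsFD', 'malformed AAC bitstream detected', 'FFmpegFixupM3u8PP')
    print(queued)  # ['FFmpegFixupTimestampPP']
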
@@ -2784,7 +2812,7 @@ def download_with_info_file(self, info_filename):
             info = self.filter_requested_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
         try:
             self.process_ie_result(info, download=True)
-        except (DownloadError, EntryNotInPlaylist):
+        except (DownloadError, EntryNotInPlaylist, ThrottledDownload):
             webpage_url = info.get('webpage_url')
             if webpage_url is not None:
                 self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
@@ -2988,22 +3016,11 @@ def _format_note(self, fdict):
             res += '~' + format_bytes(fdict['filesize_approx'])
         return res
 
-    def _format_note_table(self, f):
-        def join_fields(*vargs):
-            return ', '.join((val for val in vargs if val != ''))
-
-        return join_fields(
-            'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
-            format_field(f, 'language', '[%s]'),
-            format_field(f, 'format_note'),
-            format_field(f, 'container', ignore=(None, f.get('ext'))),
-            format_field(f, 'asr', '%5dHz'))
-
     def list_formats(self, info_dict):
         formats = info_dict.get('formats', [info_dict])
         new_format = (
             'list-formats' not in self.params.get('compat_opts', [])
-            and self.params.get('list_formats_as_table', True) is not False)
+            and self.params.get('listformats_table', True) is not False)
         if new_format:
             table = [
                 [
@@ -3021,11 +3038,15 @@ def list_formats(self, info_dict):
                     format_field(f, 'acodec', default='unknown').replace('none', ''),
                     format_field(f, 'abr', '%3dk'),
                     format_field(f, 'asr', '%5dHz'),
-                    self._format_note_table(f)]
-                for f in formats
-                if f.get('preference') is None or f['preference'] >= -1000]
+                    ', '.join(filter(None, (
+                        'UNSUPPORTED' if f.get('ext') in ('f4f', 'f4m') else '',
+                        format_field(f, 'language', '[%s]'),
+                        format_field(f, 'format_note'),
+                        format_field(f, 'container', ignore=(None, f.get('ext'))),
+                        format_field(f, 'asr', '%5dHz')))),
+                ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
             header_line = ['ID', 'EXT', 'RESOLUTION', 'FPS', '|', ' FILESIZE', '  TBR', 'PROTO',
-                           '|', 'VCODEC', '  VBR', 'ACODEC', ' ABR', ' ASR', 'NOTE']
+                           '|', 'VCODEC', '  VBR', 'ACODEC', ' ABR', ' ASR', 'MORE INFO']
         else:
             table = [
                 [
@@ -3038,12 +3059,9 @@ def list_formats(self, info_dict):
             header_line = ['format code', 'extension', 'resolution', 'note']
 
         self.to_screen(
-            '[info] Available formats for %s:\n%s' % (info_dict['id'], render_table(
-                header_line,
-                table,
-                delim=new_format,
-                extraGap=(0 if new_format else 1),
-                hideEmpty=new_format)))
+            '[info] Available formats for %s:' % info_dict['id'])
+        self.to_stdout(render_table(
+            header_line, table, delim=new_format, extraGap=(0 if new_format else 1), hideEmpty=new_format))
 
     def list_thumbnails(self, info_dict):
         thumbnails = list(info_dict.get('thumbnails'))
@@ -3053,7 +3071,7 @@ def list_thumbnails(self, info_dict):
 
         self.to_screen(
             '[info] Thumbnails for %s:' % info_dict['id'])
-        self.to_screen(render_table(
+        self.to_stdout(render_table(
             ['ID', 'width', 'height', 'URL'],
             [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
 
@@ -3065,12 +3083,12 @@ def list_subtitles(self, video_id, subtitles, name='subtitles'):
             'Available %s for %s:' % (name, video_id))
 
         def _row(lang, formats):
-            exts, names = zip(*((f['ext'], f.get('name', 'unknown')) for f in reversed(formats)))
+            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
             if len(set(names)) == 1:
                 names = [] if names[0] == 'unknown' else names[:1]
             return [lang, ', '.join(names), ', '.join(exts)]
 
-        self.to_screen(render_table(
+        self.to_stdout(render_table(
             ['Language', 'Name', 'Formats'],
             [_row(lang, formats) for lang, formats in subtitles.items()],
             hideEmpty=True))
@@ -3248,7 +3266,7 @@ def _write_thumbnails(self, info_dict, filename):  # return the extensions
         multiple = write_all and len(thumbnails) > 1
 
         ret = []
-        for t in thumbnails[::1 if write_all else -1]:
+        for t in thumbnails[::-1]:
             thumb_ext = determine_ext(t['url'], 'jpg')
             suffix = '%s.' % t['id'] if multiple else ''
             thumb_display_id = '%s ' % t['id'] if multiple else ''