from __future__ import absolute_import, unicode_literals

from string import ascii_letters

from .compat import (
    compat_get_terminal_size,
    compat_tokenize_tokenize,
    compat_urllib_request,
    compat_urllib_request_DataHandler,
    windows_enable_vt_mode,
)
from .cookies import load_cookies
from .utils import (
    PerRequestProxyHandler,
    register_socks_protocols,
    supports_terminal_sequences,
    UnavailableVideoError,
    YoutubeDLCookieProcessor,
    YoutubeDLRedirectHandler,
)
from .cache import Cache
from .minicurses import format_text
from .extractor import (
    gen_extractor_classes,
    get_info_extractor,
    _PLUGIN_CLASSES as plugin_extractors,
)
from .extractor.openload import PhantomJSwrapper
from .downloader import (
    get_suitable_downloader,
    shorten_protocol_name,
)
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
    FFmpegFixupDurationPP,
    FFmpegFixupStretchedPP,
    FFmpegFixupTimestampPP,
    MoveFilesAfterDownloadPP,
    _PLUGIN_CLASSES as plugin_postprocessors,
)
from .update import detect_variant
from .version import __version__, RELEASE_GIT_HEAD

if compat_os_name == 'nt':
    import ctypes
class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".
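
    A minimal usage sketch (illustrative; the URL and the options shown
    here are placeholders, not defaults):

        with YoutubeDL({'outtmpl': '%(title)s.%(ext)s'}) as ydl:
            ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])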

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    ap_mso:            Adobe Pass multiple-system operator identifier.
    ap_username:       Multiple-system operator account username.
    ap_password:       Multiple-system operator account password.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    no_warnings:       Do not print out anything for warnings.
    forceprint:        A list of templates to force print.
    forceurl:          Force printing final URL. (Deprecated)
    forcetitle:        Force printing title. (Deprecated)
    forceid:           Force printing ID. (Deprecated)
    forcethumbnail:    Force printing thumbnail URL. (Deprecated)
    forcedescription:  Force printing description. (Deprecated)
    forcefilename:     Force printing final filename. (Deprecated)
    forceduration:     Force printing duration. (Deprecated)
    forcejson:         Force printing info_dict as JSON.
    dump_single_json:  Force printing the info_dict of the whole playlist
                       (or video) as a single JSON line.
    force_write_download_archive: Force writing download archive regardless
                       of 'skip_download' or 'simulate'.
    simulate:          Do not download the video files. If unset (or None),
                       simulate only if listsubtitles, listformats or
                       list_thumbnails is used.
    format:            Video format code. See "FORMAT SELECTION" for more details.
                       You can also pass a function. The function takes 'ctx' as
                       argument and returns the formats to download.
                       See "build_format_selector" for an implementation.
    allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
    ignore_no_formats_error: Ignore "No video formats" error. Useful for
                       extracting metadata even if the video is not actually
                       available for download (experimental).
    format_sort:       A list of fields by which to sort the video formats.
                       See "Sorting Formats" for more details.
    format_sort_force: Force the given format_sort. See "Sorting Formats"
                       for more details.
    allow_multiple_video_streams: Allow multiple video streams to be merged
                       into a single file.
    allow_multiple_audio_streams: Allow multiple audio streams to be merged
                       into a single file.
    check_formats:     Whether to test if the formats are downloadable.
                       Can be True (check all), False (check none),
                       'selected' (check selected formats),
                       or None (check only if requested by extractor).
    paths:             Dictionary of output paths. The allowed keys are 'home',
                       'temp' and the keys of OUTTMPL_TYPES (in utils.py).
    outtmpl:           Dictionary of templates for output names. Allowed keys
                       are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
                       For compatibility with youtube-dl, a single string can also be used.
    outtmpl_na_placeholder: Placeholder for unavailable meta fields.
    restrictfilenames: Do not allow "&" and spaces in file names.
    trim_file_name:    Limit length of filename (extension excluded).
    windowsfilenames:  Force the filenames to be Windows compatible.
    ignoreerrors:      Do not stop on download/postprocessing errors.
                       Can be 'only_download' to ignore only download errors.
                       Default is 'only_download' for CLI, but False for API.
    skip_playlist_after_errors: Number of allowed failures until the rest of
                       the playlist is skipped.
    force_generic_extractor: Force downloader to use the generic extractor.
    overwrites:        Overwrite all video and metadata files if True,
                       overwrite only non-video files if None
                       and don't overwrite any file if False.
                       For compatibility with youtube-dl,
                       "nooverwrites" may also be used instead.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    playlist_items:    Specific indices of playlist to download.
    playlistreverse:   Download playlist items in reverse order.
    playlistrandom:    Download playlist items in random order.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logger:            Log messages to a logging.Logger instance.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in the console window's titlebar.
    writedescription:  Write the video description to a .description file.
    writeinfojson:     Write the video metadata to a .info.json file.
    clean_infojson:    Remove private fields from the infojson.
    getcomments:       Extract video comments. This will not be written to disk
                       unless writeinfojson is also given.
    writeannotations:  Write the video annotations to a .annotations.xml file.
    writethumbnail:    Write the thumbnail image to a file.
    allow_playlist_files: Whether to write playlists' description, infojson etc.
                       also to disk when using the 'write*' options.
    write_all_thumbnails: Write all thumbnail formats to files.
    writelink:         Write an internet shortcut file, depending on the
                       current platform (.url/.webloc/.desktop).
    writeurllink:      Write a Windows internet shortcut file (.url).
    writewebloclink:   Write a macOS internet shortcut file (.webloc).
    writedesktoplink:  Write a Linux internet shortcut file (.desktop).
    writesubtitles:    Write the video subtitles to a file.
    writeautomaticsub: Write the automatically generated subtitles to a file.
    allsubtitles:      Deprecated - Use subtitleslangs = ['all'].
                       Downloads all the subtitles of the video
                       (requires writesubtitles or writeautomaticsub).
    listsubtitles:     Lists all available subtitles for the video.
    subtitlesformat:   The format code for subtitles.
    subtitleslangs:    List of languages of the subtitles to download (can be regex).
                       The list may contain "all" to refer to all the available
                       subtitles. The language can be prefixed with a "-" to
                       exclude it from the requested languages. Eg: ['all', '-live_chat']
    keepvideo:         Keep the video file after post-processing.
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file.
    cachedir:          Location of the cache files in the filesystem.
                       False to disable filesystem cache.
    noplaylist:        Download single video instead of a playlist if in doubt.
    age_limit:         An integer representing the user's age in years.
                       Unsuitable videos for the given age are skipped.
    min_views:         An integer representing the minimum view count the video
                       must have in order to not be skipped.
                       Videos without view count information are always
                       downloaded. None for no limit.
    max_views:         An integer representing the maximum view count.
                       Videos that are more popular than that are not
                       downloaded.
                       Videos without view count information are always
                       downloaded. None for no limit.
    download_archive:  File name of a file where all downloads are recorded.
                       Videos already present in the file are not downloaded
                       again.
    break_on_existing: Stop the download process after attempting to download a
                       file that is in the archive.
    break_on_reject:   Stop the download process when encountering a video that
                       has been filtered out.
    break_per_url:     Whether break_on_reject and break_on_existing
                       should act on each input URL as opposed to for the entire queue.
    cookiefile:        File name where cookies should be read from and dumped to.
    cookiesfrombrowser: A tuple containing the name of the browser and the profile
                       name/path from where cookies are loaded.
                       Eg: ('chrome', ) or ('vivaldi', 'default')
    nocheckcertificate: Do not verify SSL certificates.
    prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                       At the moment, this is only supported by YouTube.
    proxy:             URL of the proxy server to use.
    geo_verification_proxy: URL of the proxy to use for IP address verification
                       on geo-restricted sites.
    socket_timeout:    Time to wait for unresponsive hosts, in seconds.
    bidi_workaround:   Work around buggy terminals without bidirectional text
                       support, using fribidi.
    debug_printtraffic: Print out sent and received HTTP traffic.
    include_ads:       Download ads as well.
    default_search:    Prepend this string if an input URL is not valid.
                       'auto' for elaborate guessing.
    encoding:          Use this encoding instead of the system-specified one.
    extract_flat:      Do not resolve URLs, return the immediate result.
                       Pass in 'in_playlist' to only show this behavior for
                       playlist items.
    wait_for_video:    If given, wait for scheduled streams to become available.
                       The value should be a tuple containing the range
                       (min_secs, max_secs) to wait between retries.
    postprocessors:    A list of dictionaries, each with an entry
                       * key:  The name of the postprocessor. See
                               yt_dlp/postprocessor/__init__.py for a list.
                       * when: When to run the postprocessor. Can be one of
                               pre_process|before_dl|post_process|after_move.
                               Assumed to be 'post_process' if not given.
    post_hooks:        Deprecated - Register a custom postprocessor instead.
                       A list of functions that get called as the final step
                       for each video file, after all postprocessors have been
                       called. The filename will be passed as the only argument.
    progress_hooks:    A list of functions that get called on download
                       progress, with a dictionary with the entries
                       * status: One of "downloading", "error", or "finished".
                                 Check this first and ignore unknown values.
                       * info_dict: The extracted info_dict

                       If status is one of "downloading", or "finished", the
                       following properties may also be present:
                       * filename: The final filename (always present)
                       * tmpfilename: The filename we're currently writing to
                       * downloaded_bytes: Bytes on disk
                       * total_bytes: Size of the whole file, None if unknown
                       * total_bytes_estimate: Guess of the eventual file size,
                                               None if unavailable.
                       * elapsed: The number of seconds since download started.
                       * eta: The estimated time in seconds, None if unknown
                       * speed: The download speed in bytes/second, None if
                                unknown
                       * fragment_index: The counter of the currently
                                         downloaded video fragment.
                       * fragment_count: The number of fragments (= individual
                                         files that will be merged)

                       Progress hooks are guaranteed to be called at least once
                       (with status "finished") if the download is successful
                       (an illustrative sketch follows this docstring).
    postprocessor_hooks: A list of functions that get called on postprocessing
                       progress, with a dictionary with the entries
                       * status: One of "started", "processing", or "finished".
                                 Check this first and ignore unknown values.
                       * postprocessor: Name of the postprocessor
                       * info_dict: The extracted info_dict

                       Progress hooks are guaranteed to be called at least twice
                       (with status "started" and "finished") if the processing is successful.
    merge_output_format: Extension to use when merging formats.
    final_ext:         Expected final extension; used to detect when the file was
                       already downloaded and converted.
    fixup:             Automatically correct known faults of the file.
                       One of:
                       - "never": do nothing
                       - "warn": only emit a warning
                       - "detect_or_warn": check whether we can do anything
                                           about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
    call_home:         Boolean, true iff we are allowed to contact the
                       yt-dlp servers for debugging. (BROKEN)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction.
    sleep_interval:    Number of seconds to sleep before each download when
                       used alone or a lower bound of a range for randomized
                       sleep before each download (minimum possible number
                       of seconds to sleep) when used along with
                       max_sleep_interval.
    max_sleep_interval: Upper bound of a range for randomized sleep before each
                       download (maximum possible number of seconds to sleep).
                       Must only be used along with sleep_interval.
                       Actual sleep time will be a random float from range
                       [sleep_interval; max_sleep_interval].
    sleep_interval_subtitles: Number of seconds to sleep before each subtitle download.
    listformats:       Print an overview of available video formats and exit.
    list_thumbnails:   Print a table of all thumbnails and exit.
    match_filter:      A function that gets called with the info_dict of
                       every video.
                       If it returns a message, the video is ignored.
                       If it returns None, the video is downloaded.
                       match_filter_func in utils.py is one example for this.
    no_color:          Do not emit color codes in output.
    geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
                       HTTP header.
    geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for
                       explicit geographic restriction bypassing via faking
                       X-Forwarded-For HTTP header.
    geo_bypass_ip_block: IP range in CIDR notation that will be used similarly to
                       geo_bypass_country.

    The following options determine which downloader is picked:
    external_downloader: A dictionary of protocol keys and the executable of the
                       external downloader to use for it. The allowed protocols
                       are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                       Set the value to 'native' to use the native downloader.
    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
                       or {'m3u8': 'ffmpeg'} instead.
                       Use the native HLS downloader instead of ffmpeg/avconv
                       if True, otherwise use ffmpeg/avconv if False, otherwise
                       use downloader suggested by extractor if None.
    compat_opts:       Compatibility options. See "Differences in default behavior".
                       The following options do not work when used through the API:
                       filename, abort-on-error, multistreams, no-live-chat, format-sort,
                       no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
                       Refer to __init__.py for their implementation.
    progress_template: Dictionary of templates for progress outputs.
                       Allowed keys are 'download', 'postprocess',
                       'download-title' (console title) and 'postprocess-title'.
                       The template is mapped on a dictionary with keys 'progress' and 'info'.

    The following parameters are not used by YoutubeDL itself, they are used by
    the downloader (see yt_dlp/downloader/common.py):
    nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
    max_filesize, test, noresizebuffer, retries, fragment_retries, continuedl,
    noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
    external_downloader_args, concurrent_fragment_downloads.

    The following options are used by the post processors:
    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
                       otherwise prefer ffmpeg. (avconv support is deprecated)
    ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                       to the binary or its containing directory.
    postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
                       and a list of additional command-line arguments for the
                       postprocessor/executable. The dict can also have "PP+EXE" keys
                       which are used when the given exe is used by the given PP.
                       Use 'default' as the name for arguments to be passed to all PP.
                       For compatibility with youtube-dl, a single list of args
                       can also be used.

    The following options are used by the extractors:
    extractor_retries: Number of times to retry for known errors.
    dynamic_mpd:       Whether to process dynamic DASH manifests (default: True).
    hls_split_discontinuity: Split HLS playlists to different formats at
                       discontinuities such as ad breaks (default: False).
    extractor_args:    A dictionary of arguments to be passed to the extractors.
                       See "EXTRACTOR ARGUMENTS" for details.
                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
                       If True (default), DASH manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about DASH. (only for youtube)
    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
                       If True (default), HLS manifests and related
                       data will be downloaded and processed by the extractor.
                       You can reduce network I/O by disabling it if you don't
                       care about HLS. (only for youtube)
    """
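
    # Illustrative sketch of an options dict wired together from the
    # parameters documented above (every value below is an assumption for
    # demonstration, not a default):
    #
    #     params = {
    #         'format': 'bestvideo+bestaudio/best',
    #         'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s'},
    #         'progress_hooks': [lambda d: print(d['status'])],
    #         'postprocessors': [{'key': 'FFmpegMetadata', 'when': 'post_process'}],
    #     }
    #     with YoutubeDL(params) as ydl:
    #         ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])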

    _NUMERIC_FIELDS = set((
        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
        'timestamp', 'release_timestamp',
        'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
        'average_rating', 'comment_count', 'age_limit',
        'start_time', 'end_time',
        'chapter_number', 'season_number', 'episode_number',
        'track_number', 'disc_number', 'release_year',
    ))

    _format_selection_exts = {
        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
        'video': {'mp4', 'flv', 'webm', '3gp'},
        'storyboards': {'mhtml'},
    }

    _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
    _printed_messages = set()
    _first_webpage_request = True
    _download_retcode = None
    _num_downloads = None
    _playlist_urls = set()

    def __init__(self, params=None, auto_init=True):
        """Create a FileDownloader object with the given options.
        @param auto_init    Whether to load the default extractors and print header (if verbose).
                            Set to 'no_verbose_header' to not print the header
        """
        if params is None:
            params = {}
        self._ies = {}
        self._ies_instances = {}
        self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
        self._printed_messages = set()
        self._first_webpage_request = True
        self._post_hooks = []
        self._progress_hooks = []
        self._postprocessor_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self._err_file = sys.stderr
        self.params = params
        self.cache = Cache(self)

        windows_enable_vt_mode()
        self._allow_colors = {
            'screen': not self.params.get('no_color') and supports_terminal_sequences(self._screen_file),
            'err': not self.params.get('no_color') and supports_terminal_sequences(self._err_file),
        }

        if sys.version_info < (3, 6):
            self.report_warning(
                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

        if self.params.get('allow_unplayable_formats'):
            self.report_warning(
                f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
                'This is a developer option intended for debugging. \n'
                '         If you experience any issues while using this option, '
                f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

        def check_deprecated(param, option, suggestion):
            if self.params.get(param) is not None:
                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
                return True
            return False

        if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
            if self.params.get('geo_verification_proxy') is None:
                self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

        check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
        check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
        check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

        for msg in self.params.get('_warnings', []):
            self.report_warning(msg)
        for msg in self.params.get('_deprecation_warnings', []):
            self.deprecation_warning(msg)

        if 'list-formats' in self.params.get('compat_opts', []):
            self.params['listformats_table'] = False

        if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
            # nooverwrites was unnecessarily changed to overwrites
            # in 0c3d0f51778b153f65c21906031c2e091fcfb641
            # This ensures compatibility with both keys
            self.params['overwrites'] = not self.params['nooverwrites']
        elif self.params.get('overwrites') is None:
            self.params.pop('overwrites', None)
        else:
            self.params['nooverwrites'] = not self.params['overwrites']

        if params.get('bidi_workaround', False):
            try:
                import pty
                master, slave = pty.openpty()
                width = compat_get_terminal_size().columns
                if width is None:
                    width_args = []
                else:
                    width_args = ['-w', str(width)]
                sp_kwargs = dict(
                    stdin=subprocess.PIPE,
                    stdout=slave,
                    stderr=self._err_file)
                try:
                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
                except OSError:
                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
                self._output_channel = os.fdopen(master, 'rb')
            except OSError as ose:
                if ose.errno == errno.ENOENT:
                    self.report_warning(
                        'Could not find fribidi executable, ignoring --bidi-workaround. '
                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
                else:
                    raise

        if (sys.platform != 'win32'
                and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
                and not params.get('restrictfilenames', False)):
            # Unicode filesystem API will throw errors (#1474, #13027)
            self.report_warning(
                'Assuming --restrict-filenames since file system encoding '
                'cannot encode all characters. '
                'Set the LC_ALL environment variable to fix this.')
            self.params['restrictfilenames'] = True

        self.outtmpl_dict = self.parse_outtmpl()

        # Creating format selector here allows us to catch syntax errors before the extraction
        self.format_selector = (
            None if self.params.get('format') is None
            else self.params['format'] if callable(self.params['format'])
            else self.build_format_selector(self.params['format']))

        if auto_init:
            if auto_init != 'no_verbose_header':
                self.print_debug_header()
            self.add_default_info_extractors()

        for pp_def_raw in self.params.get('postprocessors', []):
            pp_def = dict(pp_def_raw)
            when = pp_def.pop('when', 'post_process')
            pp_class = get_postprocessor(pp_def.pop('key'))
            pp = pp_class(self, **compat_kwargs(pp_def))
            self.add_post_processor(pp, when=when)

        hooks = {
            'post_hooks': self.add_post_hook,
            'progress_hooks': self.add_progress_hook,
            'postprocessor_hooks': self.add_postprocessor_hook,
        }
        for opt, fn in hooks.items():
            for ph in self.params.get(opt, []):
                fn(ph)

        register_socks_protocols()

        def preload_download_archive(fn):
            """Preload the archive, if any is specified"""
            if fn is None:
                return False
            self.write_debug(f'Loading archive file {fn!r}')
            try:
                with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                    for line in archive_file:
                        self.archive.add(line.strip())
            except IOError as ioe:
                if ioe.errno != errno.ENOENT:
                    raise
                return False
            return True

        self.archive = set()
        preload_download_archive(self.params.get('download_archive'))
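
        # The download archive read above is a plain text file with one
        # "extractor video_id" pair per line, e.g. (illustrative values):
        #     youtube BaW_jenozKc
        #     vimeo 56015672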

    def warn_if_short_id(self, argv):
        # short YouTube ID starting with dash?
        idxs = [
            i for i, a in enumerate(argv)
            if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
        if idxs:
            correct_argv = (
                ['yt-dlp']
                + [a for i, a in enumerate(argv) if i not in idxs]
                + ['--'] + [argv[i] for i in idxs]
            )
            self.report_warning(
                'Long argument string detected. '
                'Use -- to separate parameters and URLs, like this:\n%s' %
                args_to_str(correct_argv))
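
    # Illustrative rewrite suggested by the warning above: an argv of
    # ['-BaW_jenozKc'] is echoed back as ['yt-dlp', '--', '-BaW_jenozKc'],
    # so the leading dash is not parsed as an option.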

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        ie_key = ie.ie_key()
        self._ies[ie_key] = ie
        if not isinstance(ie, type):
            self._ies_instances[ie_key] = ie
            ie.set_downloader(self)

    def _get_info_extractor_class(self, ie_key):
        ie = self._ies.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)
            self.add_info_extractor(ie)
        return ie

    def get_info_extractor(self, ie_key):
        """
        Get an instance of an IE with name ie_key, it will try to get one from
        the _ies list, if there's no instance it will create a new one and add
        it to the extractor list.
        """
        ie = self._ies_instances.get(ie_key)
        if ie is None:
            ie = get_info_extractor(ie_key)()
            self.add_info_extractor(ie)
        return ie

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractor_classes():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp, when='post_process'):
        """Add a PostProcessor object to the end of the chain."""
        self._pps[when].append(pp)
        pp.set_downloader(self)

    def add_post_hook(self, ph):
        """Add the post hook"""
        self._post_hooks.append(ph)

    def add_progress_hook(self, ph):
        """Add the download progress hook"""
        self._progress_hooks.append(ph)

    def add_postprocessor_hook(self, ph):
        """Add the postprocessing progress hook"""
        self._postprocessor_hooks.append(ph)

    def _bidi_workaround(self, message):
        if not hasattr(self, '_output_channel'):
            return message

        assert hasattr(self, '_output_process')
        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode('utf-8'))
        self._output_process.stdin.flush()
        res = ''.join(self._output_channel.readline().decode('utf-8')
                      for _ in range(line_count))
        return res[:-len('\n')]

    def _write_string(self, message, out=None, only_once=False):
        if only_once:
            if message in self._printed_messages:
                return
            self._printed_messages.add(message)
        write_string(message, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, quiet=False):
        """Print message to stdout"""
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        elif not quiet or self.params.get('verbose'):
            self._write_string(
                '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
                self._err_file if quiet else self._screen_file)

    def to_stderr(self, message, only_once=False):
        """Print message to stderr"""
        assert isinstance(message, compat_str)
        if self.params.get('logger'):
            self.params['logger'].error(message)
        else:
            self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
            return
        if compat_os_name == 'nt':
            if ctypes.windll.kernel32.GetConsoleWindow():
                # c_wchar_p() might not be necessary if `message` is
                # already of type unicode()
                ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Save the title on stack
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if self.params.get('simulate'):
            return
        if compat_os_name != 'nt' and 'TERM' in os.environ:
            # Restore the title from stack
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
        return self

    def __exit__(self, *args):
        self.restore_console_title()

        if self.params.get('cookiefile') is not None:
            self.cookiejar.save(ignore_discard=True, ignore_expires=True)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = ''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += encode_compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = ''.join(tb_data)
            if tb:
                self.to_stderr(tb)
        if not self.params.get('ignoreerrors'):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode"""
        self.to_stdout(
            message, skip_eol, quiet=self.params.get('quiet', False))

    class Styles(Enum):
        HEADERS = 'yellow'
        EMPHASIS = 'light blue'
        ID = 'green'
        DELIM = 'blue'
        ERROR = 'red'
        WARNING = 'yellow'
        SUPPRESS = 'light black'

    def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
        if test_encoding:
            original_text = text
            encoding = self.params.get('encoding') or getattr(handle, 'encoding', 'ascii')
            text = text.encode(encoding, 'ignore').decode(encoding)
            if fallback is not None and text != original_text:
                text = fallback
        if isinstance(f, self.Styles):
            f = f.value
        return format_text(text, f) if allow_colors else text if fallback is None else fallback

    def _format_screen(self, *args, **kwargs):
        return self._format_text(
            self._screen_file, self._allow_colors['screen'], *args, **kwargs)

    def _format_err(self, *args, **kwargs):
        return self._format_text(
            self._err_file, self._allow_colors['err'], *args, **kwargs)

    def report_warning(self, message, only_once=False):
        '''
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            if self.params.get('no_warnings'):
                return
            self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)

    def deprecation_warning(self, message):
        if self.params.get('logger') is not None:
            self.params['logger'].warning(f'DeprecationWarning: {message}')
        else:
            self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', tb)

    def write_debug(self, message, only_once=False):
        '''Log debug message or print message to stderr'''
        if not self.params.get('verbose', False):
            return
        message = '[debug] %s' % message
        if self.params.get('logger'):
            self.params['logger'].debug(message)
        else:
            self.to_stderr(message, only_once)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen('[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            self.to_screen('[download] The file has already been downloaded')

    def report_file_delete(self, file_name):
        """Report that existing file will be deleted."""
        try:
            self.to_screen('Deleting existing file %s' % file_name)
        except UnicodeEncodeError:
            self.to_screen('Deleting existing file')

    def raise_no_formats(self, info, forced=False):
        has_drm = info.get('__has_drm')
        msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
        expected = self.params.get('ignore_no_formats_error')
        if forced or not expected:
            raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                 expected=has_drm or expected)
        else:
            self.report_warning(msg)

    def parse_outtmpl(self):
        outtmpl_dict = self.params.get('outtmpl', {})
        if not isinstance(outtmpl_dict, dict):
            outtmpl_dict = {'default': outtmpl_dict}
        # Remove spaces in the default template
        if self.params.get('restrictfilenames'):
            sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
        else:
            sanitize = lambda x: x
        outtmpl_dict.update({
            k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
            if outtmpl_dict.get(k) is None})
        for key, val in outtmpl_dict.items():
            if isinstance(val, bytes):
                self.report_warning(
                    'Parameter outtmpl is bytes, but should be a unicode string. '
                    'Put  from __future__ import unicode_literals  at the top of your code file or consider switching to Python 3.x.')
        return outtmpl_dict

    def get_output_path(self, dir_type='', filename=None):
        paths = self.params.get('paths', {})
        assert isinstance(paths, dict)
        path = os.path.join(
            expand_path(paths.get('home', '').strip()),
            expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
            filename or '')

        # Temporary fix for #4787
        # 'Treat' all problem characters by passing filename through preferredencoding
        # to workaround encoding issues with subprocess on python2 @ Windows
        if sys.version_info < (3, 0) and sys.platform == 'win32':
            path = encodeFilename(path, True).decode(preferredencoding())
        return sanitize_path(path, force=self.params.get('windowsfilenames'))

    @staticmethod
    def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'.
        # Correspondingly, that is not what we want, since we need to keep
        # '%%' intact for the template dict substitution step. Work around
        # it with a boundary-like separator hack.
        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

        # outtmpl should be expand_path'ed before template dict substitution
        # because meta fields may contain env variables we don't want to
        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
        # title "Hello $PATH", we don't want `$PATH` to be expanded.
        return expand_path(outtmpl).replace(sep, '')
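
    # Illustrative behavior of the separator hack above: literal '%%' and
    # '$$' in the template survive expand_path() untouched, while '~' and
    # env vars written directly in the template are still expanded as usual.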

    @staticmethod
    def escape_outtmpl(outtmpl):
        ''' Escape any remaining strings like %s, %abc% etc. '''
        return re.sub(
            STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
            lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
            outtmpl)

    @classmethod
    def validate_outtmpl(cls, outtmpl):
        ''' @return None or Exception object '''
        outtmpl = re.sub(
            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBU]'),
            lambda mobj: f'{mobj.group(0)[:-1]}s',
            cls._outtmpl_expandpath(outtmpl))
        try:
            cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
            return None
        except ValueError as err:
            return err
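
    # Illustrative use: validate_outtmpl('%(title)s.%(ext)s') returns None
    # for a well-formed template, while a malformed one returns the
    # ValueError instead of raising it.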

    @staticmethod
    def _copy_infodict(info_dict):
        info_dict = dict(info_dict)
        for key in ('__original_infodict', '__postprocessors'):
            info_dict.pop(key, None)
        return info_dict

    def prepare_outtmpl(self, outtmpl, info_dict, sanitize=None):
        """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict """
        info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

        info_dict = self._copy_infodict(info_dict)
        info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
            formatSeconds(info_dict['duration'], '-' if sanitize else ':')
            if info_dict.get('duration', None) is not None
            else None)
        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
        if info_dict.get('resolution') is None:
            info_dict['resolution'] = self.format_resolution(info_dict, default=None)

        # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
        # of %(field)s to %(field)0Nd for backward compatibility
        field_size_compat_map = {
            'playlist_index': number_of_digits(info_dict.get('_last_playlist_index') or 0),
            'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
            'autonumber': self.params.get('autonumber_size') or 5,
        }

        TMPL_DICT = {}
        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBU]'))
        # Field is of the form key1.key2...
        # where keys (except first) can be string, int or slice
        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
        MATH_FIELD_RE = r'''{field}|{num}'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
            (?P<negate>-)?
            (?P<fields>{field})
            (?P<maths>(?:{math_op}{math_field})*)
            (?:>(?P<strf_format>.+?))?
            (?P<alternate>(?<!\\),[^|)]+)?
            (?:\|(?P<default>.*?))?
            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
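
        # Illustrative template fields accepted by the regex above (the
        # documented output-template mini-language):
        #     %(title)s                plain field
        #     %(duration>%H-%M-%S)s    strftime-style formatting after '>'
        #     %(playlist_index+1)d     arithmetic through MATH_FUNCTIONS
        #     %(artist,creator)s       alternate fields, first available is used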

        def _traverse_infodict(k):
            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
):
1064 value
= _traverse_infodict(mdict
['fields'])
1067 value
= float_or_none(value
)
1068 if value
is not None:
1071 offset_key
= mdict
['maths']
1073 value
= float_or_none(value
)
1077 MATH_FIELD_RE
if operator
else MATH_OPERATORS_RE
,
1078 offset_key
).group(0)
1079 offset_key
= offset_key
[len(item
):]
1080 if operator
is None:
1081 operator
= MATH_FUNCTIONS
[item
]
1083 item
, multiplier
= (item
[1:], -1) if item
[0] == '-' else (item
, 1)
1084 offset
= float_or_none(item
)
1086 offset
= float_or_none(_traverse_infodict(item
))
1088 value
= operator(value
, multiplier
* offset
)
1089 except (TypeError, ZeroDivisionError):
1092 # Datetime formatting
1093 if mdict
['strf_format']:
1094 value
= strftime_or_none(value
, mdict
['strf_format'].replace('\\,', ','))
1098 na
= self
.params
.get('outtmpl_na_placeholder', 'NA')
1100 def _dumpjson_default(obj
):
1101 if isinstance(obj
, (set, LazyList
)):
1103 raise TypeError(f
'Object of type {type(obj).__name__} is not JSON serializable')

        def create_key(outer_mobj):
            if not outer_mobj.group('has_key'):
                return outer_mobj.group(0)
            key = outer_mobj.group('key')
            mobj = re.match(INTERNAL_FORMAT_RE, key)
            initial_field = mobj.group('fields').split('.')[-1] if mobj else ''
            value, default = None, na

            while mobj:
                mobj = mobj.groupdict()
                default = mobj['default'] if mobj['default'] is not None else default
                value = get_value(mobj)
                if value is None and mobj['alternate']:
                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
                else:
                    break

            fmt = outer_mobj.group('format')
            if fmt == 's' and value is not None and key in field_size_compat_map.keys():
                fmt = '0{:d}d'.format(field_size_compat_map[key])

            value = default if value is None else value

            flags = outer_mobj.group('conversion') or ''
            str_fmt = f'{fmt[:-1]}s'
            if fmt[-1] == 'l':  # list
                delim = '\n' if '#' in flags else ', '
                value, fmt = delim.join(variadic(value)), str_fmt
            elif fmt[-1] == 'j':  # json
                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
            elif fmt[-1] == 'q':  # quoted
                value = map(str, variadic(value) if '#' in flags else [value])
                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
            elif fmt[-1] == 'B':  # bytes
                value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
                value, fmt = value.decode('utf-8', 'ignore'), 's'
            elif fmt[-1] == 'U':  # unicode normalized
                value, fmt = unicodedata.normalize(
                    # "+" = compatibility equivalence, "#" = NFD
                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                    value), str_fmt
            elif fmt[-1] == 'c':
                if value:
                    value = str(value)[0]
                else:
                    fmt = str_fmt
            elif fmt[-1] not in 'rs':  # numeric
                value = float_or_none(value)
                if value is None:
                    value, fmt = default, 's'

            if sanitize:
                if fmt[-1] == 'r':
                    # If value is an object, sanitize might convert it to a string
                    # So we convert it to repr first
                    value, fmt = repr(value), str_fmt
                if fmt[-1] in 'csr':
                    value = sanitize(initial_field, value)

            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
            TMPL_DICT[key] = value
            return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

        return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT

    def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
        outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
        return self.escape_outtmpl(outtmpl) % info_dict
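
    # Illustrative call (field values assumed): with info_dict containing
    # {'id': 'x1', 'title': 'Demo'}, evaluate_outtmpl('%(title)s [%(id)s]',
    # info_dict) would yield 'Demo [x1]'.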

    def _prepare_filename(self, info_dict, tmpl_type='default'):
        try:
            sanitize = lambda k, v: sanitize_filename(
                compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == 'id' or k.endswith('_id')))
            outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
            filename = self.evaluate_outtmpl(outtmpl, info_dict, sanitize)

            force_ext = OUTTMPL_TYPES.get(tmpl_type)
            if filename and force_ext is not None:
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))

            # https://github.com/blackjack4494/youtube-dlc/issues/85
            trim_file_name = self.params.get('trim_file_name', False)
            if trim_file_name:
                fn_groups = filename.rsplit('.')
                ext = fn_groups[-1]
                sub_ext = ''
                if len(fn_groups) > 2:
                    sub_ext = fn_groups[-2]
                filename = join_nonempty(fn_groups[0][:trim_file_name], sub_ext, ext, delim='.')

            return filename
        except ValueError as err:
            self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
            return None

    def prepare_filename(self, info_dict, dir_type='', warn=False):
        """Generate the output filename."""

        filename = self._prepare_filename(info_dict, dir_type or 'default')
        if not filename and dir_type not in ('', 'temp'):
            return ''

        if warn:
            if not self.params.get('paths'):
                pass
            elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
            elif os.path.isabs(filename):
                self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
        if filename == '-' or not filename:
            return filename

        return self.get_output_path(dir_type, filename)

    def _match_entry(self, info_dict, incomplete=False, silent=False):
        """ Returns None if the file should be downloaded """

        video_title = info_dict.get('title', info_dict.get('id', 'video'))

        def check_filter():
            if 'title' in info_dict:
                # This can happen when we're just evaluating the playlist
                title = info_dict['title']
                matchtitle = self.params.get('matchtitle', False)
                if matchtitle:
                    if not re.search(matchtitle, title, re.IGNORECASE):
                        return '"' + title + '" title did not match pattern "' + matchtitle + '"'
                rejecttitle = self.params.get('rejecttitle', False)
                if rejecttitle:
                    if re.search(rejecttitle, title, re.IGNORECASE):
                        return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
            date = info_dict.get('upload_date')
            if date is not None:
                dateRange = self.params.get('daterange', DateRange())
                if date not in dateRange:
                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
            view_count = info_dict.get('view_count')
            if view_count is not None:
                min_views = self.params.get('min_views')
                if min_views is not None and view_count < min_views:
                    return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
                max_views = self.params.get('max_views')
                if max_views is not None and view_count > max_views:
                    return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
            if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
                return 'Skipping "%s" because it is age restricted' % video_title

            match_filter = self.params.get('match_filter')
            if match_filter is not None:
                try:
                    ret = match_filter(info_dict, incomplete=incomplete)
                except TypeError:
                    # For backward compatibility
                    ret = None if incomplete else match_filter(info_dict)
                if ret is not None:
                    return ret
            return None

        if self.in_download_archive(info_dict):
            reason = '%s has already been recorded in the archive' % video_title
            break_opt, break_err = 'break_on_existing', ExistingVideoReached
        else:
            reason = check_filter()
            break_opt, break_err = 'break_on_reject', RejectedVideoReached
        if reason is not None:
            if not silent:
                self.to_screen('[download] ' + reason)
            if self.params.get(break_opt, False):
                raise break_err()
        return reason

    @staticmethod
    def add_extra_info(info_dict, extra_info):
        '''Set the keys from extra_info in info dict if they are missing'''
        for key, value in extra_info.items():
            info_dict.setdefault(key, value)

    def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     process=True, force_generic_extractor=False):
        """
        Return a list with a dictionary for each video extracted.

        Arguments:
        url -- URL to extract

        Keyword arguments:
        download -- whether to download videos during extraction
        ie_key -- extractor key hint
        extra_info -- dictionary containing the extra values to add to each result
        process -- whether to resolve all unresolved references (URLs, playlist items),
                   must be True for download to work.
        force_generic_extractor -- force using the generic extractor
        """

        if extra_info is None:
            extra_info = {}

        if not ie_key and force_generic_extractor:
            ie_key = 'Generic'

        if ie_key:
            ies = {ie_key: self._get_info_extractor_class(ie_key)}
        else:
            ies = self._ies

        for ie_key, ie in ies.items():
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning('The program functionality for this site has been marked as broken, '
                                    'and will probably not work.')

            temp_id = ie.get_temp_id(url)
            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
                if self.params.get('break_on_existing', False):
                    raise ExistingVideoReached()
                break
            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
        else:
            self.report_error('no suitable InfoExtractor for URL %s' % url)
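
    # Illustrative API call (assumes a constructed ydl and a placeholder URL):
    #     info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc',
    #                             download=False)
    #     print(info.get('title'))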

    def __handle_extraction_exceptions(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(compat_str(e), e.format_traceback())
            except ReExtractInfo as e:
                if e.expected:
                    self.to_screen(f'{e}; Re-extracting data')
                else:
                    self.to_stderr('\r')
                    self.report_warning(f'{e}; Re-extracting data')
                return wrapper(self, *args, **kwargs)
            except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                raise
            except Exception as e:
                if self.params.get('ignoreerrors'):
                    self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
        return wrapper

    def _wait_for_video(self, ie_result):
        if (not self.params.get('wait_for_video')
                or ie_result.get('_type', 'video') != 'video'
                or ie_result.get('formats') or ie_result.get('url')):
            return

        format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
        last_msg = ''

        def progress(msg):
            nonlocal last_msg
            self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
            last_msg = msg

        min_wait, max_wait = self.params.get('wait_for_video')
        diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
        if diff is None and ie_result.get('live_status') == 'is_upcoming':
            diff = random.randrange(min_wait or 0, max_wait) if max_wait else min_wait
            self.report_warning('Release time of video is not known')
        elif (diff or 0) <= 0:
            self.report_warning('Video should already be available according to extracted info')
        diff = min(max(diff, min_wait or 0), max_wait or float('inf'))
        self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

        wait_till = time.time() + diff
        try:
            while True:
                diff = wait_till - time.time()
                if diff <= 0:
                    progress('')
                    raise ReExtractInfo('[wait] Wait period ended', expected=True)
                progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
                time.sleep(1)
        except KeyboardInterrupt:
            progress('')
            raise ReExtractInfo('[wait] Interrupted by user', expected=True)
        except BaseException as e:
            if not isinstance(e, ReExtractInfo):
                self.to_screen('')
            raise
1401 def __extract_info(self
, url
, ie
, download
, extra_info
, process
):
1402 ie_result
= ie
.extract(url
)
1403 if ie_result
is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
1405 if isinstance(ie_result
, list):
1406 # Backwards compatibility: old IE result format
1408 '_type': 'compat_list',
1409 'entries': ie_result
,
1411 if extra_info
.get('original_url'):
1412 ie_result
.setdefault('original_url', extra_info
['original_url'])
1413 self
.add_default_extra_info(ie_result
, ie
, url
)
1415 self
._wait
_for
_video
(ie_result
)
1416 return self
.process_ie_result(ie_result
, download
, extra_info
)

    def add_default_extra_info(self, ie_result, ie, url):
        if url is not None:
            self.add_extra_info(ie_result, {
                'webpage_url': url,
                'original_url': url,
                'webpage_url_basename': url_basename(url),
            })
        if ie is not None:
            self.add_extra_info(ie_result, {
                'extractor': ie.IE_NAME,
                'extractor_key': ie.ie_key(),
            })

    def process_ie_result(self, ie_result, download=True, extra_info=None):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """
        if extra_info is None:
            extra_info = {}
        result_type = ie_result.get('_type', 'video')

        if result_type in ('url', 'url_transparent'):
            ie_result['url'] = sanitize_url(ie_result['url'])
            if ie_result.get('original_url'):
                extra_info.setdefault('original_url', ie_result['original_url'])

            extract_flat = self.params.get('extract_flat', False)
            if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                    or extract_flat is True):
                info_copy = ie_result.copy()
                ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
                if ie and not ie_result.get('id'):
                    info_copy['id'] = ie.get_temp_id(ie_result['url'])
                self.add_default_extra_info(info_copy, ie, ie_result['url'])
                self.add_extra_info(info_copy, extra_info)
                self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
                if self.params.get('force_write_download_archive', False):
                    self.record_download_archive(info_copy)
                return ie_result
== 'video':
1465 self
.add_extra_info(ie_result
, extra_info
)
1466 ie_result
= self
.process_video_result(ie_result
, download
=download
)
1467 additional_urls
= (ie_result
or {}).get('additional_urls')
1469 # TODO: Improve MetadataParserPP to allow setting a list
1470 if isinstance(additional_urls
, compat_str
):
1471 additional_urls
= [additional_urls
]
1473 '[info] %s: %d additional URL(s) requested' % (ie_result
['id'], len(additional_urls
)))
1474 self
.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls
))
1475 ie_result
['additional_entries'] = [
1477 url
, download
, extra_info
,
1478 force_generic_extractor
=self
.params
.get('force_generic_extractor'))
1479 for url
in additional_urls
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(
                ie_result['url'], download,
                ie_key=ie_result.get('ie_key'),
                extra_info=extra_info)
        elif result_type == 'url_transparent':
            # Use the information from the embedding page
            info = self.extract_info(
                ie_result['url'], ie_key=ie_result.get('ie_key'),
                extra_info=extra_info, download=False, process=False)

            # extract_info may return None when ignoreerrors is enabled and
            # extraction failed with an error, don't crash and return early
            # in this case
            if not info:
                return info

            force_properties = dict(
                (k, v) for k, v in ie_result.items() if v is not None)
            for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
                if f in force_properties:
                    del force_properties[f]
            new_result = info.copy()
            new_result.update(force_properties)

            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != video) but rather an url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
            if new_result.get('_type') == 'url':
                new_result['_type'] = 'url_transparent'

            return self.process_ie_result(
                new_result, download=download, extra_info=extra_info)
        elif result_type in ('playlist', 'multi_video'):
            # Protect from infinite recursion due to recursively nested playlists
            # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
            webpage_url = ie_result['webpage_url']
            if webpage_url in self._playlist_urls:
                self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % ie_result.get('title') or ie_result.get('id'))
                return

            self._playlist_level += 1
            self._playlist_urls.add(webpage_url)
            self._sanitize_thumbnails(ie_result)
            try:
                return self.__process_playlist(ie_result, download)
            finally:
                self._playlist_level -= 1
                if not self._playlist_level:
                    self._playlist_urls.clear()
        elif result_type == 'compat_list':
            self.report_warning(
                'Extractor %s returned a compat_list result. '
                'It needs to be updated.' % ie_result.get('extractor'))

            def _fixup(r):
                self.add_extra_info(r, {
                    'extractor': ie_result['extractor'],
                    'webpage_url': ie_result['webpage_url'],
                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                    'extractor_key': ie_result['extractor_key'],
                })
                return r

            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download, extra_info)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)

    def _ensure_dir_exists(self, path):
        return make_dir(path, self.report_error)

    def __process_playlist(self, ie_result, download):
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        if 'entries' not in ie_result:
            raise EntryNotInPlaylist('There are no entries')

        MissingEntry = object()
        incomplete_entries = bool(ie_result.get('requested_entries'))
        if incomplete_entries:
            def fill_missing_entries(entries, indices):
                ret = [MissingEntry] * max(indices)
                for i, entry in zip(indices, entries):
                    ret[i - 1] = entry
                return ret
            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
= []
1583 playliststart
= self
.params
.get('playliststart', 1)
1584 playlistend
= self
.params
.get('playlistend')
1585 # For backwards compatibility, interpret -1 as whole list
1586 if playlistend
== -1:
1589 playlistitems_str
= self
.params
.get('playlist_items')
1590 playlistitems
= None
1591 if playlistitems_str
is not None:
1592 def iter_playlistitems(format
):
1593 for string_segment
in format
.split(','):
1594 if '-' in string_segment
:
1595 start
, end
= string_segment
.split('-')
1596 for item
in range(int(start
), int(end
) + 1):
1599 yield int(string_segment
)
1600 playlistitems
= orderedSet(iter_playlistitems(playlistitems_str
))
1602 ie_entries
= ie_result
['entries']
1604 'Downloading %d videos' if not isinstance(ie_entries
, list)
1605 else 'Collected %d videos; downloading %%d of them' % len(ie_entries
))
1607 if isinstance(ie_entries
, list):
1609 return ie_entries
[i
- 1]
1611 if not isinstance(ie_entries
, (PagedList
, LazyList
)):
1612 ie_entries
= LazyList(ie_entries
)
1615 return YoutubeDL
.__handle
_extraction
_exceptions
(
1616 lambda self
, i
: ie_entries
[i
- 1]
1620 items
= playlistitems
if playlistitems
is not None else itertools
.count(playliststart
)
1624 if playlistitems
is None and playlistend
is not None and playlistend
< i
:
1628 entry
= get_entry(i
)
1629 if entry
is MissingEntry
:
1630 raise EntryNotInPlaylist()
1631 except (IndexError, EntryNotInPlaylist
):
1632 if incomplete_entries
:
1633 raise EntryNotInPlaylist(f
'Entry {i} cannot be found')
1634 elif not playlistitems
:
1636 entries
.append(entry
)
1638 if entry
is not None:
1639 self
._match
_entry
(entry
, incomplete
=True, silent
=True)
1640 except (ExistingVideoReached
, RejectedVideoReached
):
1642 ie_result
['entries'] = entries
1644 # Save playlist_index before re-ordering
1646 ((playlistitems
[i
- 1] if playlistitems
else i
+ playliststart
- 1), entry
)
1647 for i
, entry
in enumerate(entries
, 1)
1648 if entry
is not None]
1649 n_entries
= len(entries
)
1651 if not playlistitems
and (playliststart
!= 1 or playlistend
):
1652 playlistitems
= list(range(playliststart
, playliststart
+ n_entries
))
1653 ie_result
['requested_entries'] = playlistitems
1655 _infojson_written
= False
1656 if not self
.params
.get('simulate') and self
.params
.get('allow_playlist_files', True):
1658 'playlist': playlist
,
1659 'playlist_id': ie_result
.get('id'),
1660 'playlist_title': ie_result
.get('title'),
1661 'playlist_uploader': ie_result
.get('uploader'),
1662 'playlist_uploader_id': ie_result
.get('uploader_id'),
1663 'playlist_index': 0,
1664 'n_entries': n_entries
,
1666 ie_copy
.update(dict(ie_result
))
1668 _infojson_written
= self
._write
_info
_json
(
1669 'playlist', ie_result
, self
.prepare_filename(ie_copy
, 'pl_infojson'))
1670 if _infojson_written
is None:
1672 if self
._write
_description
('playlist', ie_result
,
1673 self
.prepare_filename(ie_copy
, 'pl_description')) is None:
1675 # TODO: This should be passed to ThumbnailsConvertor if necessary
1676 self
._write
_thumbnails
('playlist', ie_copy
, self
.prepare_filename(ie_copy
, 'pl_thumbnail'))
1678 if self
.params
.get('playlistreverse', False):
1679 entries
= entries
[::-1]
1680 if self
.params
.get('playlistrandom', False):
1681 random
.shuffle(entries
)
1683 x_forwarded_for
= ie_result
.get('__x_forwarded_for_ip')
1685 self
.to_screen('[%s] playlist %s: %s' % (ie_result
['extractor'], playlist
, msg
% n_entries
))
1687 max_failures
= self
.params
.get('skip_playlist_after_errors') or float('inf')
1688 for i
, entry_tuple
in enumerate(entries
, 1):
1689 playlist_index
, entry
= entry_tuple
1690 if 'playlist-index' in self
.params
.get('compat_opts', []):
1691 playlist_index
= playlistitems
[i
- 1] if playlistitems
else i
+ playliststart
- 1
1692 self
.to_screen('[download] Downloading video %s of %s' % (i
, n_entries
))
1693 # This __x_forwarded_for_ip thing is a bit ugly but requires
1696 entry
['__x_forwarded_for_ip'] = x_forwarded_for
1698 'n_entries': n_entries
,
1699 '_last_playlist_index': max(playlistitems
) if playlistitems
else (playlistend
or n_entries
),
1700 'playlist_index': playlist_index
,
1701 'playlist_autonumber': i
,
1702 'playlist': playlist
,
1703 'playlist_id': ie_result
.get('id'),
1704 'playlist_title': ie_result
.get('title'),
1705 'playlist_uploader': ie_result
.get('uploader'),
1706 'playlist_uploader_id': ie_result
.get('uploader_id'),
1707 'extractor': ie_result
['extractor'],
1708 'webpage_url': ie_result
['webpage_url'],
1709 'webpage_url_basename': url_basename(ie_result
['webpage_url']),
1710 'extractor_key': ie_result
['extractor_key'],
1713 if self
._match
_entry
(entry
, incomplete
=True) is not None:
1716 entry_result
= self
.__process
_iterable
_entry
(entry
, download
, extra
)
1717 if not entry_result
:
1719 if failures
>= max_failures
:
1721 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist
, failures
))
1723 playlist_results
.append(entry_result
)
1724 ie_result
['entries'] = playlist_results
1726 # Write the updated info to json
1727 if _infojson_written
and self
._write
_info
_json
(
1728 'updated playlist', ie_result
,
1729 self
.prepare_filename(ie_copy
, 'pl_infojson'), overwrite
=True) is None:
1731 self
.to_screen('[download] Finished downloading playlist: %s' % playlist
)

    @__handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)

    def _build_format_filter(self, filter_spec):
        " Returns a function to filter the formats according to the filter_spec "

        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
                (?P<value>[a-zA-Z0-9._-]+)\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                comparison_value = m.group('value')
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
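
    # Illustrative filter specs accepted above, derived from the two regexes
    # (example values are hypothetical):
    #     'height<=720'       numeric comparison on a format field
    #     'filesize>100M'     sizes are parsed via parse_filesize
    #     'tbr>?1000'         trailing '?' also keeps formats missing the field
    #     'ext=mp4'           string equality
    #     'vcodec^=avc1'      '^=' / '$=' / '*=' are prefix / suffix / contains
    #     'format_id!*=dash'  leading '!' on the operator negates the match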

    def _check_formats(self, formats):
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

    def _default_format_spec(self, info_dict, download=True):

        def can_merge():
            merger = FFmpegMergerPP(self)
            return merger.available and merger.can_merge()

        prefer_best = (
            not self.params.get('simulate')
            and download
            and (
                not can_merge()
                or info_dict.get('is_live', False)
                or self.outtmpl_dict['default'] == '-'))
        compat = (
            prefer_best
            or self.params.get('allow_multiple_audio_streams', False)
            or 'format-spec' in self.params.get('compat_opts', []))

        return (
            'best/bestvideo+bestaudio' if prefer_best
            else 'bestvideo*+bestaudio/best' if not compat
            else 'bestvideo+bestaudio/best')
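
    # For reference, the three outcomes above are:
    #     prefer_best -> 'best/bestvideo+bestaudio'  (merging unavailable,
    #                    live stream, or output going to '-')
    #     compat      -> 'bestvideo+bestaudio/best'  ('format-spec' compat_opt
    #                    or multiple audio streams allowed)
    #     otherwise   -> 'bestvideo*+bestaudio/best'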

    def build_format_selector(self, format_spec):
        def syntax_error(note, start):
            message = (
                'Invalid format specification: '
                '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
            return SyntaxError(message)

        PICKFIRST = 'PICKFIRST'
        MERGE = 'MERGE'
        SINGLE = 'SINGLE'
        GROUP = 'GROUP'
        FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])

        allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
                                  'video': self.params.get('allow_multiple_video_streams', False)}

        check_formats = self.params.get('check_formats') == 'selected'

        def _parse_filter(tokens):
            filter_parts = []
            for type, string, start, _, _ in tokens:
                if type == tokenize.OP and string == ']':
                    return ''.join(filter_parts)
                else:
                    filter_parts.append(string)

        def _remove_unused_ops(tokens):
            # Remove operators that we don't use and join them with the surrounding strings
            # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
            ALLOWED_OPS = ('/', '+', ',', '(', ')')
            last_string, last_start, last_end, last_line = None, None, None, None
            for type, string, start, end, line in tokens:
                if type == tokenize.OP and string == '[':
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                    # everything inside brackets will be handled by _parse_filter
                    for type, string, start, end, line in tokens:
                        yield type, string, start, end, line
                        if type == tokenize.OP and string == ']':
                            break
                elif type == tokenize.OP and string in ALLOWED_OPS:
                    if last_string:
                        yield tokenize.NAME, last_string, last_start, last_end, last_line
                        last_string = None
                    yield type, string, start, end, line
                elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
                    if not last_string:
                        last_string = string
                        last_start = start
                        last_end = end
                    else:
                        last_string += string
            if last_string:
                yield tokenize.NAME, last_string, last_start, last_end, last_line

        def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
            selectors = []
            current_selector = None
            for type, string, start, _, _ in tokens:
                # ENCODING is only defined in python 3.x
                if type == getattr(tokenize, 'ENCODING', None):
                    continue
                elif type in [tokenize.NAME, tokenize.NUMBER]:
                    current_selector = FormatSelector(SINGLE, string, [])
                elif type == tokenize.OP:
                    if string == ')':
                        if not inside_group:
                            # ')' will be handled by the parentheses group
                            tokens.restore_last_token()
                        break
                    elif inside_merge and string in ['/', ',']:
                        tokens.restore_last_token()
                        break
                    elif inside_choice and string == ',':
                        tokens.restore_last_token()
                        break
                    elif string == ',':
                        if not current_selector:
                            raise syntax_error('"," must follow a format selector', start)
                        selectors.append(current_selector)
                        current_selector = None
                    elif string == '/':
                        if not current_selector:
                            raise syntax_error('"/" must follow a format selector', start)
                        first_choice = current_selector
                        second_choice = _parse_format_selection(tokens, inside_choice=True)
                        current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
                    elif string == '[':
                        if not current_selector:
                            current_selector = FormatSelector(SINGLE, 'best', [])
                        format_filter = _parse_filter(tokens)
                        current_selector.filters.append(format_filter)
                    elif string == '(':
                        if current_selector:
                            raise syntax_error('Unexpected "("', start)
                        group = _parse_format_selection(tokens, inside_group=True)
                        current_selector = FormatSelector(GROUP, group, [])
                    elif string == '+':
                        if not current_selector:
                            raise syntax_error('Unexpected "+"', start)
                        selector_1 = current_selector
                        selector_2 = _parse_format_selection(tokens, inside_merge=True)
                        if not selector_2:
                            raise syntax_error('Expected a selector', start)
                        current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
                    else:
                        raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
                elif type == tokenize.ENDMARKER:
                    break
            if current_selector:
                selectors.append(current_selector)
            return selectors

        def _merge(formats_pair):
            format_1, format_2 = formats_pair

            formats_info = []
            formats_info.extend(format_1.get('requested_formats', (format_1,)))
            formats_info.extend(format_2.get('requested_formats', (format_2,)))

            if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
                get_no_more = {'video': False, 'audio': False}
                for (i, fmt_info) in enumerate(formats_info):
                    if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
                        formats_info.pop(i)
                        continue
                    for aud_vid in ['audio', 'video']:
                        if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
                            if get_no_more[aud_vid]:
                                formats_info.pop(i)
                                break
                            get_no_more[aud_vid] = True

            if len(formats_info) == 1:
                return formats_info[0]

            video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
            audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']

            the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
            the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

            output_ext = self.params.get('merge_output_format')
            if not output_ext:
                if the_only_video:
                    output_ext = the_only_video['ext']
                elif the_only_audio and not video_fmts:
                    output_ext = the_only_audio['ext']
                else:
                    output_ext = 'mkv'

            filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

            new_dict = {
                'requested_formats': formats_info,
                'format': '+'.join(filtered('format')),
                'format_id': '+'.join(filtered('format_id')),
                'ext': output_ext,
                'protocol': '+'.join(map(determine_protocol, formats_info)),
                'language': '+'.join(orderedSet(filtered('language'))) or None,
                'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
                'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
                'tbr': sum(filtered('tbr', 'vbr', 'abr')),
            }

            if the_only_video:
                new_dict.update({
                    'width': the_only_video.get('width'),
                    'height': the_only_video.get('height'),
                    'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
                    'fps': the_only_video.get('fps'),
                    'dynamic_range': the_only_video.get('dynamic_range'),
                    'vcodec': the_only_video.get('vcodec'),
                    'vbr': the_only_video.get('vbr'),
                    'stretched_ratio': the_only_video.get('stretched_ratio'),
                })

            if the_only_audio:
                new_dict.update({
                    'acodec': the_only_audio.get('acodec'),
                    'abr': the_only_audio.get('abr'),
                    'asr': the_only_audio.get('asr'),
                })

            return new_dict

        def _check_formats(formats):
            if not check_formats:
                yield from formats
                return
            yield from self._check_formats(formats)

        def _build_selector_function(selector):
            if isinstance(selector, list):  # ,
                fs = [_build_selector_function(s) for s in selector]

                def selector_function(ctx):
                    for f in fs:
                        yield from f(ctx)
                return selector_function

            elif selector.type == GROUP:  # ()
                selector_function = _build_selector_function(selector.selector)

            elif selector.type == PICKFIRST:  # /
                fs = [_build_selector_function(s) for s in selector.selector]

                def selector_function(ctx):
                    for f in fs:
                        picked_formats = list(f(ctx))
                        if picked_formats:
                            return picked_formats
                    return []

            elif selector.type == MERGE:  # +
                selector_1, selector_2 = map(_build_selector_function, selector.selector)

                def selector_function(ctx):
                    for pair in itertools.product(
                            selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
                        yield _merge(pair)

            elif selector.type == SINGLE:  # atom
                format_spec = selector.selector or 'best'

                # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
                if format_spec == 'all':
                    def selector_function(ctx):
                        yield from _check_formats(ctx['formats'][::-1])
                elif format_spec == 'mergeall':
                    def selector_function(ctx):
                        formats = list(_check_formats(ctx['formats']))
                        if not formats:
                            return
                        merged_format = formats[-1]
                        for f in formats[-2::-1]:
                            merged_format = _merge((merged_format, f))
                        yield merged_format

                else:
                    format_fallback, format_reverse, format_idx = False, True, 1
                    mobj = re.match(
                        r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
                        format_spec)
                    if mobj is not None:
                        format_idx = int_or_none(mobj.group('n'), default=1)
                        format_reverse = mobj.group('bw')[0] == 'b'
                        format_type = (mobj.group('type') or [None])[0]
                        not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
                        format_modified = mobj.group('mod') is not None

                        format_fallback = not format_type and not format_modified  # for b, w
                        _filter_f = (
                            (lambda f: f.get('%scodec' % format_type) != 'none')
                            if format_type and format_modified  # bv*, ba*, wv*, wa*
                            else (lambda f: f.get('%scodec' % not_format_type) == 'none')
                            if format_type  # bv, ba, wv, wa
                            else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
                            if not format_modified  # b, w
                            else lambda f: True)  # b*, w*
                        filter_f = lambda f: _filter_f(f) and (
                            f.get('vcodec') != 'none' or f.get('acodec') != 'none')
                    else:
                        if format_spec in self._format_selection_exts['audio']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
                        elif format_spec in self._format_selection_exts['video']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                        elif format_spec in self._format_selection_exts['storyboards']:
                            filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
                        else:
                            filter_f = lambda f: f.get('format_id') == format_spec  # id

                    def selector_function(ctx):
                        formats = list(ctx['formats'])
                        matches = list(filter(filter_f, formats)) if filter_f is not None else formats
                        if format_fallback and ctx['incomplete_formats'] and not matches:
                            # for extractors with incomplete formats (audio only (soundcloud)
                            # or video only (imgur)) best/worst will fallback to
                            # best/worst {video,audio}-only format
                            matches = formats
                        matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
                        try:
                            yield matches[format_idx - 1]
                        except IndexError:
                            return

            filters = [self._build_format_filter(f) for f in selector.filters]

            def final_selector(ctx):
                ctx_copy = copy.deepcopy(ctx)
                for _filter in filters:
                    ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
                return selector_function(ctx_copy)
            return final_selector

        stream = io.BytesIO(format_spec.encode('utf-8'))
        try:
            tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
        except tokenize.TokenError:
            raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

        class TokenIterator(object):
            def __init__(self, tokens):
                self.tokens = tokens
                self.counter = 0

            def __iter__(self):
                return self

            def __next__(self):
                if self.counter >= len(self.tokens):
                    raise StopIteration()
                value = self.tokens[self.counter]
                self.counter += 1
                return value

            next = __next__

            def restore_last_token(self):
                self.counter -= 1

        parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
        return _build_selector_function(parsed_selector)
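
    # A sketch of selector strings this grammar accepts (illustrative only):
    #     'best'                                    -> SINGLE
    #     'bestvideo+bestaudio/best'                -> PICKFIRST(MERGE(bv, ba), b)
    #     '(bestvideo+bestaudio/best)[height<=720]' -> GROUP with a filter
    #     'bv*[ext=mp4].2'                          -> 2nd best matching format
    # Hypothetical usage of the returned function:
    #     selector = self.build_format_selector('bv*+ba/b')
    #     chosen = list(selector({'formats': formats, 'incomplete_formats': False}))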

    def _calc_headers(self, info_dict):
        res = std_headers.copy()

        add_headers = info_dict.get('http_headers')
        if add_headers:
            res.update(add_headers)

        cookies = self._calc_cookies(info_dict)
        if cookies:
            res['Cookie'] = cookies

        if 'X-Forwarded-For' not in res:
            x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
            if x_forwarded_for_ip:
                res['X-Forwarded-For'] = x_forwarded_for_ip

        return res

    def _calc_cookies(self, info_dict):
        pr = sanitized_Request(info_dict['url'])
        self.cookiejar.add_cookie_header(pr)
        return pr.get_header('Cookie')
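
    # The layering implied by _calc_headers: start from std_headers, overlay the
    # extractor-supplied 'http_headers', then set 'Cookie' from the cookiejar,
    # and only add 'X-Forwarded-For' when not already present. A hedged consumer
    # sketch (names as used elsewhere in this file):
    #     req = sanitized_Request(info_dict['url'], headers=self._calc_headers(info_dict))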

    def _sort_thumbnails(self, thumbnails):
        thumbnails.sort(key=lambda t: (
            t.get('preference') if t.get('preference') is not None else -1,
            t.get('width') if t.get('width') is not None else -1,
            t.get('height') if t.get('height') is not None else -1,
            t.get('id') if t.get('id') is not None else '',
            t.get('url')))

    def _sanitize_thumbnails(self, info_dict):
        thumbnails = info_dict.get('thumbnails')
        if thumbnails is None:
            thumbnail = info_dict.get('thumbnail')
            if thumbnail:
                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]

        if thumbnails:
            def check_thumbnails(thumbnails):
                for t in thumbnails:
                    self.to_screen(f'[info] Testing thumbnail {t["id"]}')
                    try:
                        self.urlopen(HEADRequest(t['url']))
                    except network_exceptions as err:
                        self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                        continue
                    yield t

            self._sort_thumbnails(thumbnails)
            for i, t in enumerate(thumbnails):
                if t.get('id') is None:
                    t['id'] = '%d' % i
                if t.get('width') and t.get('height'):
                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
                t['url'] = sanitize_url(t['url'])

            if self.params.get('check_formats') is True:
                info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
            else:
                info_dict['thumbnails'] = thumbnails

    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result',
                                 video_id=info_dict['id'], ie=info_dict['extractor'])

        def report_force_conversion(field, field_not, conversion):
            self.report_warning(
                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
                % (field, field_not, conversion))

        def sanitize_string_field(info, string_field):
            field = info.get(string_field)
            if field is None or isinstance(field, compat_str):
                return
            report_force_conversion(string_field, 'a string', 'string')
            info[string_field] = compat_str(field)

        def sanitize_numeric_fields(info):
            for numeric_field in self._NUMERIC_FIELDS:
                field = info.get(numeric_field)
                if field is None or isinstance(field, compat_numeric_types):
                    continue
                report_force_conversion(numeric_field, 'numeric', 'int')
                info[numeric_field] = int_or_none(field)

        sanitize_string_field(info_dict, 'id')
        sanitize_numeric_fields(info_dict)

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        self._sanitize_thumbnails(info_dict)

        thumbnail = info_dict.get('thumbnail')
        thumbnails = info_dict.get('thumbnails')
        if thumbnail:
            info_dict['thumbnail'] = sanitize_url(thumbnail)
        elif thumbnails:
            info_dict['thumbnail'] = thumbnails[-1]['url']

        if info_dict.get('display_id') is None and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        if info_dict.get('duration') is not None:
            info_dict['duration_string'] = formatSeconds(info_dict['duration'])

        for ts_key, date_key in (
                ('timestamp', 'upload_date'),
                ('release_timestamp', 'release_date'),
        ):
            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
                # see http://bugs.python.org/issue1646728)
                try:
                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
                except (ValueError, OverflowError, OSError):
                    pass

        live_keys = ('is_live', 'was_live')
        live_status = info_dict.get('live_status')
        if live_status is None:
            for key in live_keys:
                if info_dict.get(key) is False:
                    continue
                if info_dict.get(key):
                    live_status = key
                break
            if all(info_dict.get(key) is False for key in live_keys):
                live_status = 'not_live'
        if live_status:
            info_dict['live_status'] = live_status
            for key in live_keys:
                if info_dict.get(key) is None:
                    info_dict[key] = (live_status == key)

        # Auto generate title fields corresponding to the *_number fields when missing
        # in order to always have clean titles. This is very common for TV series.
        for field in ('chapter', 'season', 'episode'):
            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

        for cc_kind in ('subtitles', 'automatic_captions'):
            cc = info_dict.get(cc_kind)
            if cc:
                for _, subtitle in cc.items():
                    for subtitle_format in subtitle:
                        if subtitle_format.get('url'):
                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                        if subtitle_format.get('ext') is None:
                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

        automatic_captions = info_dict.get('automatic_captions')
        subtitles = info_dict.get('subtitles')

        info_dict['requested_subtitles'] = self.process_subtitles(
            info_dict['id'], subtitles, automatic_captions)

        if info_dict.get('formats') is None:
            # There's only one format available
            formats = [info_dict]
        else:
            formats = info_dict['formats']

        info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
        if not self.params.get('allow_unplayable_formats'):
            formats = [f for f in formats if not f.get('has_drm')]

        if not formats:
            self.raise_no_formats(info_dict)

        def is_wellformed(f):
            url = f.get('url')
            if not url:
                self.report_warning(
                    '"url" field is missing or empty - skipping format, '
                    'there is an error in extractor')
                return False
            if isinstance(url, bytes):
                sanitize_string_field(f, 'url')
            return True

        # Filter out malformed formats for better extraction robustness
        formats = list(filter(is_wellformed, formats))

        formats_dict = {}

        # We check that all the formats have the format and format_id fields
        for i, format in enumerate(formats):
            sanitize_string_field(format, 'format_id')
            sanitize_numeric_fields(format)
            format['url'] = sanitize_url(format['url'])
            if not format.get('format_id'):
                format['format_id'] = compat_str(i)
            else:
                # Sanitize format_id from characters used in format selector expression
                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
            format_id = format['format_id']
            if format_id not in formats_dict:
                formats_dict[format_id] = []
            formats_dict[format_id].append(format)

        # Make sure all formats have unique format_id
        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
        for format_id, ambiguous_formats in formats_dict.items():
            ambiguous_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambiguous_id:
                    format['format_id'] = '%s-%d' % (format_id, i)
                if format.get('ext') is None:
                    format['ext'] = determine_ext(format['url']).lower()
                # Ensure there is no conflict between id and ext in format selection
                # See https://github.com/yt-dlp/yt-dlp/issues/1282
                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                    format['format_id'] = 'f%s' % format['format_id']

        for i, format in enumerate(formats):
            if format.get('format') is None:
                format['format'] = '{id} - {res}{note}'.format(
                    id=format['format_id'],
                    res=self.format_resolution(format),
                    note=format_field(format, 'format_note', ' (%s)'),
                )
            if format.get('protocol') is None:
                format['protocol'] = determine_protocol(format)
            if format.get('resolution') is None:
                format['resolution'] = self.format_resolution(format, default=None)
            if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
                format['dynamic_range'] = 'SDR'
            if (info_dict.get('duration') and format.get('tbr')
                    and not format.get('filesize') and not format.get('filesize_approx')):
                format['filesize_approx'] = info_dict['duration'] * format['tbr'] * (1024 / 8)

            # Add HTTP headers, so that external programs can use them from the
            # json output
            full_format_info = info_dict.copy()
            full_format_info.update(format)
            format['http_headers'] = self._calc_headers(full_format_info)
        # Remove private housekeeping stuff
        if '__x_forwarded_for_ip' in info_dict:
            del info_dict['__x_forwarded_for_ip']

        # TODO Central sorting goes here

        if self.params.get('check_formats') is True:
            formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

        if not formats or formats[0] is not info_dict:
            # only set the 'formats' fields if the original info_dict list them
            # otherwise we end up with a circular reference, the first (and unique)
            # element in the 'formats' field in info_dict is info_dict itself,
            # which can't be exported to json
            info_dict['formats'] = formats

        info_dict, _ = self.pre_process(info_dict)

        # The pre-processors may have modified the formats
        formats = info_dict.get('formats', [info_dict])

        if self.params.get('list_thumbnails'):
            self.list_thumbnails(info_dict)
        if self.params.get('listformats'):
            if not info_dict.get('formats') and not info_dict.get('url'):
                self.to_screen('%s has no formats' % info_dict['id'])
            else:
                self.list_formats(info_dict)
        if self.params.get('listsubtitles'):
            if 'automatic_captions' in info_dict:
                self.list_subtitles(
                    info_dict['id'], automatic_captions, 'automatic captions')
            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
        list_only = self.params.get('simulate') is None and (
            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
        if list_only:
            # Without this printing, -F --print-json will not work
            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
            return

        format_selector = self.format_selector
        if format_selector is None:
            req_format = self._default_format_spec(info_dict, download=download)
            self.write_debug('Default format spec: %s' % req_format)
            format_selector = self.build_format_selector(req_format)

        # While in format selection we may need to have an access to the original
        # format set in order to calculate some metrics or do some processing.
        # For now we need to be able to guess whether original formats provided
        # by extractor are incomplete or not (i.e. whether extractor provides only
        # video-only or audio-only formats) for proper formats selection for
        # extractors with such incomplete formats (see
        # https://github.com/ytdl-org/youtube-dl/pull/5556).
        # Since formats may be filtered during format selection and may not match
        # the original formats the results may be incorrect. Thus original formats
        # or pre-calculated metrics should be passed to format selection routines
        # as well.
        # We will pass a context object containing all necessary additional data
        # instead of just formats.
        # This fixes incorrect format selection issue (see
        # https://github.com/ytdl-org/youtube-dl/issues/10083).
        incomplete_formats = (
            # All formats are video-only or
            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
            # all formats are audio-only
            or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

        ctx = {
            'formats': formats,
            'incomplete_formats': incomplete_formats,
        }

        formats_to_download = list(format_selector(ctx))
        if not formats_to_download:
            if not self.params.get('ignore_no_formats_error'):
                raise ExtractorError('Requested format is not available', expected=True,
                                     video_id=info_dict['id'], ie=info_dict['extractor'])
            else:
                self.report_warning('Requested format is not available')
                # Process what we can, even without any available formats.
                self.process_info(dict(info_dict))
        elif download:
            self.to_screen(
                '[info] %s: Downloading %d format(s): %s' % (
                    info_dict['id'], len(formats_to_download),
                    ", ".join([f['format_id'] for f in formats_to_download])))
            for fmt in formats_to_download:
                new_info = dict(info_dict)
                # Save a reference to the original info_dict so that it can be modified in process_info if needed
                new_info['__original_infodict'] = info_dict
                new_info.update(fmt)
                self.process_info(new_info)
        # We update the info dict with the selected best quality format (backwards compatibility)
        if formats_to_download:
            info_dict.update(formats_to_download[-1])
        return info_dict

    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
        """Select the requested subtitles and their format"""
        available_subs = {}
        if normal_subtitles and self.params.get('writesubtitles'):
            available_subs.update(normal_subtitles)
        if automatic_captions and self.params.get('writeautomaticsub'):
            for lang, cap_info in automatic_captions.items():
                if lang not in available_subs:
                    available_subs[lang] = cap_info

        if (not self.params.get('writesubtitles') and not
                self.params.get('writeautomaticsub') or not
                available_subs):
            return None

        all_sub_langs = available_subs.keys()
        if self.params.get('allsubtitles', False):
            requested_langs = all_sub_langs
        elif self.params.get('subtitleslangs', False):
            # A list is used so that the order of languages will be the same as
            # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
            requested_langs = []
            for lang_re in self.params.get('subtitleslangs'):
                if lang_re == 'all':
                    requested_langs.extend(all_sub_langs)
                    continue
                discard = lang_re[0] == '-'
                if discard:
                    lang_re = lang_re[1:]
                current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
                if discard:
                    for lang in current_langs:
                        while lang in requested_langs:
                            requested_langs.remove(lang)
                else:
                    requested_langs.extend(current_langs)
            requested_langs = orderedSet(requested_langs)
        elif 'en' in available_subs:
            requested_langs = ['en']
        else:
            requested_langs = [list(all_sub_langs)[0]]
        if requested_langs:
            self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

        formats_query = self.params.get('subtitlesformat', 'best')
        formats_preference = formats_query.split('/') if formats_query else []
        subs = {}
        for lang in requested_langs:
            formats = available_subs.get(lang)
            if formats is None:
                self.report_warning('%s subtitles not available for %s' % (lang, video_id))
                continue
            for ext in formats_preference:
                if ext == 'best':
                    f = formats[-1]
                    break
                matches = list(filter(lambda f: f['ext'] == ext, formats))
                if matches:
                    f = matches[-1]
                    break
            else:
                f = formats[-1]
                self.report_warning(
                    'No subtitle format found matching "%s" for language %s, '
                    'using %s' % (formats_query, lang, f['ext']))
            subs[lang] = f
        return subs
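
    # Illustrative 'subtitleslangs' values handled by the loop above:
    #     ['en']                -> exact language
    #     ['en.*']              -> regex; matches 'en', 'en-US', ... ('$' is appended)
    #     ['all', '-live_chat'] -> everything except live chat
    # A '-' prefix discards previously selected matches, preserving given order.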

    def __forced_printings(self, info_dict, filename, incomplete):
        def print_mandatory(field, actual_field=None):
            if actual_field is None:
                actual_field = field
            if (self.params.get('force%s' % field, False)
                    and (not incomplete or info_dict.get(actual_field) is not None)):
                self.to_stdout(info_dict[actual_field])

        def print_optional(field):
            if (self.params.get('force%s' % field, False)
                    and info_dict.get(field) is not None):
                self.to_stdout(info_dict[field])

        info_dict = info_dict.copy()
        if filename is not None:
            info_dict['filename'] = filename
        if info_dict.get('requested_formats') is not None:
            # For RTMP URLs, also include the playpath
            info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
        elif 'url' in info_dict:
            info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

        if self.params.get('forceprint') or self.params.get('forcejson'):
            self.post_extract(info_dict)
        for tmpl in self.params.get('forceprint', []):
            mobj = re.match(r'\w+(=?)$', tmpl)
            if mobj and mobj.group(1):
                tmpl = f'{tmpl[:-1]} = %({tmpl[:-1]})s'
            elif mobj:
                tmpl = '%({})s'.format(tmpl)
            self.to_stdout(self.evaluate_outtmpl(tmpl, info_dict))

        print_mandatory('title')
        print_mandatory('id')
        print_mandatory('url', 'urls')
        print_optional('thumbnail')
        print_optional('description')
        print_optional('filename')
        if self.params.get('forceduration') and info_dict.get('duration') is not None:
            self.to_stdout(formatSeconds(info_dict['duration']))
        print_mandatory('format')

        if self.params.get('forcejson'):
            self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
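
    # Illustrative 'forceprint' templates handled above:
    #     'title'   -> printed as '%(title)s'
    #     'title='  -> printed as 'title = %(title)s'
    # Anything not matching r'\w+(=?)$' passes through as a full output
    # template, e.g. '%(title)s [%(id)s]'.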

    def dl(self, name, info, subtitle=False, test=False):
        if not info.get('url'):
            self.raise_no_formats(info, True)

        if test:
            verbose = self.params.get('verbose')
            params = {
                'test': True,
                'quiet': self.params.get('quiet') or not verbose,
                'verbose': verbose,
                'noprogress': not verbose,
                'nopart': True,
                'skip_unavailable_fragments': False,
                'keep_fragments': False,
                'overwrites': True,
                '_no_ytdl_file': True,
            }
        else:
            params = self.params
        fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
        if not test:
            for ph in self._progress_hooks:
                fd.add_progress_hook(ph)
            urls = '", "'.join([f['url'] for f in info.get('requested_formats', [])] or [info['url']])
            self.write_debug('Invoking downloader on "%s"' % urls)

        new_info = copy.deepcopy(self._copy_infodict(info))
        if new_info.get('http_headers') is None:
            new_info['http_headers'] = self._calc_headers(new_info)
        return fd.download(name, new_info, subtitle)

    def process_info(self, info_dict):
        """Process a single resolved IE result."""

        assert info_dict.get('_type', 'video') == 'video'

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads >= int(max_downloads):
                raise MaxDownloadsReached()

        # TODO: backward compatibility, to be removed
        info_dict['fulltitle'] = info_dict['title']

        if 'format' not in info_dict and 'ext' in info_dict:
            info_dict['format'] = info_dict['ext']

        if self._match_entry(info_dict) is not None:
            return

        self.post_extract(info_dict)
        self._num_downloads += 1

        # info_dict['_filename'] needs to be set for backward compatibility
        info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
        temp_filename = self.prepare_filename(info_dict, 'temp')
        files_to_move = {}

        # Forced printings
        self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

        if self.params.get('simulate'):
            if self.params.get('force_write_download_archive', False):
                self.record_download_archive(info_dict)
            # Do nothing else if in simulate mode
            return

        if full_filename is None:
            return
        if not self._ensure_dir_exists(encodeFilename(full_filename)):
            return
        if not self._ensure_dir_exists(encodeFilename(temp_filename)):
            return

        if self._write_description('video', info_dict,
                                   self.prepare_filename(info_dict, 'description')) is None:
            return

        sub_files = self._write_subtitles(info_dict, temp_filename)
        if sub_files is None:
            return
        files_to_move.update(dict(sub_files))

        thumb_files = self._write_thumbnails(
            'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
        if thumb_files is None:
            return
        files_to_move.update(dict(thumb_files))

        infofn = self.prepare_filename(info_dict, 'infojson')
        _infojson_written = self._write_info_json('video', info_dict, infofn)
        if _infojson_written:
            info_dict['infojson_filename'] = infofn
            # For backward compatability, even though it was a private field
            info_dict['__infojson_filename'] = infofn
        elif _infojson_written is None:
            return

        # Note: Annotations are deprecated
        annofn = None
        if self.params.get('writeannotations', False):
            annofn = self.prepare_filename(info_dict, 'annotation')
        if annofn:
            if not self._ensure_dir_exists(encodeFilename(annofn)):
                return
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
                self.to_screen('[info] Video annotations are already present')
            elif not info_dict.get('annotations'):
                self.report_warning('There are no annotations to write.')
            else:
                try:
                    self.to_screen('[info] Writing video annotations to: ' + annofn)
                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                        annofile.write(info_dict['annotations'])
                except (KeyError, TypeError):
                    self.report_warning('There are no annotations to write.')
                except (OSError, IOError):
                    self.report_error('Cannot write annotations file: ' + annofn)
                    return

        # Write internet shortcut files
        def _write_link_file(link_type):
            if 'webpage_url' not in info_dict:
                self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information')
                return False
            linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
            if not self._ensure_dir_exists(encodeFilename(linkfn)):
                return False
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
                self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
                return True
            try:
                self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
                with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                             newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                    template_vars = {'url': iri_to_uri(info_dict['webpage_url'])}
                    if link_type == 'desktop':
                        template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                    linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
            except (OSError, IOError):
                self.report_error(f'Cannot write internet shortcut {linkfn}')
                return False
            return True

        write_links = {
            'url': self.params.get('writeurllink'),
            'webloc': self.params.get('writewebloclink'),
            'desktop': self.params.get('writedesktoplink'),
        }
        if self.params.get('writelink'):
            link_type = ('webloc' if sys.platform == 'darwin'
                         else 'desktop' if sys.platform.startswith('linux')
                         else 'url')
            write_links[link_type] = True

        if any(should_write and not _write_link_file(link_type)
               for link_type, should_write in write_links.items()):
            return

        try:
            info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        except PostProcessingError as err:
            self.report_error('Preprocessing: %s' % str(err))
            return

        must_record_download_archive = False
        if self.params.get('skip_download', False):
            info_dict['filepath'] = temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
            info_dict['__files_to_move'] = files_to_move
            info_dict = self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)
        else:
            # Download
            info_dict.setdefault('__postprocessors', [])
            try:

                def existing_file(*filepaths):
                    ext = info_dict.get('ext')
                    final_ext = self.params.get('final_ext', ext)
                    existing_files = []
                    for file in orderedSet(filepaths):
                        if final_ext != ext:
                            converted = replace_extension(file, final_ext, ext)
                            if os.path.exists(encodeFilename(converted)):
                                existing_files.append(converted)
                        if os.path.exists(encodeFilename(file)):
                            existing_files.append(file)

                    if not existing_files or self.params.get('overwrites', False):
                        for file in orderedSet(existing_files):
                            self.report_file_delete(file)
                            os.remove(encodeFilename(file))
                        return None

                    info_dict['ext'] = os.path.splitext(existing_files[0])[1][1:]
                    return existing_files[0]

                success = True
                if info_dict.get('requested_formats') is not None:

                    def compatible_formats(formats):
                        # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
                        video_formats = [format for format in formats if format.get('vcodec') != 'none']
                        audio_formats = [format for format in formats if format.get('acodec') != 'none']
                        if len(video_formats) > 2 or len(audio_formats) > 2:
                            # Not possible to store more than 2 video/audio streams in most containers
                            return False

                        # Check extension
                        exts = set(format.get('ext') for format in formats)
                        COMPATIBLE_EXTS = (
                            set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
                            set(('webm',)),
                        )
                        for ext_sets in COMPATIBLE_EXTS:
                            if ext_sets.issuperset(exts):
                                return True
                        # TODO: Check acodec/vcodec
                        return False

                    requested_formats = info_dict['requested_formats']
                    old_ext = info_dict['ext']
                    if self.params.get('merge_output_format') is None:
                        if not compatible_formats(requested_formats):
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'Requested formats are incompatible for merge and will be merged into mkv')
                        if (info_dict['ext'] == 'webm'
                                and info_dict.get('thumbnails')
                                # check with type instead of pp_key, __name__, or isinstance
                                # since we dont want any custom PPs to trigger this
                                and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):
                            info_dict['ext'] = 'mkv'
                            self.report_warning(
                                'webm doesn\'t support embedding a thumbnail, mkv will be used')
                    new_ext = info_dict['ext']

                    def correct_ext(filename, ext=new_ext):
                        if filename == '-':
                            return filename
                        filename_real_ext = os.path.splitext(filename)[1][1:]
                        filename_wo_ext = (
                            os.path.splitext(filename)[0]
                            if filename_real_ext in (old_ext, new_ext)
                            else filename)
                        return '%s.%s' % (filename_wo_ext, ext)

                    # Ensure filename always has a correct extension for successful merge
                    full_filename = correct_ext(full_filename)
                    temp_filename = correct_ext(temp_filename)
                    dl_filename = existing_file(full_filename, temp_filename)
                    info_dict['__real_download'] = False

                    if dl_filename is not None:
                        self.report_file_already_downloaded(dl_filename)
                    elif get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-'):
                        info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        downloaded = []
                        merger = FFmpegMergerPP(self)
                        if self.params.get('allow_unplayable_formats'):
                            self.report_warning(
                                'You have requested merging of multiple formats '
                                'while also allowing unplayable formats to be downloaded. '
                                'The formats won\'t be merged to prevent data corruption.')
                        elif not merger.available:
                            self.report_warning(
                                'You have requested merging of multiple formats but ffmpeg is not installed. '
                                'The formats won\'t be merged.')

                        if temp_filename == '-':
                            reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict)
                                      else 'but the formats are incompatible for simultaneous download' if merger.available
                                      else 'but ffmpeg is not installed')
                            self.report_warning(
                                f'You have requested downloading multiple formats to stdout {reason}. '
                                'The formats will be streamed one after the other')
                            fname = temp_filename
                        for f in requested_formats:
                            new_info = dict(info_dict)
                            del new_info['requested_formats']
                            new_info.update(f)
                            if temp_filename != '-':
                                fname = prepend_extension(
                                    correct_ext(temp_filename, new_info['ext']),
                                    'f%s' % f['format_id'], new_info['ext'])
                                if not self._ensure_dir_exists(fname):
                                    return
                                f['filepath'] = fname
                                downloaded.append(fname)
                            partial_success, real_download = self.dl(fname, new_info)
                            info_dict['__real_download'] = info_dict['__real_download'] or real_download
                            success = success and partial_success
                        if merger.available and not self.params.get('allow_unplayable_formats'):
                            info_dict['__postprocessors'].append(merger)
                            info_dict['__files_to_merge'] = downloaded
                            # Even if there were no downloads, it is being merged only now
                            info_dict['__real_download'] = True
                        else:
                            for file in downloaded:
                                files_to_move[file] = None
                else:
                    # Just a single file
                    dl_filename = existing_file(full_filename, temp_filename)
                    if dl_filename is None or dl_filename == temp_filename:
                        # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                        # So we should try to resume the download
                        success, real_download = self.dl(temp_filename, info_dict)
                        info_dict['__real_download'] = real_download
                    else:
                        self.report_file_already_downloaded(dl_filename)

                dl_filename = dl_filename or temp_filename
                info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

            except network_exceptions as err:
                self.report_error('unable to download video data: %s' % error_to_compat_str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

            if success and full_filename != '-':

                def fixup():
                    do_fixup = True
                    fixup_policy = self.params.get('fixup')
                    vid = info_dict['id']

                    if fixup_policy in ('ignore', 'never'):
                        return
                    elif fixup_policy == 'warn':
                        do_fixup = False
                    elif fixup_policy != 'force':
                        assert fixup_policy in ('detect_or_warn', None)
                        if not info_dict.get('__real_download'):
                            do_fixup = False

                    def ffmpeg_fixup(cndn, msg, cls):
                        if not cndn:
                            return
                        if not do_fixup:
                            self.report_warning(f'{vid}: {msg}')
                            return
                        pp = cls(self)
                        if pp.available:
                            info_dict['__postprocessors'].append(pp)
                        else:
                            self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                    stretched_ratio = info_dict.get('stretched_ratio')
                    ffmpeg_fixup(
                        stretched_ratio not in (1, None),
                        f'Non-uniform pixel ratio {stretched_ratio}',
                        FFmpegFixupStretchedPP)

                    ffmpeg_fixup(
                        (info_dict.get('requested_formats') is None
                         and info_dict.get('container') == 'm4a_dash'
                         and info_dict.get('ext') == 'm4a'),
                        'writing DASH m4a. Only some players support this container',
                        FFmpegFixupM4aPP)

                    downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                    downloader = downloader.__name__ if downloader else None
                    ffmpeg_fixup(info_dict.get('requested_formats') is None and downloader == 'HlsFD',
                                 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                 FFmpegFixupM3u8PP)
                    ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                    ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed duration detected', FFmpegFixupDurationPP)

                fixup()
                try:
                    info_dict = self.post_process(dl_filename, info_dict, files_to_move)
                except PostProcessingError as err:
                    self.report_error('Postprocessing: %s' % str(err))
                    return
                try:
                    for ph in self._post_hooks:
                        ph(info_dict['filepath'])
                except Exception as err:
                    self.report_error('post hooks: %s' % str(err))
                    return
                must_record_download_archive = True

        if must_record_download_archive or self.params.get('force_write_download_archive', False):
            self.record_download_archive(info_dict)
        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None and self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()

    def __download_wrapper(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                res = func(*args, **kwargs)
            except UnavailableVideoError as e:
                self.report_error(e)
            except MaxDownloadsReached as e:
                self.to_screen(f'[info] {e}')
                raise
            except DownloadCancelled as e:
                self.to_screen(f'[info] {e}')
                if not self.params.get('break_per_url'):
                    raise
            else:
                if self.params.get('dump_single_json', False):
                    self.post_extract(res)
                    self.to_stdout(json.dumps(self.sanitize_info(res)))
        return wrapper

    def download(self, url_list):
        """Download a given list of URLs."""
        url_list = variadic(url_list)  # Passing a single URL is a common mistake
        outtmpl = self.outtmpl_dict['default']
        if (len(url_list) > 1
                and outtmpl != '-'
                and '%' not in outtmpl
                and self.params.get('max_downloads') != 1):
            raise SameFileError(outtmpl)

        for url in url_list:
            self.__download_wrapper(self.extract_info)(
                url, force_generic_extractor=self.params.get('force_generic_extractor', False))

        return self._download_retcode
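
    # Minimal usage sketch (options and URL are hypothetical placeholders):
    #     with YoutubeDL({'format': 'bestvideo+bestaudio/best'}) as ydl:
    #         retcode = ydl.download(['https://example.com/watch?v=xxxx'])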

    def download_with_info_file(self, info_filename):
        with contextlib.closing(fileinput.FileInput(
                [info_filename], mode='r',
                openhook=fileinput.hook_encoded('utf-8'))) as f:
            # FileInput doesn't have a read method, we can't call json.load
            info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
        try:
            self.__download_wrapper(self.process_ie_result)(info, download=True)
        except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
            if not isinstance(e, EntryNotInPlaylist):
                self.to_stderr('\r')
            webpage_url = info.get('webpage_url')
            if webpage_url is not None:
                self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                return self.download([webpage_url])
            else:
                raise
        return self._download_retcode

    @staticmethod
    def sanitize_info(info_dict, remove_private_keys=False):
        ''' Sanitize the infodict for converting to json '''
        if info_dict is None:
            return info_dict
        info_dict.setdefault('epoch', int(time.time()))
        remove_keys = {'__original_infodict'}  # Always remove this since this may contain a copy of the entire dict
        keep_keys = ['_type']  # Always keep this to facilitate load-info-json
        if remove_private_keys:
            remove_keys |= {
                'requested_formats', 'requested_subtitles', 'requested_entries', 'entries',
                'filepath', 'infojson_filename', 'original_url', 'playlist_autonumber',
            }
            empty_values = (None, {}, [], set(), tuple())
            reject = lambda k, v: k not in keep_keys and (
                k.startswith('_') or k in remove_keys or v in empty_values)
        else:
            reject = lambda k, v: k in remove_keys
        filter_fn = lambda obj: (
            list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
            else obj if not isinstance(obj, dict)
            else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
        return filter_fn(info_dict)
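
    # With remove_private_keys=True every key starting with '_' (except '_type')
    # and the keys listed above are dropped, so the result can be serialized:
    #     json.dumps(YoutubeDL.sanitize_info(info, remove_private_keys=True))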

    @staticmethod
    def filter_requested_info(info_dict, actually_filter=True):
        ''' Alias of sanitize_info for backward compatibility '''
        return YoutubeDL.sanitize_info(info_dict, actually_filter)

    def run_pp(self, pp, infodict):
        files_to_delete = []
        if '__files_to_move' not in infodict:
            infodict['__files_to_move'] = {}
        try:
            files_to_delete, infodict = pp.run(infodict)
        except PostProcessingError as e:
            # Must be True and not 'only_download'
            if self.params.get('ignoreerrors') is True:
                self.report_error(e)
                return infodict
            raise

        if not files_to_delete:
            return infodict
        if self.params.get('keepvideo', False):
            for f in files_to_delete:
                infodict['__files_to_move'].setdefault(f, '')
        else:
            for old_filename in set(files_to_delete):
                self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
                try:
                    os.remove(encodeFilename(old_filename))
                except (IOError, OSError):
                    self.report_warning('Unable to remove downloaded original file')
                if old_filename in infodict['__files_to_move']:
                    del infodict['__files_to_move'][old_filename]
        return infodict

    @staticmethod
    def post_extract(info_dict):
        def actual_post_extract(info_dict):
            if info_dict.get('_type') in ('playlist', 'multi_video'):
                for video_dict in info_dict.get('entries', {}):
                    actual_post_extract(video_dict or {})
                return

            post_extractor = info_dict.get('__post_extractor') or (lambda: {})
            extra = post_extractor().items()
            info_dict.update(extra)
            info_dict.pop('__post_extractor', None)

            original_infodict = info_dict.get('__original_infodict') or {}
            original_infodict.update(extra)
            original_infodict.pop('__post_extractor', None)

        actual_post_extract(info_dict or {})

    def pre_process(self, ie_info, key='pre_process', files_to_move=None):
        info = dict(ie_info)
        info['__files_to_move'] = files_to_move or {}
        for pp in self._pps[key]:
            info = self.run_pp(pp, info)
        return info, info.pop('__files_to_move', None)
    def post_process(self, filename, ie_info, files_to_move=None):
        """Run all the postprocessors on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        info['__files_to_move'] = files_to_move or {}

        for pp in ie_info.get('__postprocessors', []) + self._pps['post_process']:
            info = self.run_pp(pp, info)
        info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
        del info['__files_to_move']
        for pp in self._pps['after_move']:
            info = self.run_pp(pp, info)
        return info

    def _make_archive_id(self, info_dict):
        video_id = info_dict.get('id')
        if not video_id:
            return
        # Future-proof against any change in case
        # and backwards compatibility with prior versions
        extractor = info_dict.get('extractor_key') or info_dict.get('ie_key')  # key in a playlist
        if extractor is None:
            url = str_or_none(info_dict.get('url'))
            if not url:
                return
            # Try to find matching extractor for the URL and take its ie_key
            for ie_key, ie in self._ies.items():
                if ie.suitable(url):
                    extractor = ie_key
                    break
            else:
                return
        return '%s %s' % (extractor.lower(), video_id)

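    # The resulting archive entry is '<extractor_key.lower()> <video_id>',
    # e.g. 'youtube dQw4w9WgXcQ' for a YouTube video.
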
    def in_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return False

        vid_id = self._make_archive_id(info_dict)
        if not vid_id:
            return False  # Incomplete video information

        return vid_id in self.archive

    def record_download_archive(self, info_dict):
        fn = self.params.get('download_archive')
        if fn is None:
            return
        vid_id = self._make_archive_id(info_dict)
        assert vid_id
        with locked_file(fn, 'a', encoding='utf-8') as archive_file:
            archive_file.write(vid_id + '\n')
        self.archive.add(vid_id)

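    # Illustrative usage (a sketch; the archive file name is an assumption):
    #
    #   ydl = YoutubeDL({'download_archive': 'archive.txt'})
    #   ydl.download([url])  # IDs already recorded in archive.txt are skipped
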
    @staticmethod
    def format_resolution(format, default='unknown'):
        if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
            return 'audio only'
        if format.get('resolution') is not None:
            return format['resolution']
        if format.get('width') and format.get('height'):
            return '%dx%d' % (format['width'], format['height'])
        elif format.get('height'):
            return '%sp' % format['height']
        elif format.get('width'):
            return '%dx?' % format['width']
        return default

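    # For example, following the branches above:
    #   {'width': 1920, 'height': 1080}            -> '1920x1080'
    #   {'height': 720}                            -> '720p'
    #   {'vcodec': 'none', 'acodec': 'mp4a.40.2'}  -> 'audio only'
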
    def _format_note(self, fdict):
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res

    def _list_format_headers(self, *headers):
        if self.params.get('listformats_table', True) is not False:
            return [self._format_screen(header, self.Styles.HEADERS) for header in headers]
        return headers

    def list_formats(self, info_dict):
        formats = info_dict.get('formats', [info_dict])
        new_format = self.params.get('listformats_table', True) is not False
        if new_format:
            delim = self._format_screen('\u2502', self.Styles.DELIM, '|', test_encoding=True)
            table = [
                [
                    self._format_screen(format_field(f, 'format_id'), self.Styles.ID),
                    format_field(f, 'ext'),
                    format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                    format_field(f, 'fps', '\t%d'),
                    format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
                    delim,
                    format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                    format_field(f, 'tbr', '\t%dk'),
                    shorten_protocol_name(f.get('protocol', '').replace('native', 'n')),
                    delim,
                    format_field(f, 'vcodec', default='unknown').replace(
                        'none',
                        'images' if f.get('acodec') == 'none'
                        else self._format_screen('audio only', self.Styles.SUPPRESS)),
                    format_field(f, 'vbr', '\t%dk'),
                    format_field(f, 'acodec', default='unknown').replace(
                        'none',
                        '' if f.get('vcodec') == 'none'
                        else self._format_screen('video only', self.Styles.SUPPRESS)),
                    format_field(f, 'abr', '\t%dk'),
                    format_field(f, 'asr', '\t%dHz'),
                    join_nonempty(
                        self._format_screen('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
                        format_field(f, 'language', '[%s]'),
                        join_nonempty(
                            format_field(f, 'format_note'),
                            format_field(f, 'container', ignore=(None, f.get('ext'))),
                            delim=', '),
                        delim=' '),
                ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
            header_line = self._list_format_headers(
                'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
                delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
        else:
            table = [
                [
                    format_field(f, 'format_id'),
                    format_field(f, 'ext'),
                    self.format_resolution(f),
                    self._format_note(f)]
                for f in formats
                if f.get('preference') is None or f['preference'] >= -1000]
            header_line = ['format code', 'extension', 'resolution', 'note']

        self.to_screen(
            '[info] Available formats for %s:' % info_dict['id'])
        self.to_stdout(render_table(
            header_line, table,
            extra_gap=(0 if new_format else 1),
            hide_empty=new_format,
            delim=new_format and self._format_screen('\u2500', self.Styles.DELIM, '-', test_encoding=True)))

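    # Rough equivalent of `yt-dlp -F URL` (a sketch; `url` is an assumption):
    #
    #   with YoutubeDL({'listformats': True}) as ydl:
    #       ydl.extract_info(url, download=False)
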
    def list_thumbnails(self, info_dict):
        thumbnails = list(info_dict.get('thumbnails'))
        if not thumbnails:
            self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
            return

        self.to_screen(
            '[info] Thumbnails for %s:' % info_dict['id'])
        self.to_stdout(render_table(
            self._list_format_headers('ID', 'Width', 'Height', 'URL'),
            [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))

    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        if not subtitles:
            self.to_screen('%s has no %s' % (video_id, name))
            return
        self.to_screen(
            'Available %s for %s:' % (name, video_id))

        def _row(lang, formats):
            exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
            if len(set(names)) == 1:
                names = [] if names[0] == 'unknown' else names[:1]
            return [lang, ', '.join(names), ', '.join(exts)]

        self.to_stdout(render_table(
            self._list_format_headers('Language', 'Name', 'Formats'),
            [_row(lang, formats) for lang, formats in subtitles.items()],
            hide_empty=True))

    def urlopen(self, req):
        """ Start an HTTP download """
        if isinstance(req, compat_basestring):
            req = sanitized_Request(req)
        return self._opener.open(req, timeout=self._socket_timeout)

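    # Illustrative usage: a plain URL or a prepared Request both work, and the
    # configured proxies, cookies and socket timeout are applied:
    #
    #   page = ydl.urlopen('https://example.com/robots.txt').read()
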
    def print_debug_header(self):
        if not self.params.get('verbose'):
            return

        def get_encoding(stream):
            ret = getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
            if not supports_terminal_sequences(stream):
                ret += ' (No VT)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, out %s, err %s, pref %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            get_encoding(self._screen_file), get_encoding(self._err_file),
            self.get_encoding())

        logger = self.params.get('logger')
        if logger:
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        write_debug(join_nonempty(
            'yt-dlp version', __version__,
            f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            delim=' '))
        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if plugin_extractors or plugin_postprocessors:
            write_debug('Plugins: %s' % [
                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
        if self.params.get('compat_opts'):
            write_debug('Compatibility options: %s' % ', '.join(self.params.get('compat_opts')))

        if source == 'source':
            try:
                sp = Popen(
                    ['git', 'rev-parse', '--short', 'HEAD'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    cwd=os.path.dirname(os.path.abspath(__file__)))
                out, err = sp.communicate_or_kill()
                out = out.decode().strip()
                if re.match('[0-9a-f]+', out):
                    write_debug('Git HEAD: %s' % out)
            except Exception:
                try:
                    sys.exc_clear()
                except Exception:
                    pass

        def python_implementation():
            impl_name = platform.python_implementation()
            if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
                return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
            return impl_name

        write_debug('Python version %s (%s %s) - %s' % (
            platform.python_version(),
            python_implementation(),
            platform.architecture()[0],
            platform_name()))

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(ffmpeg_features)

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        from .downloader.websocket import has_websockets
        from .postprocessor.embedthumbnail import has_mutagen
        from .cookies import SQLITE_AVAILABLE, KEYRING_AVAILABLE

        lib_str = join_nonempty(
            compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0],
            KEYRING_AVAILABLE and 'keyring',
            has_mutagen and 'mutagen',
            SQLITE_AVAILABLE and 'sqlite',
            has_websockets and 'websockets',
            delim=', ') or 'none'
        write_debug('Optional libraries: %s' % lib_str)

        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode('utf-8')
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)

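    # This header is emitted when the 'verbose' param is set (`yt-dlp -v`);
    # with a custom 'logger' param, the same lines go to logger.debug instead.
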
    def _setup_opener(self):
        timeout_val = self.params.get('socket_timeout')
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = compat_urllib_request.getproxies()
            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
            if 'http' in proxies and 'https' not in proxies:
                proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = compat_urllib_request_DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = compat_urllib_request.FileHandler()

        def file_open(*args, **kwargs):
            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
        file_handler.file_open = file_open

        opener = compat_urllib_request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener

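    # Illustrative params consumed above (values are assumptions):
    #
    #   ydl = YoutubeDL({
    #       'proxy': 'socks5://127.0.0.1:1080',  # empty string disables proxying
    #       'cookiefile': 'cookies.txt',
    #       'socket_timeout': 10,
    #   })
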
    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding

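    # For example, under a UTF-8 locale encode('ß') returns b'\xc3\x9f';
    # the 'encoding' param (--encoding) overrides the detected encoding.
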
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, False = skip, None = error '''
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            self.to_screen(f'[info] {label.title()} metadata is already present')
        else:
            self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
            try:
                write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            except (OSError, IOError):
                self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
                return None
        return True

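    # Illustrative params consumed above (a sketch):
    #
    #   YoutubeDL({
    #       'writeinfojson': True,   # write the .info.json file
    #       'clean_infojson': True,  # run sanitize_info before writing
    #   })
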
    def _write_description(self, label, ie_result, descfn):
        ''' Write description and returns True = written, False = skip, None = error '''
        if not self.params.get('writedescription'):
            return False
        elif not descfn:
            self.write_debug(f'Skipping writing {label} description')
            return False
        elif not self._ensure_dir_exists(descfn):
            return None
        elif not self.params.get('overwrites', True) and os.path.exists(descfn):
            self.to_screen(f'[info] {label.title()} description is already present')
        elif ie_result.get('description') is None:
            self.report_warning(f'There\'s no {label} description to write')
            return False
        else:
            try:
                self.to_screen(f'[info] Writing {label} description to: {descfn}')
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(ie_result['description'])
            except (OSError, IOError):
                self.report_error(f'Cannot write {label} description file {descfn}')
                return None
        return True

    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret

        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret
        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            if not self.params.get('overwrites', True) and os.path.exists(sub_filename):
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except (OSError, IOError):
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}')
                continue
        return ret

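    # Illustrative params consumed above (a sketch):
    #
    #   YoutubeDL({
    #       'writesubtitles': True,     # manually created subtitles
    #       'writeautomaticsub': True,  # auto-generated subtitles
    #       'subtitleslangs': ['en'],
    #   })
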
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        for t in thumbnails[::-1]:
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            if not self.params.get('overwrites', True) and os.path.exists(thumb_filename):
                ret.append((thumb_filename, thumb_filename_final))
                t['filepath'] = thumb_filename
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(t['url'])
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            if ret and not write_all:
: