import xml.etree.ElementTree

from ..compat import functools  # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..cookies import LenientSimpleCookie
from ..downloader import FileDownloader
from ..downloader.f4m import get_base_url, remove_encrypted_media
    parse_m3u8_attributes,
92 """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped. Set to an empty string if video has
                    no title as opposed to "None" which signifies that the
                    extractor failed to obtain a title

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        The mandatory URL representing the media:
                                   for plain file media - HTTP URL of this file,
                                   for RTMP - RTMP URL,
                                   for HLS - URL of the M3U8 media playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH
                                     - HTTP URL to plain file media (in case of
                                       unfragmented media)
                                     - URL of the MPD manifest or base URL
                                       representing the media if MPD manifest
                                       is parsed from a string (in case of
                                       fragmented media)
                                   for MSS - URL of the ISM manifest.
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media:
                                   for HLS - URL of the M3U8 master playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH - URL of the MPD manifest,
                                   for MSS - URL of the ISM manifest.
                    * manifest_stream_number  (For internal use only)
                                 The index of the stream in the manifest file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * dynamic_range The dynamic range of the video. One of:
                                 "SDR" (None), "HDR10", "HDR10+", "HDR12", "HLG", "DV"
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * audio_channels  Number of audio channels
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case. One of "http", "https" or
                                 one of the protocols defined in downloader.PROTOCOL_MAP
                    * fragment_base_url
                                 Base URL for fragments. Each fragment's path
                                 value (if present) will be relative to
                                 this URL.
                    * fragments  A list of fragments of a fragmented media.
                                 Each fragment entry must contain either an url
                                 or a path. If an url is present it should be
                                 considered by a client. Otherwise both path and
                                 fragment_base_url must be present. Here is
                                 the list of all potential fields:
                                 * "url" - fragment's URL
                                 * "path" - fragment's path relative to
                                            fragment_base_url
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                    * is_from_start  Is a live format that can be downloaded
                                 from the start. Boolean
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
                    * has_drm    The format has DRM and cannot be downloaded. Boolean
                    * downloader_options  A dictionary of downloader options
                                 (For internal use only)
                                 * http_chunk_size Chunk size for HTTP downloads
                                 * ffmpeg_args     Extra arguments for ffmpeg downloader
                    RTMP formats can also have the additional fields: page_url,
                    app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
                    rtmp_protocol, rtmp_real_time

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    direct:         True if a direct video file was given (must only be set by GenericIE)
    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
                        * "http_headers" (dict) - HTTP headers for the request
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    timestamp:      UNIX timestamp of the moment the video was uploaded
    upload_date:    Video upload date in UTC (YYYYMMDD).
                    If not explicitly set, calculated from timestamp
    release_timestamp: UNIX timestamp of the moment the video was released.
                    If it is not clear whether to use timestamp or this, use the former
    release_date:   The date (YYYYMMDD) when the video was released in UTC.
                    If not explicitly set, calculated from release_timestamp
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date:  The date (YYYYMMDD) when the video was last modified in UTC.
                    If not explicitly set, calculated from modified_timestamp
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    channel_follower_count: Number of followers of the channel.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    It can optionally also have:
                        * "name": Name or description of the subtitles
                        * "http_headers": A dictionary of additional HTTP headers
                                          to add to the request.
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles'; contains automatically generated
                    captions instead of normal subtitles
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    concurrent_view_count: How many users are currently watching the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "author_thumbnail" - The thumbnail of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
                        * "like_count" - Number of positive ratings of the comment
                        * "dislike_count" - Number of negative ratings of the comment
                        * "is_favorited" - Whether the comment is marked as
                                           favorite by the video uploader
                        * "author_is_uploader" - Whether the comment is made by
                                                 the video uploader
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to yt-dlp it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Music"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    cast:           A list of the video cast
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    was_live:       True, False, or None (=unknown). Whether this video was
                    originally a live stream.
    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
                    or 'post_live' (was live, but VOD is not yet processed)
                    If absent, automatically set from is_live, was_live
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)
    playable_in_embed: Whether this video is allowed to play in embedded
                    players on other sites. Can be True (=always allowed),
                    False (=never allowed), None (=unknown), or a string
                    specifying the criteria for embedability; e.g. 'whitelist'
    availability:   Under what condition the video is available. One of
                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
                    to set it
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    __post_extractor: A function to be called just before the metadata is
                    written to either disk, logger or console. The function
                    must return a dict which will be added to the info_dict.
                    This is useful for additional information that is
                    time-consuming to extract. Note that the fields thus
                    extracted will not be available to output template and
                    match_filter. So, only "comments" and "comment_count" are
                    currently allowed to be extracted via this method.

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    series_id:      Id of the series or programme the video episode belongs to, as a unicode string.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.
    composer:       Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

    section_start:  Start time of the section in seconds
    section_end:    End time of the section in seconds

    The following fields should only be set for storyboards:
    rows:           Number of rows in each storyboard fragment, as an integer
    columns:        Number of columns in each storyboard fragment, as an integer

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", and any other relevant
    attributes with the same semantics as videos (see above).

    It can also have the following optional fields:

    playlist_count: The total number of videos in a playlist. If not given,
                    YoutubeDL tries to calculate it from "entries"


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this should also be added to the list of extractors and
    should define a _VALID_URL regexp and re-define the _real_extract() and
    (optionally) _real_initialize() methods.

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.

    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
    the HTML of Generic webpages. It may also override _extract_embed_urls
    or _extract_from_webpage as necessary. While these are normally classmethods,
    _extract_from_webpage is allowed to be an instance method.

    _extract_from_webpage may raise self.StopExtraction() to stop further
    processing of the webpage and obtain exclusive rights to it. This is useful
    when the extractor cannot reliably be matched using just the URL,
    e.g. invidious/peertube instances

    Embed-only extractors can be defined by setting _VALID_URL = False.

    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will
    be called between _initialize_pre_login and _real_initialize if credentials
    are passed by the user. In cases where it is necessary to have the login
    process as part of the extraction rather than initialization, _perform_login
    can be left undefined.

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    Though it won't disable explicit geo restriction bypass based on
    country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by
    geo restriction bypass mechanism right away in order to bypass
    geo restriction, of course, if the mechanism is not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    The _ENABLED attribute should be set to False for IEs that
    are disabled by default and must be explicitly enabled.

    The _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """
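
    # A minimal sketch (illustrative, not from the original source) of the
    # dict a video extractor returns, per the docstring above:
    #     {
    #         'id': '4234987',
    #         'title': 'Dancing naked mole rats',
    #         'formats': [{
    #             'url': 'https://example.com/video.mp4',
    #             'format_id': 'http-720p',
    #             'width': 1280,
    #             'height': 720,
    #         }],
    #     }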

    _x_forwarded_for_ip = None
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _WORKING = True
    _NETRC_MACHINE = None

    def _login_hint(self, method=NO_DEFAULT, netrc=None):
        password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
        return {
            None: '',
            'any': f'Use --cookies, --cookies-from-browser, {password_hint}',
            'password': f'Use {password_hint}',
            'cookies': (
                'Use --cookies-from-browser or --cookies for the authentication. '
                'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies'),
        }[method if method is not NO_DEFAULT else 'any' if self.supports_login() else 'cookies']

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance).
        If a downloader is not passed during initialization,
        it must be set using "set_downloader()" before "extract()" is called"""
        self._x_forwarded_for_ip = None
        self._printed_messages = set()
        self.set_downloader(downloader)

    @classmethod
    def _match_valid_url(cls, url):
        if cls._VALID_URL is False:
            return None
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls._match_valid_url(url) is not None

    @classmethod
    def _match_id(cls, url):
        return cls._match_valid_url(url).group('id')
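
    # For instance (illustrative), an extractor might define
    #     _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    # so that _match_id() can pull the video ID out of the named "id" group.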

    @classmethod
    def get_temp_id(cls, url):
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    @classmethod
    def supports_login(cls):
        return bool(cls._NETRC_MACHINE)

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._printed_messages = set()
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        self._initialize_pre_login()
        if self.supports_login():
            username, password = self._get_login_info()
            if username:
                self._perform_login(username, password)
        elif self.get_param('username') and False not in (self.IE_DESC, self._NETRC_MACHINE):
            self.report_warning(f'Login with password is not supported for this website. {self._login_hint("cookies")}')
        self._real_initialize()

    def _initialize_geo_bypass(self, geo_bypass_context=None):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)

        """
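        # e.g. an extractor that learns mid-extraction that a video is
        # DE/FR-only might call (values illustrative):
        #     self._initialize_geo_bypass({'countries': ['DE', 'FR']})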
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self.get_param('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self.get_param('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self.get_param('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                self._downloader.write_debug(
                    f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    self.write_debug('Extracting URL: %s' % url)
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles') or {}
                    if 'no-live-chat' in self.get_param('compat_opts'):
                        for lang in ('live_chat', 'comments', 'danmaku'):
                            subtitles.pop(lang, None)
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except UnsupportedError:
            raise
        except ExtractorError as e:
            kwargs = {
                'video_id': e.video_id or self.get_temp_id(url),
                'ie': self.IE_NAME,
                'tb': e.traceback or sys.exc_info()[2],
                'expected': e.expected,
                'cause': e.cause,
            }
            if hasattr(e, 'countries'):
                kwargs['countries'] = e.countries
            raise type(e)(e.orig_msg, **kwargs)
        except http.client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))

    def __maybe_fake_ip_and_retry(self, countries):
        if (not self.get_param('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self.get_param('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets a YoutubeDL instance as the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _initialize_pre_login(self):
        """ Initialization before login. Redefine in subclasses."""
        pass

    def _perform_login(self, username, password):
        """ Login with username and password. Redefine in subclasses."""
        pass

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')
751 """A string for getting the InfoExtractor with get_info_extractor"""
752 return cls
.__name
__[:-2]
756 return cls
.__name
__[:-2]

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)

    def _create_request(self, url_or_request, data=None, headers=None, query=None):
        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
        return sanitized_Request(url_or_request, data, headers or {})

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None,
                         fatal=True, data=None, headers=None, query=None, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if not self._downloader._first_webpage_request:
            sleep_interval = self.get_param('sleep_interval_requests') or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        else:
            self._downloader._first_webpage_request = False

        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen(str(note))
            else:
                self.to_screen(f'{video_id}: {note}')

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            headers = (headers or {}).copy()
            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)

        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = f'{errnote}: {error_to_compat_str(err)}'
            if fatal:
                raise ExtractorError(errmsg, cause=err)
            else:
                self.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
                                 encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        Arguments:
        url_or_request -- plain text URL as a string or
            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractorError to be raised,
            otherwise a warning will be reported and extraction continued
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
        """
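
        # For instance (illustrative), a site returning useful bodies with
        # 404 responses could be fetched with:
        #     self._download_webpage_handle(url, video_id, expected_status=404)
        # or, for a whole class of codes:
        #     self._download_webpage_handle(url, video_id, expected_status=lambda c: 400 <= c < 500)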

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, str):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details.' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _request_dump_filename(self, url, video_id):
        basen = f'{video_id}_{url}'
        trim_length = self.get_param('trim_file_name') or 240
        if len(basen) > trim_length:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:trim_length - len(h)] + h
        filename = sanitize_filename(f'{basen}.dump', restricted=True)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = fR'\\?\{absfilepath}'
        return filename

    def __decode_webpage(self, webpage_bytes, encoding, headers):
        if not encoding:
            encoding = self._guess_encoding_from_content(headers.get('Content-Type', ''), webpage_bytes)
        try:
            return webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            return webpage_bytes.decode('utf-8', 'replace')

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages'):
            filename = self._request_dump_filename(urlh.geturl(), video_id)
            self.to_screen(f'Saving request to {filename}')
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        content = self.__decode_webpage(webpage_bytes, encoding, urlh.headers)
        self.__check_blocked(content)

        return content

    def __print_error(self, errnote, fatal, video_id, err):
        if fatal:
            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
        elif errnote:
            self.report_warning(f'{video_id}: {errnote}: {err}')

    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
        if transform_source:
            xml_string = transform_source(xml_string)
        try:
            return compat_etree_fromstring(xml_string.encode('utf-8'))
        except xml.etree.ElementTree.ParseError as ve:
            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
        try:
            return json.loads(
                json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
        except ValueError as ve:
            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_socket_response_as_json(self, data, *args, **kwargs):
        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)
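
    # e.g. (illustrative) a socket.io frame such as '42["message",{"id": 1}]'
    # is trimmed to '{"id": 1}' before being handed to _parse_json.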

    def __create_download_methods(name, parser, note, errnote, return_value):

        def parse(ie, content, *args, errnote=errnote, **kwargs):
            if parser is None:
                return content
            if errnote is False:
                kwargs['errnote'] = errnote
            # parser is fetched by name so subclasses can override it
            return getattr(ie, parser)(content, *args, **kwargs)

        def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                            fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            res = self._download_webpage_handle(
                url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
                data=data, headers=headers, query=query, expected_status=expected_status)
            if res is False:
                return res
            content, urlh = res
            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh

        def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                             fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            if self.get_param('load_pages'):
                url_or_request = self._create_request(url_or_request, data, headers, query)
                filename = self._request_dump_filename(url_or_request.full_url, video_id)
                self.to_screen(f'Loading request from {filename}')
                try:
                    with open(filename, 'rb') as dumpf:
                        webpage_bytes = dumpf.read()
                except OSError as e:
                    self.report_warning(f'Unable to load request from disk: {e}')
                else:
                    content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
            kwargs = {
                'note': note,
                'errnote': errnote,
                'transform_source': transform_source,
                'fatal': fatal,
                'encoding': encoding,
                'data': data,
                'headers': headers,
                'query': query,
                'expected_status': expected_status,
            }
            if parser is None:
                kwargs.pop('transform_source')
            # The method is fetched by name so subclasses can override _download_..._handle
            res = getattr(self, download_handle.__name__)(url_or_request, video_id, **kwargs)
            return res if res is False else res[0]

        def impersonate(func, name, return_value):
            func.__name__, func.__qualname__ = name, f'InfoExtractor.{name}'
            func.__doc__ = f'''
                @param transform_source     Apply this transformation before parsing
                @returns                    {return_value}

                See _download_webpage_handle docstring for other arguments specification
            '''

        impersonate(download_handle, f'_download_{name}_handle', f'({return_value}, URL handle)')
        impersonate(download_content, f'_download_{name}', f'{return_value}')
        return download_handle, download_content

    _download_xml_handle, _download_xml = __create_download_methods(
        'xml', '_parse_xml', 'Downloading XML', 'Unable to download XML', 'xml as an xml.etree.ElementTree.Element')
    _download_json_handle, _download_json = __create_download_methods(
        'json', '_parse_json', 'Downloading JSON metadata', 'Unable to download JSON metadata', 'JSON object as a dict')
    _download_socket_json_handle, _download_socket_json = __create_download_methods(
        'socket_json', '_parse_socket_response_as_json', 'Polling socket', 'Unable to poll socket', 'JSON object as a dict')
    __download_webpage = __create_download_methods('webpage', None, None, None, 'data of the page as a string')[1]
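
    # The factory above generates the public helpers; typical use (illustrative):
    #     data = self._download_json(api_url, video_id, note='Downloading API JSON')
    #     doc = self._download_xml(manifest_url, video_id)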

    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=NO_DEFAULT, *args, **kwargs):
        """
        Return the data of the page as a string.

        Keyword arguments:
        tries -- number of tries
        timeout -- sleep interval between tries

        See _download_webpage_handle docstring for other arguments specification.
        """

        R''' # NB: These are unused; should they be deprecated?
        if tries != 1:
            self._downloader.deprecation_warning('tries argument is deprecated in InfoExtractor._download_webpage')
        if timeout is NO_DEFAULT:
            timeout = 5
        else:
            self._downloader.deprecation_warning('timeout argument is deprecated in InfoExtractor._download_webpage')
        '''

        try_count = 0
        while True:
            try:
                return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
            except http.client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)

    def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
        idstr = format_field(video_id, None, '%s: ')
        msg = f'[{self.IE_NAME}] {idstr}{msg}'
        if only_once:
            if f'WARNING: {msg}' in self._printed_messages:
                return
            self._printed_messages.add(f'WARNING: {msg}')
        self._downloader.report_warning(msg, *args, **kwargs)

    def to_screen(self, msg, *args, **kwargs):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def write_debug(self, msg, *args, **kwargs):
        self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def get_param(self, name, default=None, *args, **kwargs):
        if self._downloader:
            return self._downloader.params.get(name, default, *args, **kwargs)
        return default

    def report_drm(self, video_id, partial=NO_DEFAULT):
        if partial is not NO_DEFAULT:
            self._downloader.deprecation_warning('InfoExtractor.report_drm no longer accepts the argument partial')
        self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    def raise_login_required(
            self, msg='This video is only available for registered users',
            metadata_available=False, method=NO_DEFAULT):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        msg += format_field(self._login_hint(method), None, '. %s')
        raise ExtractorError(msg, expected=True)

    def raise_geo_restricted(
            self, msg='This video is not available from your location due to geo restriction',
            countries=None, metadata_available=False):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
        """Returns a URL that points to a page that should be processed"""
        if ie is not None:
            kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
        if video_id is not None:
            kwargs['id'] = video_id
        if video_title is not None:
            kwargs['title'] = video_title
        return {
            **kwargs,
            '_type': 'url_transparent' if url_transparent else 'url',
            'url': url,
        }

    @classmethod
    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
        return cls.playlist_result(
            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
            playlist_id, playlist_title, **kwargs)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
        """Returns a playlist"""
        if playlist_id:
            kwargs['id'] = playlist_id
        if playlist_title:
            kwargs['title'] = playlist_title
        if playlist_description is not None:
            kwargs['description'] = playlist_description
        return {
            **kwargs,
            '_type': 'multi_video' if multi_video else 'playlist',
            'entries': entries,
        }
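
    # e.g. (illustrative) deferring a single embedded YouTube video:
    #     return self.url_result(embed_url, ie='Youtube', video_id=video_id)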

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
        """
        if string is None:
            mobj = None
        elif isinstance(pattern, (str, re.Pattern)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            elif isinstance(group, (list, tuple)):
                return tuple(mobj.group(g) for g in group)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
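
    # Typical use (illustrative): pull a token out of a page, with a default
    # so extraction continues when the pattern is absent:
    #     token = self._search_regex(
    #         r'data-token="([^"]+)"', webpage, 'token', default=None)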

    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='',
                     contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT, **kwargs):
        """Searches string for the JSON object specified by start_pattern"""
        # NB: end_pattern is only used to reduce the size of the initial match
        if default is NO_DEFAULT:
            default, has_default = {}, False
        else:
            fatal, has_default = False, True

        json_string = self._search_regex(
            rf'(?:{start_pattern})\s*(?P<json>{contains_pattern})\s*(?:{end_pattern})',
            string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
        if not json_string:
            return default

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
        try:
            return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
        except ExtractorError as e:
            if fatal:
                raise ExtractorError(
                    f'Unable to extract {_name} - Failed to parse JSON', cause=e.cause, video_id=video_id)
            elif not has_default:
                self.report_warning(
                    f'Unable to extract {_name} - Failed to parse JSON: {e}', video_id=video_id)
        return default

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        return None

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self.get_param('usenetrc', False):
            try:
                netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
                if os.path.isdir(netrc_file):
                    netrc_file = os.path.join(netrc_file, '.netrc')
                info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (OSError, netrc.NetrcParseError) as err:
                self.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password
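
    # The matching ~/.netrc entry uses the standard format, e.g.:
    #     machine <_NETRC_MACHINE value> login <username> password <password>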

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in params dictionary. If no such credentials
        are available, look in the netrc file using the netrc_machine or _NETRC_MACHINE
        value.
        If there's no info available, return (None, None)
        """

        # Attempt to use provided username and password or .netrc data
        username = self.get_param(username_option)
        if username is not None:
            password = self.get_param(password_option)
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """

        tfa = self.get_param('twofactor')
        if tfa:
            return tfa

        return getpass.getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
                       % {'prop': re.escape(prop), 'sep': '(?::|[:-])'})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
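
    # These patterns match tags like (illustrative):
    #     <meta property="og:title" content="Some title">
    #     <meta name="description" content="Some description">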

    def _og_search_property(self, prop, html, name=None, **kargs):
        prop = variadic(prop)
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, *, fatal=False, **kargs):
        return self._og_search_property('title', html, fatal=fatal, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
        return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        name = variadic(name)
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    @staticmethod
    def _rta_search(html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18

        # And then there are the jokers who advertise that they use RTA, but actually don't.
        AGE_LIMIT_MARKERS = [
            r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
        ]
        if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta(
            'isFamilyFriendly', html, default=None)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _yield_json_ld(self, html, video_id, *, fatal=True, default=NO_DEFAULT):
        """Yield all json ld objects in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        for mobj in re.finditer(JSON_LD_RE, html):
            json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
            for json_ld in variadic(json_ld_item):
                if isinstance(json_ld, dict):
                    yield json_ld

    def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, default=NO_DEFAULT):
        """Search for a video in any json ld in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        info = self._json_ld(
            list(self._yield_json_ld(html, video_id, fatal=fatal, default=default)),
            video_id, fatal=fatal, expected_type=expected_type)
        if info:
            return info
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}

    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        if isinstance(json_ld, str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}

        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }

        def is_type(e, *expected_types):
            type = variadic(traverse_obj(e, '@type'))
            return any(x in type for x in expected_types)

        def extract_interaction_type(e):
            interaction_type = e.get('interactionType')
            if isinstance(interaction_type, dict):
                interaction_type = interaction_type.get('@type')
            return str_or_none(interaction_type)

        def extract_interaction_statistic(e):
            interaction_statistic = e.get('interactionStatistic')
            if isinstance(interaction_statistic, dict):
                interaction_statistic = [interaction_statistic]
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not is_type(is_e, 'InteractionCounter'):
                    continue
                interaction_type = extract_interaction_type(is_e)
                if not interaction_type:
                    continue
                # For interaction count some sites provide string instead of
                # an integer (as per spec) with non digit characters (e.g. ",")
                # so extracting count with more relaxed str_to_int
                interaction_count = str_to_int(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count

        def extract_chapter_information(e):
            chapters = [{
                'title': part.get('name'),
                'start_time': part.get('startOffset'),
                'end_time': part.get('endOffset'),
            } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
            for idx, (last_c, current_c, next_c) in enumerate(zip(
                    [{'end_time': 0}] + chapters, chapters, chapters[1:])):
                current_c['end_time'] = current_c['end_time'] or next_c['start_time']
                current_c['start_time'] = current_c['start_time'] or last_c['end_time']
                if None in current_c.values():
                    self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
                    return
            if chapters:
                chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
                info['chapters'] = chapters

        def extract_video_object(e):
            author = e.get('author')
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'ext': mimetype2ext(e.get('encodingFormat')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnails': [{'url': unescapeHTML(url)}
                               for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
                               if url_or_none(url)],
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                # author can be an instance of 'Organization' or 'Person' types.
                # both types can have 'name' property (inherited from 'Thing' type). [1]
                # however some websites are using 'Text' type instead.
                # 1. https://schema.org/VideoObject
                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
                'artist': traverse_obj(e, ('byArtist', 'name'), expected_type=str),
                'filesize': int_or_none(float_or_none(e.get('contentSize'))),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
                'tags': try_call(lambda: e.get('keywords').split(',')),
            })
            if is_type(e, 'AudioObject'):
                info.update({
                    'vcodec': 'none',
                    'abr': int_or_none(e.get('bitrate')),
                })
            extract_interaction_statistic(e)
            extract_chapter_information(e)

        def traverse_json_ld(json_ld, at_top_level=True):
            for e in variadic(json_ld):
                if not isinstance(e, dict):
                    continue
                if at_top_level and '@context' not in e:
                    continue
                if at_top_level and set(e.keys()) == {'@context', '@graph'}:
                    traverse_json_ld(e['@graph'], at_top_level=False)
                    continue
                if expected_type is not None and not is_type(e, expected_type):
                    continue
                rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
                if rating is not None:
                    info['average_rating'] = rating
                if is_type(e, 'TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif is_type(e, 'Movie'):
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif is_type(e, 'Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody') or e.get('description')),
                    })
                    if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
                        extract_video_object(e['video'][0])
                    elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
                        extract_video_object(e['subjectOf'][0])
                elif is_type(e, 'VideoObject', 'AudioObject'):
                    extract_video_object(e)
                    if expected_type is None:
                        continue
                    else:
                        break
                video = e.get('video')
                if is_type(video, 'VideoObject'):
                    extract_video_object(video)
                if expected_type is None:
                    continue
                else:
                    break

        traverse_json_ld(json_ld)
        return filter_dict(info)

    def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
        return self._parse_json(
            self._search_regex(
                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
                webpage, 'next.js data', fatal=fatal, **kw),
            video_id, transform_source=transform_source, fatal=fatal)
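
    # _search_nextjs_data targets the JSON blob that Next.js embeds in every
    # server-rendered page, e.g.:
    #   <script id="__NEXT_DATA__" type="application/json">{"props": {...}}</script>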

    def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)):
        """Parses Nuxt.js metadata. This works as long as the function that ``__NUXT__`` invokes is a pure function"""
        rectx = re.escape(context_name)
        FUNCTION_RE = r'\(function\((?P<arg_keys>.*?)\){return\s+(?P<js>{.*?})\s*;?\s*}\((?P<arg_vals>.*?)\)'
        js, arg_keys, arg_vals = self._search_regex(
            (rf'<script>\s*window\.{rectx}={FUNCTION_RE}\s*\)\s*;?\s*</script>', rf'{rectx}\(.*?{FUNCTION_RE}'),
            webpage, context_name, group=('js', 'arg_keys', 'arg_vals'), fatal=fatal)

        args = dict(zip(arg_keys.split(','), arg_vals.split(',')))

        for key, val in args.items():
            if val in ('undefined', 'void 0'):
                args[key] = 'null'

        ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
        return traverse_obj(ret, traverse) or {}
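
    # _search_nuxt_data matches the self-invoking function Nuxt.js emits, roughly:
    #   window.__NUXT__=(function(a,b){return {data:[{title:a,id:b}]}}("Title",123));
    # The argument names are mapped to the passed values and substituted into the
    # returned object literal while it is converted to JSON.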

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not input:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    class FormatSort:
        regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'

        default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
                   'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
                   'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id')  # These must not be aliases

        ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
                        'height', 'width', 'proto', 'vext', 'abr', 'aext',
                        'fps', 'fs_approx', 'source', 'id')

        settings = {
            'vcodec': {'type': 'ordered', 'regex': True,
                       'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
            'acodec': {'type': 'ordered', 'regex': True,
                       'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
            'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
                    'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
            'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
                      'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
            'vext': {'type': 'ordered', 'field': 'video_ext',
                     'order': ('mp4', 'webm', 'flv', '', 'none'),
                     'order_free': ('webm', 'mp4', 'flv', '', 'none')},
            'aext': {'type': 'ordered', 'field': 'audio_ext',
                     'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'),
                     'order_free': ('ogg', 'opus', 'webm', 'mp3', 'm4a', 'aac', '', 'none')},
            'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
            'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
                           'field': ('vcodec', 'acodec'),
                           'function': lambda it: int(any(v != 'none' for v in it))},
            'ie_pref': {'priority': True, 'type': 'extractor'},
            'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
            'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
            'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
            'quality': {'convert': 'float', 'default': -1},
            'filesize': {'convert': 'bytes'},
            'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
            'id': {'convert': 'string', 'field': 'format_id'},
            'height': {'convert': 'float_none'},
            'width': {'convert': 'float_none'},
            'fps': {'convert': 'float_none'},
            'channels': {'convert': 'float_none', 'field': 'audio_channels'},
            'tbr': {'convert': 'float_none'},
            'vbr': {'convert': 'float_none'},
            'abr': {'convert': 'float_none'},
            'asr': {'convert': 'float_none'},
            'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},

            'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
            'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
            'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
            'ext': {'type': 'combined', 'field': ('vext', 'aext')},
            'res': {'type': 'multiple', 'field': ('height', 'width'),
                    'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},

            # Actual field names
            'format_id': {'type': 'alias', 'field': 'id'},
            'preference': {'type': 'alias', 'field': 'ie_pref'},
            'language_preference': {'type': 'alias', 'field': 'lang'},
            'source_preference': {'type': 'alias', 'field': 'source'},
            'protocol': {'type': 'alias', 'field': 'proto'},
            'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
            'audio_channels': {'type': 'alias', 'field': 'channels'},

            # Deprecated
            'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
            'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
            'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
            'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
            'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
            'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
            'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
            'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
            'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
            'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
            'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
            'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
            'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
            'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
            'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
            'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
            'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
            'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
            'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
            'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
        }
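
        # A format sort string is a comma-separated list of fields matched by
        # `regex` above, e.g. 'res:1080,+br' means: prefer the largest
        # resolution not exceeding 1080p, then (because of the '+' prefix)
        # prefer the *smallest* combined bitrate.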

        def __init__(self, ie, field_preference):
            self._order = []
            self.ydl = ie._downloader
            self.evaluate_params(self.ydl.params, field_preference)
            if ie.get_param('verbose'):
                self.print_verbose_info(self.ydl.write_debug)

        def _get_field_setting(self, field, key):
            if field not in self.settings:
                if key in ('forced', 'priority'):
                    return False
                self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
                                            'deprecated and may be removed in a future version')
                self.settings[field] = {}
            propObj = self.settings[field]
            if key not in propObj:
                type = propObj.get('type')
                if key == 'field':
                    default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
                elif key == 'convert':
                    default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
                else:
                    default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
                propObj[key] = default
            return propObj[key]

        def _resolve_field_value(self, field, value, convertNone=False):
            if value is None:
                if not convertNone:
                    return None
            else:
                value = value.lower()
            conversion = self._get_field_setting(field, 'convert')
            if conversion == 'ignore':
                return None
            if conversion == 'string':
                return value
            elif conversion == 'float_none':
                return float_or_none(value)
            elif conversion == 'bytes':
                return FileDownloader.parse_bytes(value)
            elif conversion == 'order':
                order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
                use_regex = self._get_field_setting(field, 'regex')
                list_length = len(order_list)
                empty_pos = order_list.index('') if '' in order_list else list_length + 1
                if use_regex and value is not None:
                    for i, regex in enumerate(order_list):
                        if regex and re.match(regex, value):
                            return list_length - i
                    return list_length - empty_pos  # not in list
                else:  # not regex or value = None
                    return list_length - (order_list.index(value) if value in order_list else empty_pos)
            else:
                if value.isnumeric():
                    return float(value)
                else:
                    self.settings[field]['convert'] = 'string'
                    return value

        def evaluate_params(self, params, sort_extractor):
            self._use_free_order = params.get('prefer_free_formats', False)
            self._sort_user = params.get('format_sort', [])
            self._sort_extractor = sort_extractor

            def add_item(field, reverse, closest, limit_text):
                field = field.lower()
                if field in self._order:
                    return
                self._order.append(field)
                limit = self._resolve_field_value(field, limit_text)
                data = {
                    'reverse': reverse,
                    'closest': False if limit is None else closest,
                    'limit_text': limit_text,
                    'limit': limit}
                if field in self.settings:
                    self.settings[field].update(data)
                else:
                    self.settings[field] = data

            sort_list = (
                tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
                + (tuple() if params.get('format_sort_force', False)
                   else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
                + tuple(self._sort_user) + tuple(sort_extractor) + self.default)

            for item in sort_list:
                match = re.match(self.regex, item)
                if match is None:
                    raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
                field = match.group('field')
                if field is None:
                    continue
                if self._get_field_setting(field, 'type') == 'alias':
                    alias, field = field, self._get_field_setting(field, 'field')
                    if self._get_field_setting(alias, 'deprecated'):
                        self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
                                                    f'be removed in a future version. Please use {field} instead')
                reverse = match.group('reverse') is not None
                closest = match.group('separator') == '~'
                limit_text = match.group('limit')

                has_limit = limit_text is not None
                has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
                has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')

                fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
                limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
                limit_count = len(limits)
                for (i, f) in enumerate(fields):
                    add_item(f, reverse, closest,
                             limits[i] if i < limit_count
                             else limits[0] if has_limit and not has_multiple_limits
                             else None)

        def print_verbose_info(self, write_debug):
            if self._sort_user:
                write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
            if self._sort_extractor:
                write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
            write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
                '+' if self._get_field_setting(field, 'reverse') else '', field,
                '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
                              self._get_field_setting(field, 'limit_text'),
                              self._get_field_setting(field, 'limit'))
                if self._get_field_setting(field, 'limit_text') is not None else '')
                for field in self._order if self._get_field_setting(field, 'visible')]))

        def _calculate_field_preference_from_value(self, format, field, type, value):
            reverse = self._get_field_setting(field, 'reverse')
            closest = self._get_field_setting(field, 'closest')
            limit = self._get_field_setting(field, 'limit')

            if type == 'extractor':
                maximum = self._get_field_setting(field, 'max')
                if value is None or (maximum is not None and value >= maximum):
                    value = -1
            elif type == 'boolean':
                in_list = self._get_field_setting(field, 'in_list')
                not_in_list = self._get_field_setting(field, 'not_in_list')
                value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
            elif type == 'ordered':
                value = self._resolve_field_value(field, value, True)

            # try to convert to number
            val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
            is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
            if is_num:
                value = val_num

            return ((-10, 0) if value is None
                    else (1, value, 0) if not is_num  # if a field has mixed strings and numbers, strings are sorted higher
                    else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
                    else (0, value, 0) if not reverse and (limit is None or value <= limit)
                    else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
                    else (-1, value, 0))
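
        # Formats sort ascending by these tuples (worst first), so a larger
        # tuple is better. E.g. with 'height:1080' (limit=1080, not reverse):
        # 720 -> (0, 720, 0), 1080 -> (0, 1080, 0), 2160 -> (0, -2160, 0),
        # i.e. every over-limit value ranks below every in-limit value.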

        def _calculate_field_preference(self, format, field):
            type = self._get_field_setting(field, 'type')  # extractor, boolean, ordered, field, multiple
            get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
            if type == 'multiple':
                type = 'field'  # Only 'field' is allowed in multiple for now
                actual_fields = self._get_field_setting(field, 'field')

                value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
            else:
                value = get_value(field)
            return self._calculate_field_preference_from_value(format, field, type, value)

        def calculate_preference(self, format):
            # Determine missing protocol
            if not format.get('protocol'):
                format['protocol'] = determine_protocol(format)

            # Determine missing ext
            if not format.get('ext') and 'url' in format:
                format['ext'] = determine_ext(format['url'])
            if format.get('vcodec') == 'none':
                format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
                format['video_ext'] = 'none'
            else:
                format['video_ext'] = format['ext']
                format['audio_ext'] = 'none'
            # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'):  # Not supported?
            #    format['preference'] = -1000

            # Determine missing bitrates
            if format.get('tbr') is None:
                if format.get('vbr') is not None and format.get('abr') is not None:
                    format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
            else:
                if format.get('vcodec') != 'none' and format.get('vbr') is None:
                    format['vbr'] = format.get('tbr') - format.get('abr', 0)
                if format.get('acodec') != 'none' and format.get('abr') is None:
                    format['abr'] = format.get('tbr') - format.get('vbr', 0)

            return tuple(self._calculate_field_preference(format, field) for field in self._order)
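
        # The bitrate fallbacks above are simple arithmetic: e.g. a format with
        # tbr=1000 and abr=128 gets vbr=872, while one with only vbr=2500 and
        # abr=128 gets tbr=2628.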

    def _sort_formats(self, formats, field_preference=[]):
        if not formats:
            return
        formats.sort(key=self.FormatSort(self, field_preference).calculate_preference)

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video', headers={}):
        url = self._proto_relative_url(url, scheme='http:')
        # For now, assume non-HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            self.to_screen(
                '%s: %s URL is invalid, skipping: %s'
                % (video_id, item, error_to_compat_str(e.cause)))
            return False

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self.get_param('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        scheme = scheme or self.http_scheme()
        assert scheme.endswith(':')
        return sanitize_url(url, scheme=scheme[:-1])

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return []

        manifest, urlh = res
        manifest_url = urlh.geturl()

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
            return []

        # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = join_nonempty(f4m_id, tbr or i)
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        quality=quality, m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
                'quality': quality,
            })
        return formats

    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
        return {
            'format_id': join_nonempty(m3u8_id, 'meta'),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 100 if preference else -100,
            'quality': quality,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }

    def _report_ignoring_subs(self, name):
        self.report_warning(bug_reports_message(
            f'Ignoring subtitle tracks found in the {name} manifest; '
            'if any subtitle tracks are missing,'
        ), only_once=True)

    def _extract_m3u8_formats(self, *args, **kwargs):
        fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('HLS')
        return fmts

    def _extract_m3u8_formats_and_subtitles(
            self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, note=None,
            errnote=None, fatal=True, live=False, data=None, headers={},
            query={}):

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note='Downloading m3u8 information' if note is None else note,
            errnote='Failed to download m3u8 information' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)

        if res is False:
            return [], {}

        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        return self._parse_m3u8_formats_and_subtitles(
            m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
            preference=preference, quality=quality, m3u8_id=m3u8_id,
            note=note, errnote=errnote, fatal=fatal, live=live, data=data,
            headers=headers, query=query, video_id=video_id)

    def _parse_m3u8_formats_and_subtitles(
            self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, live=False, note=None,
            errnote=None, fatal=True, data=None, headers={}, query={},
            video_id=None):
        formats, subtitles = [], {}

        has_drm = re.search('|'.join([
            r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
            r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
        ]), m3u8_doc)

        def format_url(url):
            return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)

        if self.get_param('hls_split_discontinuity', False):
            def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
                if not m3u8_doc:
                    if not manifest_url:
                        return []
                    m3u8_doc = self._download_webpage(
                        manifest_url, video_id, fatal=fatal, data=data, headers=headers,
                        note=False, errnote='Failed to download m3u8 playlist information')
                    if m3u8_doc is False:
                        return []
                return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))

        else:
            def _extract_m3u8_playlist_indices(*args, **kwargs):
                return [None]

        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand,
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without quality renditions.
        # Fortunately, a master playlist can be easily distinguished from a media
        # playlist based on particular tags' availability. As per [1, 4.3.3, 4.3.4],
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As per [1, 4.3.3.1], the #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in a master playlist, so we can
        # clearly detect a media playlist with this criterion.

        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            formats = [{
                'format_id': join_nonempty(m3u8_id, idx),
                'format_index': idx,
                'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
                'quality': quality,
                'has_drm': has_drm,
            } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]

            return formats, subtitles

        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
            if media_type == 'SUBTITLES':
                # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
                # EXT-X-MEDIA tag if the media type is SUBTITLES.
                # However, lack of URI has been spotted in the wild.
                # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
                if not media.get('URI'):
                    return
                url = format_url(media['URI'])
                sub_info = {
                    'url': url,
                    'ext': determine_ext(url),
                }
                if sub_info['ext'] == 'm3u8':
                    # Per RFC 8216 §3.1, the only possible subtitle format m3u8
                    # files may contain is WebVTT:
                    # <https://tools.ietf.org/html/rfc8216#section-3.1>
                    sub_info['ext'] = 'vtt'
                    sub_info['protocol'] = 'm3u8_native'
                lang = media.get('LANGUAGE') or 'und'
                subtitles.setdefault(lang, []).append(sub_info)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                manifest_url = format_url(media_url)
                formats.extend({
                    'format_id': join_nonempty(m3u8_id, group_id, name, idx),
                    'format_note': name,
                    'format_index': idx,
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                    'quality': quality,
                    'vcodec': 'none' if media_type == 'AUDIO' else None,
                } for idx in _extract_m3u8_playlist_indices(manifest_url))

        def build_stream_name():
            # Although the specification does not mention a NAME attribute for
            # the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
            # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from the corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video-only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in the HLS manifest, such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                manifest_url = format_url(line.strip())

                for idx in _extract_m3u8_playlist_indices(manifest_url):
                    format_id = [m3u8_id, None, idx]
                    # Bandwidth of live streams may differ over time, thus making
                    # format_id unpredictable. So it's better to keep the provided
                    # format_id intact.
                    if not live:
                        stream_name = build_stream_name()
                        format_id[1] = stream_name or '%d' % (tbr or len(formats))
                    f = {
                        'format_id': join_nonempty(*format_id),
                        'format_index': idx,
                        'url': manifest_url,
                        'manifest_url': m3u8_url,
                        'tbr': tbr,
                        'ext': ext,
                        'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                        'protocol': entry_protocol,
                        'preference': preference,
                        'quality': quality,
                        'has_drm': has_drm,
                    }
                    resolution = last_stream_inf.get('RESOLUTION')
                    if resolution:
                        mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                        if mobj:
                            f['width'] = int(mobj.group('width'))
                            f['height'] = int(mobj.group('height'))
                    # Unified Streaming Platform
                    mobj = re.search(
                        r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                    if mobj:
                        abr, vbr = mobj.groups()
                        abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                        f.update({
                            'vbr': vbr,
                            'abr': abr,
                        })
                    codecs = parse_codecs(last_stream_inf.get('CODECS'))
                    f.update(codecs)
                    audio_group_id = last_stream_inf.get('AUDIO')
                    # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                    # references a rendition group MUST have a CODECS attribute.
                    # However, this is not always respected. E.g. [2]
                    # contains an EXT-X-STREAM-INF tag which references an AUDIO
                    # rendition group but does not have CODECS, and despite
                    # referencing an audio group it represents a complete
                    # (with audio and video) format. So, for such cases we will
                    # ignore references to rendition groups and treat them
                    # as complete formats.
                    if audio_group_id and codecs and f.get('vcodec') != 'none':
                        audio_group = groups.get(audio_group_id)
                        if audio_group and audio_group[0].get('URI'):
                            # TODO: update acodec for audio only formats with
                            # the same GROUP-ID
                            f['acodec'] = 'none'
                    if not f.get('ext'):
                        f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
                    formats.append(f)

                    # for DailyMotion
                    progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                    if progressive_uri:
                        http_f = f.copy()
                        del http_f['manifest_url']
                        http_f.update({
                            'format_id': f['format_id'].replace('hls-', 'http-'),
                            'protocol': 'http',
                            'url': progressive_uri,
                        })
                        formats.append(http_f)

                last_stream_inf = {}
        return formats, subtitles
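
    # A typical variant entry consumed by the loop above looks like:
    #   #EXT-X-STREAM-INF:BANDWIDTH=2000000,RESOLUTION=1280x720,CODECS="avc1.64001f,mp4a.40.2"
    #   https://example.com/720p.m3u8
    # which yields a format with tbr=2000.0, width=1280, height=720 and the
    # codec fields filled in by parse_codecs().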

    def _extract_m3u8_vod_duration(
            self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):

        m3u8_vod = self._download_webpage(
            m3u8_vod_url, video_id,
            note='Downloading m3u8 VOD manifest' if note is None else note,
            errnote='Failed to download VOD manifest' if errnote is None else errnote,
            fatal=False, data=data, headers=headers, query=query)

        return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)

    def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
        if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
            return None

        return int(sum(
            float(line[len('#EXTINF:'):].split(',')[0])
            for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
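
    # The VOD duration is simply the sum of all #EXTINF segment durations,
    # e.g. '#EXTINF:10.0,' + '#EXTINF:9.5,' -> int(19.5) = 19 seconds.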

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
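
    # e.g. _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL') returns
    # './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta'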

    def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
        if res is False:
            assert not fatal
            return [], {}

        smil, urlh = res
        smil_url = urlh.geturl()

        namespace = self._parse_smil_namespace(smil)

        fmts = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subs = self._parse_smil_subtitles(
            smil, namespace=namespace)

        return fmts, subs

    def _extract_smil_formats(self, *args, **kwargs):
        fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('SMIL')
        return fmts

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal)
        if res is False:
            return {}

        smil, urlh = res
        smil_url = urlh.geturl()

        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml_handle(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        imgs_count = 0

        srcs = set()
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += urllib.parse.urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            imgs_count += 1
            formats.append({
                'format_id': 'imagestream-%d' % (imgs_count),
                'url': src,
                'ext': mimetype2ext(medium.get('type')),
                'acodec': 'none',
                'vcodec': 'none',
                'width': int_or_none(medium.get('width')),
                'height': int_or_none(medium.get('height')),
                'format_note': 'SMIL storyboards',
            })

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
        res = self._download_xml_handle(
            xspf_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if res is False:
            return []

        xspf, urlh = res
        xspf_url = urlh.geturl()

        return self._parse_xspf(
            xspf, playlist_id, xspf_url=xspf_url,
            xspf_base_url=base_url(xspf_url))

    def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = []
            for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
                format_url = urljoin(xspf_base_url, location.text)
                if not format_url:
                    continue
                formats.append({
                    'url': format_url,
                    'manifest_url': xspf_url,
                    'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                    'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                    'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
                })
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _extract_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _extract_mpd_formats_and_subtitles(
            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
            fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            mpd_url, video_id,
            note='Downloading MPD manifest' if note is None else note,
            errnote='Failed to download MPD manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        mpd_doc, urlh = res
        if mpd_doc is None:
            return [], {}

        # We could have been redirected to a new url when we retrieved our mpd file.
        mpd_url = urlh.geturl()
        mpd_base_url = base_url(mpd_url)

        return self._parse_mpd_formats_and_subtitles(
            mpd_doc, mpd_id, mpd_base_url, mpd_url)

    def _parse_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        if not self.get_param('dynamic_mpd', True):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract what is
            # relevant for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        stream_numbers = collections.defaultdict(int)
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = representation_attrib.get('contentType', mime_type.split('/')[0])

                    codec_str = representation_attrib.get('codecs', '')
                    # Some kind of binary subtitle found in some youtube livestreams
                    if mime_type == 'application/x-rawcc':
                        codecs = {'scodec': codec_str}
                    else:
                        codecs = parse_codecs(codec_str)
                    if content_type not in ('video', 'audio', 'text'):
                        if mime_type == 'image/jpeg':
                            content_type = mime_type
                        elif codecs.get('vcodec', 'none') != 'none':
                            content_type = 'video'
                        elif codecs.get('acodec', 'none') != 'none':
                            content_type = 'audio'
                        elif codecs.get('scodec', 'none') != 'none':
                            content_type = 'text'
                        elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
                            content_type = 'text'
                        else:
                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
                            continue

                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if try_call(lambda: base_url_e.text) is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and base_url.startswith('/'):
                        base_url = urllib.parse.urljoin(mpd_base_url, base_url)
                    elif mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    if representation_id is not None:
                        format_id = representation_id
                    else:
                        format_id = content_type
                    if mpd_id:
                        format_id = mpd_id + '-' + format_id
                    if content_type in ('video', 'audio'):
                        f = {
                            'format_id': format_id,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                            **codecs
                        }
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        if representation_id is not None:
                            tmpl = tmpl.replace('$RepresentationID$', representation_id)
                        # First off, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by the % operator string formatting used further on (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with the % operator
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(
                                    float_or_none(period_duration, segment_duration, default=0)))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template,
                        # e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/ytdl-org/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                        if not period_duration:
                            period_duration = try_get(
                                representation_ms_info,
                                lambda r: sum(frag['duration'] for frag in r['fragments']), float)
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = base_url
                    if content_type in ('video', 'audio', 'image/jpeg'):
                        f['manifest_stream_number'] = stream_numbers[f['url']]
                        stream_numbers[f['url']] += 1
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles
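
    # SegmentTemplate example: media="seg_$RepresentationID$_$Number%05d$.m4s"
    # for a representation with id 'video' is prepared by prepare_template()
    # into 'seg_video_%(Number)05d.m4s' and expanded per fragment, e.g.
    # 'seg_video_00001.m4s'.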

    def _extract_ism_formats(self, *args, **kwargs):
        fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('ISM')
        return fmts

    def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            ism_url, video_id,
            note='Downloading ISM manifest' if note is None else note,
            errnote='Failed to download ISM manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        ism_doc, urlh = res
        if ism_doc is None:
            return [], {}

        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
    def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
        """
        Parse formats from ISM manifest.
        References:
         1. [MS-SSTR]: Smooth Streaming Protocol,
            https://msdn.microsoft.com/en-us/library/ff469518.aspx
        """
        if ism_doc.get('IsLive') == 'TRUE':
            return [], {}

        duration = int(ism_doc.attrib['Duration'])
        timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

        formats = []
        subtitles = {}
        for stream in ism_doc.findall('StreamIndex'):
            stream_type = stream.get('Type')
            if stream_type not in ('video', 'audio', 'text'):
                continue
            url_pattern = stream.attrib['Url']
            stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
            stream_name = stream.get('Name')
            stream_language = stream.get('Language', 'und')
            for track in stream.findall('QualityLevel'):
                KNOWN_TAGS = {'255': 'AACL', '65534': 'EC-3'}
                fourcc = track.get('FourCC') or KNOWN_TAGS.get(track.get('AudioTag'))
                # TODO: add support for WVC1 and WMAP
                if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML', 'EC-3'):
                    self.report_warning('%s is not a supported codec' % fourcc)
                    continue
                tbr = int(track.attrib['Bitrate']) // 1000
                # [1] does not mention Width and Height attributes. However,
                # they're often present while MaxWidth and MaxHeight are
                # missing, so should be used as fallbacks
                width = int_or_none(track.get('MaxWidth') or track.get('Width'))
                height = int_or_none(track.get('MaxHeight') or track.get('Height'))
                sampling_rate = int_or_none(track.get('SamplingRate'))

                track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
                track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)

                fragments = []
                fragment_ctx = {
                    'time': 0,
                }
                stream_fragments = stream.findall('c')
                for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                    fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                    fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                    fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                    if not fragment_ctx['duration']:
                        try:
                            next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
                        except IndexError:
                            next_fragment_time = duration
                        fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                    for _ in range(fragment_repeat):
                        fragments.append({
                            'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
                            'duration': fragment_ctx['duration'] / stream_timescale,
                        })
                        fragment_ctx['time'] += fragment_ctx['duration']

                if stream_type == 'text':
                    subtitles.setdefault(stream_language, []).append({
                        'ext': 'ismt',
                        'protocol': 'ism',
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'fragments': fragments,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                        },
                    })
                elif stream_type in ('video', 'audio'):
                    formats.append({
                        'format_id': join_nonempty(ism_id, stream_name, tbr),
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'ext': 'ismv' if stream_type == 'video' else 'isma',
                        'width': width,
                        'height': height,
                        'tbr': tbr,
                        'asr': sampling_rate,
                        'vcodec': 'none' if stream_type == 'audio' else fourcc,
                        'acodec': 'none' if stream_type == 'video' else fourcc,
                        'protocol': 'ism',
                        'fragments': fragments,
                        'has_drm': ism_doc.find('Protection') is not None,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'width': width or 0,
                            'height': height or 0,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                            'sampling_rate': sampling_rate,
                            'channels': int_or_none(track.get('Channels', 2)),
                            'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                            'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                        },
                    })
        return formats, subtitles
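    # Abridged sketch of the manifest shape consumed above (assumed example
    # following [MS-SSTR]; attribute values are made up):
    #   <SmoothStreamingMedia Duration="400000000" TimeScale="10000000">
    #     <StreamIndex Type="video" Language="und"
    #                  Url="QualityLevels({bitrate})/Fragments(video={start time})">
    #       <QualityLevel Bitrate="1500000" FourCC="H264" MaxWidth="1280"
    #                     MaxHeight="720" CodecPrivateData="..."/>
    #       <c t="0" d="20000000" r="19"/>
    #     </StreamIndex>
    #   </SmoothStreamingMedia>
    # The {bitrate} and {start time} placeholders are substituted per track and
    # per fragment by the re.sub() calls above.
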
    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
        def absolute_url(item_url):
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info=None):
            type_info = type_info or {}
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, quality=quality, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                    'ext': ext,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979,
            # e.g. http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
        for media_tag, _, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(dict_get(media_attributes, ('src', 'data-video-src', 'data-src', 'data-source')))
            if src:
                f = parse_content_type(media_attributes.get('type'))
                _, formats = _media_formats(src, media_type, f)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src', 'data-source')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
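    # Usage sketch (illustrative): a generic extractor can turn the <video>,
    # <audio> (and amp-/dl8- equivalent) tags of a page into entries with
    #   entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
    # where each entry carries 'formats', 'subtitles' and 'thumbnail'.
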
    def _extract_akamai_formats(self, *args, **kwargs):
        fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('akamai')
        return fmts
    def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
        signed = 'hdnea=' in manifest_url
        if not signed:
            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
            manifest_url = re.sub(
                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
                '', manifest_url).strip('?')

        formats = []
        subtitles = {}

        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
        f4m_formats = self._extract_f4m_formats(
            f4m_url, video_id, f4m_id='hds', fatal=False)
        for entry in f4m_formats:
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)

        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
            m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
        m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)
        subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)

        http_host = hosts.get('http')
        if http_host and m3u8_formats and not signed:
            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
            qualities_length = len(qualities)
            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                i = 0
                for f in m3u8_formats:
                    if f['vcodec'] != 'none':
                        for protocol in ('http', 'https'):
                            http_f = f.copy()
                            del http_f['manifest_url']
                            url = re.sub(
                                REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
                            http_f.update({
                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                'url': url,
                                'protocol': protocol,
                            })
                            formats.append(http_f)
                        i += 1

        return formats, subtitles
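    # Illustrative (assumed) shape of an Akamai HLS URL matched by REPL_REGEX
    # above: 'https://host/i/path/video_,300,600,1200,.mp4.csmil/master.m3u8',
    # where group(2) ('300,600,1200') lists the available qualities that are
    # substituted back in to build direct HTTP(S) progressive URLs.
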
    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
        query = urllib.parse.urlparse(url).query
        url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
        mobj = re.search(
            r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
        url_base = mobj.group('url')
        http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
        formats = []

        def manifest_url(manifest):
            m_url = f'{http_base_url}/{manifest}'
            if query:
                m_url += '?%s' % query
            return m_url

        if 'm3u8' not in skip_protocols:
            formats.extend(self._extract_m3u8_formats(
                manifest_url('playlist.m3u8'), video_id, 'mp4',
                m3u8_entry_protocol, m3u8_id='hls', fatal=False))
        if 'f4m' not in skip_protocols:
            formats.extend(self._extract_f4m_formats(
                manifest_url('manifest.f4m'),
                video_id, f4m_id='hds', fatal=False))
        if 'dash' not in skip_protocols:
            formats.extend(self._extract_mpd_formats(
                manifest_url('manifest.mpd'),
                video_id, mpd_id='dash', fatal=False))
        if re.search(r'(?:/smil:|\.smil)', url_base):
            if 'smil' not in skip_protocols:
                rtmp_formats = self._extract_smil_formats(
                    manifest_url('jwplayer.smil'),
                    video_id, fatal=False)
                for rtmp_format in rtmp_formats:
                    rtsp_format = rtmp_format.copy()
                    rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                    del rtsp_format['play_path']
                    del rtsp_format['ext']
                    rtsp_format.update({
                        'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                        'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                        'protocol': 'rtsp',
                    })
                    formats.extend([rtmp_format, rtsp_format])
        else:
            for protocol in ('rtmp', 'rtsp'):
                if protocol not in skip_protocols:
                    formats.append({
                        'url': f'{protocol}:{url_base}',
                        'format_id': protocol,
                        'protocol': protocol,
                    })

        return formats
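    # Usage sketch (illustrative; the URL is a made-up Wowza-style endpoint):
    # callers can prune protocols they know are unavailable, e.g.
    #   formats = self._extract_wowza_formats(
    #       'https://example.com/vod/mp4:video.mp4/playlist.m3u8', video_id,
    #       skip_protocols=['dash', 'smil'])
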
    def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
        mobj = re.search(
            r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
            webpage)
        if mobj:
            try:
                jwplayer_data = self._parse_json(mobj.group('options'),
                                                 video_id=video_id,
                                                 transform_source=transform_source)
            except ExtractorError:
                pass
            else:
                if isinstance(jwplayer_data, dict):
                    return jwplayer_data
    def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
        jwplayer_data = self._find_jwplayer_data(
            webpage, video_id, transform_source=js_to_json)
        return self._parse_jwplayer_data(
            jwplayer_data, video_id, *args, **kwargs)
    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if 'playlist' not in jwplayer_data:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            subtitles = {}
            tracks = video_data.get('tracks')
            if tracks and isinstance(tracks, list):
                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_kind = track.get('kind')
                    if not track_kind or not isinstance(track_kind, str):
                        continue
                    if track_kind.lower() not in ('captions', 'subtitles'):
                        continue
                    track_url = urljoin(base_url, track.get('file'))
                    if not track_url:
                        continue
                    subtitles.setdefault(track.get('label') or 'en', []).append({
                        'url': self._proto_relative_url(track_url)
                    })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            if len(formats) == 1 and re.search(
                    r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                self._sort_formats(formats)
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)
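    # Usage sketch (illustrative): after locating the player config in a page,
    #   jwplayer_data = self._find_jwplayer_data(webpage, video_id)
    #   info = self._parse_jwplayer_data(jwplayer_data, video_id, base_url=url)
    # returns a single info dict when the JWPlayer config carries one playlist
    # item, or a playlist result when it carries several.
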
    def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                                m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        urls = []
        formats = []
        for source in jwplayer_sources_data:
            if not isinstance(source, dict):
                continue
            source_url = urljoin(
                base_url, self._proto_relative_url(source.get('file')))
            if not source_url or source_url in urls:
                continue
            urls.append(source_url)
            source_type = source.get('type') or ''
            ext = mimetype2ext(source_type) or determine_ext(source_url)
            if source_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=m3u8_id, fatal=False))
            elif source_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    source_url, video_id, mpd_id=mpd_id, fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
            elif source_type.startswith('audio') or ext in (
                    'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
                formats.append({
                    'url': source_url,
                    'vcodec': 'none',
                    'ext': ext,
                })
            else:
                height = int_or_none(source.get('height'))
                if height is None:
                    # Often no height is provided but there is a label in
                    # format like "1080p", "720p SD", or 1080.
                    height = int_or_none(self._search_regex(
                        r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
                        'height', default=None))
                a_format = {
                    'url': source_url,
                    'width': int_or_none(source.get('width')),
                    'height': height,
                    'tbr': int_or_none(source.get('bitrate'), scale=1000),
                    'filesize': int_or_none(source.get('filesize')),
                    'ext': ext,
                }
                if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
                    # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                    # of jwplayer.flash.swf
                    rtmp_url_parts = re.split(
                        r'((?:mp4|mp3|flv):)', source_url, 1)
                    if len(rtmp_url_parts) == 3:
                        rtmp_url, prefix, play_path = rtmp_url_parts
                        a_format.update({
                            'url': rtmp_url,
                            'play_path': prefix + play_path,
                        })
                    if rtmp_params:
                        a_format.update(rtmp_params)
                formats.append(a_format)
        return formats
    def _live_title(self, name):
        self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
        return name
    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res
    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res
    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
        cookie = http.cookiejar.Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a http.cookies.SimpleCookie with the cookies for the url """
        return LenientSimpleCookie(self._downloader._calc_cookies(url))
    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
        Apply first Set-Cookie header instead of the last. Experimental.

        Some sites (e.g. [1-3]) may serve two cookies under the same name
        in Set-Cookie header and expect the first (old) one to be set rather
        than the second (new) one. However, per RFC 6265 the newer cookie is
        the one that should be set into the cookie store, and that is what
        actually happens. We work around this issue by manually resetting the
        cookie to the first one.
        1. https://new.vk.com/
        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
        3. https://learning.oreilly.com/
        """
        for header, cookies in url_handle.headers.items():
            if header.lower() != 'set-cookie':
                continue
            cookies = cookies.encode('iso-8859-1').decode('utf-8')
            cookie_value = re.search(
                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
            if cookie_value:
                value, domain = cookie_value.groups()
                self._set_cookie(domain, cookie, value)
                break
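    # Illustrative (assumed) example of the situation described above: a
    # response carrying
    #   Set-Cookie: lang=old; Domain=.example.com; Path=/, lang=new; Domain=.example.com; Path=/
    # would normally leave 'lang=new' in the jar; calling
    # self._apply_first_set_cookie_header(urlh, 'lang') re-sets 'lang=old'.
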
    @classmethod
    def get_testcases(cls, include_onlymatching=False):
        t = getattr(cls, '_TEST', None)
        if t:
            assert not hasattr(cls, '_TESTS'), f'{cls.ie_key()}IE has _TEST and _TESTS'
            tests = [t]
        else:
            tests = getattr(cls, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = cls.ie_key()
            yield t

    @classmethod
    def get_webpage_testcases(cls):
        tests = getattr(cls, '_WEBPAGE_TESTS', [])
        for t in tests:
            t['name'] = cls.ie_key()
        return tests

    @classproperty(cache=True)
    def age_limit(cls):
        """Get age limit from the testcases"""
        return max(traverse_obj(
            (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
            (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])

    @classmethod
    def is_suitable(cls, age_limit):
        """Test whether the extractor is generally suitable for the given age limit"""
        return not age_restricted(cls.age_limit, age_limit)
    @classmethod
    def description(cls, *, markdown=True, search_examples=None):
        """Description of the extractor"""
        desc = ''
        if cls._NETRC_MACHINE:
            if markdown:
                desc += f' [<abbr title="netrc machine"><em>{cls._NETRC_MACHINE}</em></abbr>]'
            else:
                desc += f' [{cls._NETRC_MACHINE}]'
        if cls.IE_DESC is False:
            desc += ' [HIDDEN]'
        elif cls.IE_DESC:
            desc += f' {cls.IE_DESC}'
        if cls.SEARCH_KEY:
            desc += f'; "{cls.SEARCH_KEY}:" prefix'
            if search_examples:
                _COUNTS = ('', '5', '10', 'all')
                desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
        if not cls.working():
            desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'

        name = f' - **{cls.IE_NAME}**' if markdown else cls.IE_NAME
        return f'{name}:{desc}' if desc else name
    def extract_subtitles(self, *args, **kwargs):
        if (self.get_param('writesubtitles', False)
                or self.get_param('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    def extract_comments(self, *args, **kwargs):
        if not self.get_param('getcomments'):
            return None
        generator = self._get_comments(*args, **kwargs)

        def extractor():
            comments = []
            interrupted = True
            try:
                while True:
                    comments.append(next(generator))
            except StopIteration:
                interrupted = False
            except KeyboardInterrupt:
                self.to_screen('Interrupted by user')
            except Exception as e:
                if self.get_param('ignoreerrors') is not True:
                    raise
                self._downloader.report_error(e)
            comment_count = len(comments)
            self.to_screen(f'Extracted {comment_count} comments')
            return {
                'comments': comments,
                'comment_count': None if interrupted else comment_count
            }
        return extractor

    def _get_comments(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs/data
        will be dropped. """
        list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
        ret = list(subtitle_list1)
        ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
        return ret
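    # Illustrative example (not in the original source): merging
    #   [{'url': 'a.vtt'}] and [{'url': 'a.vtt'}, {'url': 'b.vtt'}]
    # yields [{'url': 'a.vtt'}, {'url': 'b.vtt'}]; the duplicate 'a.vtt'
    # item from the second list is dropped.
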
    @classmethod
    def _merge_subtitles(cls, *dicts, target=None):
        """ Merge subtitle dictionaries, language by language. """
        if target is None:
            target = {}
        for d in dicts:
            for lang, subs in d.items():
                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
        return target

    def extract_automatic_captions(self, *args, **kwargs):
        if (self.get_param('writeautomaticsub', False)
                or self.get_param('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    @functools.cached_property
    def _cookies_passed(self):
        """Whether cookies have been passed to YoutubeDL"""
        return self.get_param('cookiefile') is not None or self.get_param('cookiesfrombrowser') is not None

    def mark_watched(self, *args, **kwargs):
        if not self.get_param('mark_watched', False):
            return
        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
            self._mark_watched(*args, **kwargs)

    def _mark_watched(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    def geo_verification_headers(self):
        headers = {}
        geo_verification_proxy = self.get_param('geo_verification_proxy')
        if geo_verification_proxy:
            headers['Ytdl-request-proxy'] = geo_verification_proxy
        return headers

    @staticmethod
    def _generic_id(url):
        return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
    def _generic_title(self, url='', webpage='', *, default=None):
        return (self._og_search_title(webpage, default=None)
                or self._html_extract_title(webpage, default=None)
                or urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
                or default)

    @staticmethod
    def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
        all_known = all(map(
            lambda x: x is not None,
            (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
        return (
            'private' if is_private
            else 'premium_only' if needs_premium
            else 'subscriber_only' if needs_subscription
            else 'needs_auth' if needs_auth
            else 'unlisted' if is_unlisted
            else 'public' if all_known
            else None)
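    # Illustrative examples (not in the original source):
    #   _availability(is_private=False, needs_premium=False, needs_subscription=False,
    #                 needs_auth=False, is_unlisted=False) == 'public'
    #   _availability(is_unlisted=True) == 'unlisted'   # known truthy flags short-circuit
    #   _availability() is None                         # not all flags known
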
    def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
        """
        @returns            A list of values for the extractor argument given by "key"
                            or "default" if no such key is present
        @param default      The default value to return when the key is not present (default: [])
        @param casesense    When false, the values are converted to lower case
        """
        ie_key = ie_key if isinstance(ie_key, str) else (ie_key or self).ie_key()
        val = traverse_obj(self._downloader.params, ('extractor_args', ie_key.lower(), key))
        if val is None:
            return [] if default is NO_DEFAULT else default
        return list(val) if casesense else [x.lower() for x in val]
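    # Illustrative example (hypothetical extractor 'myie'): with
    # --extractor-args "myie:somekey=Val1,VAL2" on the command line,
    #   self._configuration_arg('somekey')                 == ['val1', 'val2']
    #   self._configuration_arg('somekey', casesense=True) == ['Val1', 'VAL2']
    #   self._configuration_arg('missing', default=None)   is None
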
    def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
        if not playlist_id or not video_id:
            return not video_id

        no_playlist = (smuggled_data or {}).get('force_noplaylist')
        if no_playlist is not None:
            return not no_playlist

        video_id = '' if video_id is True else f' {video_id}'
        playlist_id = '' if playlist_id is True else f' {playlist_id}'
        if self.get_param('noplaylist'):
            self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
            return False
        self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
        return True
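    # Usage sketch (illustrative): an extractor handling both watch and playlist
    # URLs would typically do
    #   if self._yes_playlist(playlist_id, video_id):
    #       return self.playlist_result(entries, playlist_id)
    #   return self._extract_video(video_id)   # hypothetical helper
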
    def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
        RetryManager.report_retry(
            err, _count or int(fatal), _retries,
            info=self.to_screen, warn=self.report_warning, error=None if fatal else self.report_warning,
            sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))

    def RetryManager(self, **kwargs):
        return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)
    def _extract_generic_embeds(self, url, *args, info_dict={}, note='Extracting generic embeds', **kwargs):
        display_id = traverse_obj(info_dict, 'display_id', 'id')
        self.to_screen(f'{format_field(display_id, None, "%s: ")}{note}')
        return self._downloader.get_info_extractor('Generic')._extract_embeds(
            smuggle_url(url, {'block_ies': [self.ie_key()]}), *args, **kwargs)
    @classmethod
    def extract_from_webpage(cls, ydl, url, webpage):
        ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
              else ydl.get_info_extractor(cls.ie_key()))
        for info in ie._extract_from_webpage(url, webpage) or []:
            # url = None since we do not want to set (webpage/original)_url
            ydl.add_default_extra_info(info, ie, None)
            yield info

    @classmethod
    def _extract_from_webpage(cls, url, webpage):
        for embed_url in orderedSet(
                cls._extract_embed_urls(url, webpage) or [], lazy=True):
            yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)
    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """@returns all the embed urls on the webpage"""
        if '_EMBED_URL_RE' not in cls.__dict__:
            assert isinstance(cls._EMBED_REGEX, (list, tuple))
            for idx, regex in enumerate(cls._EMBED_REGEX):
                assert regex.count('(?P<url>') == 1, \
                    f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
            cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))

        for regex in cls._EMBED_URL_RE:
            for mobj in regex.finditer(webpage):
                embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
                if cls._VALID_URL is False or cls.suitable(embed_url):
                    yield embed_url

    class StopExtraction(Exception):
        pass

    @classmethod
    def _extract_url(cls, webpage):  # TODO: Remove
        """Only for compatibility with some older extractors"""
        return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
    def __init_subclass__(cls, *, plugin_name=None, **kwargs):
        if plugin_name:
            mro = inspect.getmro(cls)
            super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
            cls.IE_NAME, cls.ie_key = f'{super_class.IE_NAME}+{plugin_name}', super_class.ie_key
            while getattr(super_class, '__wrapped__', None):
                super_class = super_class.__wrapped__
            setattr(sys.modules[super_class.__module__], super_class.__name__, cls)

        return super().__init_subclass__(**kwargs)
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
    """

    _MAX_RESULTS = float('inf')

    @classproperty
    def _VALID_URL(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    def _real_extract(self, query):
        prefix, query = self._match_valid_url(query).group('prefix', 'query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(f'invalid download number {n} for query "{query}"')
            elif n > self._MAX_RESULTS:
                self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query.
        Either this function or _search_results must be overridden by subclasses """
        return self.playlist_result(
            itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
            query, query)

    def _search_results(self, query):
        """Returns an iterator of search results"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @classproperty
    def SEARCH_KEY(cls):
        return cls._SEARCH_KEY
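# Minimal sketch of a SearchInfoExtractor subclass (hypothetical, for
# illustration only; _fetch_page is a made-up helper):
#
#   class ExampleSearchIE(SearchInfoExtractor):
#       IE_NAME = 'example:search'
#       _SEARCH_KEY = 'exsearch'
#
#       def _search_results(self, query):
#           for page in itertools.count(1):
#               for url in self._fetch_page(query, page):
#                   yield self.url_result(url)
#
# A query like 'exsearch5:kittens' then returns the first 5 results for
# "kittens", while 'exsearchall:kittens' returns up to _MAX_RESULTS.
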
class UnsupportedURLIE(InfoExtractor):
    _VALID_URL = '.*'
    _ENABLED = False
    IE_DESC = False

    def _real_extract(self, url):
        raise UnsupportedError(url)