import xml.etree.ElementTree

from ..compat import functools  # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..cookies import LenientSimpleCookie
from ..downloader.f4m import get_base_url, remove_encrypted_media
from ..utils import (
    parse_m3u8_attributes,
)
92 """Information Extractor class.
94 Information extractors are the classes that, given a URL, extract
95 information about the video (or videos) the URL refers to. This
96 information includes the real video URL, the video title, author and
97 others. The information is stored in a dictionary which is then
98 passed to the YoutubeDL. The YoutubeDL processes this
99 information possibly downloading the video to the file system, among
100 other possible outcomes.
102 The type field determines the type of the result.
103 By far the most common value (and the default if _type is missing) is
104 "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped. Set to an empty string if video has
                    no title as opposed to "None" which signifies that the
                    extractor failed to obtain a title

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        The mandatory URL representing the media:
                                   for plain file media - HTTP URL of this file,
                                   for HLS - URL of the M3U8 media playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH
                                     - HTTP URL to plain file media (in case of
                                       unfragmented media)
                                     - URL of the MPD manifest or base URL
                                       representing the media if MPD manifest
                                       is parsed from a string (in case of
                                       fragmented media)
                                   for MSS - URL of the ISM manifest.
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media:
                                   for HLS - URL of the M3U8 master playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH - URL of the MPD manifest,
                                   for MSS - URL of the ISM manifest.
                    * manifest_stream_number  (For internal use only)
                                 The index of the stream in the manifest file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * aspect_ratio  Aspect ratio of the video, if known
                                 Automatically calculated from width and height
                    * resolution Textual description of width and height
                                 Automatically calculated from width and height
                    * dynamic_range The dynamic range of the video. One of:
                                 "SDR" (None), "HDR10", "HDR10+", "HDR12", "HLG", "DV"
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * audio_channels  Number of audio channels
                    * vbr        Average video bitrate in KBit/s
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case. One of "http", "https" or
                                 one of the protocols defined in downloader.PROTOCOL_MAP
                    * fragment_base_url
                                 Base URL for fragments. Each fragment's path
                                 value (if present) will be relative to
                                 this URL.
                    * fragments  A list of fragments of a fragmented media.
                                 Each fragment entry must contain either an url
                                 or a path. If an url is present it should be
                                 considered by a client. Otherwise both path and
                                 fragment_base_url must be present. Here is
                                 the list of all potential fields:
                                 * "url" - fragment's URL
                                 * "path" - fragment's path relative to
                                            fragment_base_url
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                    * is_from_start  Is a live format that can be downloaded
                                 from the start. Boolean
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
                    * has_drm    The format has DRM and cannot be downloaded. Boolean
                    * downloader_options  A dictionary of downloader options
                                 (For internal use only)
                                 * http_chunk_size  Chunk size for HTTP downloads
                                 * ffmpeg_args      Extra arguments for ffmpeg downloader
                    RTMP formats can also have the additional fields: page_url,
                    app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
                    rtmp_protocol, rtmp_real_time

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).
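
    A minimal sketch of a single-video result, with illustrative values only
    (real extractors will usually fill in many more of the fields above):

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://example.com/video.mp4',
                'ext': 'mp4',
                'format_id': 'http-720p',
                'width': 1280,
                'height': 720,
            }],
        }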

    The following fields are optional:

    direct:         True if a direct video file was given (must only be set by GenericIE)
    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
                        * "http_headers" (dict) - HTTP headers for the request
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    timestamp:      UNIX timestamp of the moment the video was uploaded
    upload_date:    Video upload date in UTC (YYYYMMDD).
                    If not explicitly set, calculated from timestamp
    release_timestamp: UNIX timestamp of the moment the video was released.
                    If it is not clear whether to use timestamp or this, use the former
    release_date:   The date (YYYYMMDD) when the video was released in UTC.
                    If not explicitly set, calculated from release_timestamp
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date:  The date (YYYYMMDD) when the video was last modified in UTC.
                    If not explicitly set, calculated from modified_timestamp
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    channel_follower_count: Number of followers of the channel.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    It can optionally also have:
                        * "name": Name or description of the subtitles
                        * "http_headers": A dictionary of additional HTTP headers
                          to add to the request.
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles'; contains automatically generated
                    captions instead of normal subtitles
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    concurrent_view_count: How many users are currently watching the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "author_thumbnail" - The thumbnail of the comment author
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                          Set to "root" to indicate that this is a
                          comment to the original video.
                        * "like_count" - Number of positive ratings of the comment
                        * "dislike_count" - Number of negative ratings of the comment
                        * "is_favorited" - Whether the comment is marked as
                          favorite by the video uploader
                        * "author_is_uploader" - Whether the comment is made by
                          the video uploader
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to yt-dlp it
                    should allow to get the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    cast:           A list of the video cast
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    was_live:       True, False, or None (=unknown). Whether this video was
                    originally a live stream.
    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
                    or 'post_live' (was live, but VOD is not yet processed)
                    If absent, automatically set from is_live, was_live
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)
    playable_in_embed: Whether this video is allowed to play in embedded
                    players on other sites. Can be True (=always allowed),
                    False (=never allowed), None (=unknown), or a string
                    specifying the criteria for embedability; e.g. 'whitelist'
    availability:   Under what condition the video is available. One of
                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
                    to set it
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    _format_sort_fields: A list of fields to use for sorting formats
    __post_extractor: A function to be called just before the metadata is
                    written to either disk, logger or console. The function
                    must return a dict which will be added to the info_dict.
                    This is useful for additional information that is
                    time-consuming to extract. Note that the fields thus
                    extracted will not be available to output template and
                    match_filter. So, only "comments" and "comment_count" are
                    currently allowed to be extracted via this method.

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    series_id:      Id of the series or programme the video episode belongs to, as a unicode string.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.
    composer:       Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

    section_start:  Start time of the section in seconds
    section_end:    End time of the section in seconds

    The following fields should only be set for storyboards:
    rows:           Number of rows in each storyboard fragment, as an integer
    columns:        Number of columns in each storyboard fragment, as an integer

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.

    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", and any other relevant
    attributes with the same semantics as videos (see above).

    It can also have the following optional fields:

    playlist_count: The total number of videos in a playlist. If not given,
                    YoutubeDL tries to calculate it from "entries"
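
    A minimal sketch of a playlist result (values illustrative only; the
    playlist_result() helper below builds dictionaries of this shape):

        {
            '_type': 'playlist',
            'id': 'channel-1234',
            'title': 'All uploads',
            'entries': [first_video_info_dict, second_video_info_dict],
        }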

    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.

    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.
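
    A minimal sketch of a "url" result (values illustrative only; the
    url_result() helper below builds dictionaries of this shape):

        {
            '_type': 'url',
            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
            'ie_key': 'Youtube',
        }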

    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.

    Subclasses of this should also be added to the list of extractors and
    should define a _VALID_URL regexp and re-define the _real_extract() and
    (optionally) _real_initialize() methods.

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.
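
    A minimal sketch of a concrete subclass (site, URL pattern and helper
    choices are illustrative only):

        class ExampleIE(InfoExtractor):
            _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>\d+)'

            def _real_extract(self, url):
                video_id = self._match_id(url)
                webpage = self._download_webpage(url, video_id)
                return {
                    'id': video_id,
                    'title': self._html_extract_title(webpage),
                    'url': self._og_search_video_url(webpage),
                }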

    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
    the HTML of Generic webpages. It may also override _extract_embed_urls
    or _extract_from_webpage as necessary. While these are normally classmethods,
    _extract_from_webpage is allowed to be an instance method.

    _extract_from_webpage may raise self.StopExtraction() to stop further
    processing of the webpage and obtain exclusive rights to it. This is useful
    when the extractor cannot reliably be matched using just the URL,
    e.g. invidious/peertube instances.

    Embed-only extractors can be defined by setting _VALID_URL = False.

    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will
    be called between _initialize_pre_login and _real_initialize if credentials
    are passed by the user. In cases where it is necessary to have the login
    process as part of the extraction rather than initialization, _perform_login
    can be left undefined.
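
    A minimal sketch of the login hooks (endpoint and form fields are
    illustrative only, and urlencode_postdata is assumed to come from ..utils):

        class ExampleIE(InfoExtractor):
            _NETRC_MACHINE = 'example'

            def _perform_login(self, username, password):
                self._download_webpage(
                    'https://example.com/login', None, 'Logging in',
                    data=urlencode_postdata({'user': username, 'pass': password}))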

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    Though it won't disable explicit geo restriction bypass based on
    country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by
    geo restriction bypass mechanism right away in order to bypass
    geo restriction, of course, if the mechanism is not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    The _ENABLED attribute should be set to False for IEs that
    are disabled by default and must be explicitly enabled.

    The _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _x_forwarded_for_ip = None
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _NETRC_MACHINE = None

    def _login_hint(self, method=NO_DEFAULT, netrc=None):
        password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
        return {
            None: '',
            'any': f'Use --cookies, --cookies-from-browser, {password_hint}',
            'password': f'Use {password_hint}',
            'cookies': (
                'Use --cookies-from-browser or --cookies for the authentication. '
                'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies'),
        }[method if method is not NO_DEFAULT else 'any' if self.supports_login() else 'cookies']

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance).
        If a downloader is not passed during initialization,
        it must be set using "set_downloader()" before "extract()" is called"""
        self._x_forwarded_for_ip = None
        self._printed_messages = set()
        self.set_downloader(downloader)

    @classmethod
    def _match_valid_url(cls, url):
        if cls._VALID_URL is False:
            return None
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls._match_valid_url(url) is not None
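
    # A hypothetical suitable() override, mirroring the pattern used by
    # playlist extractors that defer single-video URLs to a sibling IE
    # (class names illustrative only):
    #
    #     @classmethod
    #     def suitable(cls, url):
    #         return False if ExampleIE.suitable(url) else super().suitable(url)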

    @classmethod
    def _match_id(cls, url):
        return cls._match_valid_url(url).group('id')

    @classmethod
    def get_temp_id(cls, url):
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None
563 """Getter method for _WORKING."""

    @classmethod
    def supports_login(cls):
        return bool(cls._NETRC_MACHINE)

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._printed_messages = set()
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        self._initialize_pre_login()
        if self.supports_login():
            username, password = self._get_login_info()
            if username:
                self._perform_login(username, password)
        elif self.get_param('username') and False not in (self.IE_DESC, self._NETRC_MACHINE):
            self.report_warning(f'Login with password is not supported for this website. {self._login_hint("cookies")}')
        self._real_initialize()

    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)
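
        A hypothetical manual call, assuming the unrestricted countries were
        only discovered during extraction:

            self._initialize_geo_bypass({
                'countries': ['DE', 'FR'],
            })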
        """
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self.get_param('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self.get_param('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self.get_param('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                self._downloader.write_debug(
                    f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            for _ in range(2):
                # Retry the extraction if _real_extract() was interrupted by a GeoRestrictedError
                try:
                    self.initialize()
                    self.write_debug('Extracting URL: %s' % url)
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles') or {}
                    if 'no-live-chat' in self.get_param('compat_opts'):
                        for lang in ('live_chat', 'comments', 'danmaku'):
                            subtitles.pop(lang, None)
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except UnsupportedError:
            raise
        except ExtractorError as e:
            kwargs = {
                'video_id': e.video_id or self.get_temp_id(url),
                'tb': e.traceback or sys.exc_info()[2],
                'expected': e.expected,
            }
            if hasattr(e, 'countries'):
                kwargs['countries'] = e.countries
            raise type(e)(e.orig_msg, **kwargs)
        except http.client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))

    def __maybe_fake_ip_and_retry(self, countries):
        if (not self.get_param('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self.get_param('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets a YoutubeDL instance as the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _initialize_pre_login(self):
        """ Initialization before login. Redefine in subclasses."""

    def _perform_login(self, username, password):
        """ Login with username and password. Redefine in subclasses."""

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')
755 """A string for getting the InfoExtractor with get_info_extractor"""
756 return cls
.__name
__[:-2]
760 return cls
.__name
__[:-2]

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)

    def _create_request(self, url_or_request, data=None, headers=None, query=None):
        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
        return sanitized_Request(url_or_request, data, headers or {})

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if not self._downloader._first_webpage_request:
            sleep_interval = self.get_param('sleep_interval_requests') or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        self._downloader._first_webpage_request = False

        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen(str(note))
            else:
                self.to_screen(f'{video_id}: {note}')

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            headers = (headers or {}).copy()
            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)

        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = f'{errnote}: {error_to_compat_str(err)}'
            if fatal:
                raise ExtractorError(errmsg, cause=err)
            else:
                self.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
                                 encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        Arguments:
        url_or_request -- plain text URL as a string or
            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractionError to be raised,
            otherwise a warning will be reported and extraction continued
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows to accept failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
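
        A hypothetical call that tolerates 404 responses as well (URL and
        variable names illustrative only):

            webpage, urlh = self._download_webpage_handle(
                url, video_id, expected_status=404)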
        """

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, str):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details.' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _request_dump_filename(self, url, video_id):
        basen = f'{video_id}_{url}'
        trim_length = self.get_param('trim_file_name') or 240
        if len(basen) > trim_length:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:trim_length - len(h)] + h
        filename = sanitize_filename(f'{basen}.dump', restricted=True)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = fR'\\?\{absfilepath}'
        return filename

    def __decode_webpage(self, webpage_bytes, encoding, headers):
        if not encoding:
            encoding = self._guess_encoding_from_content(headers.get('Content-Type', ''), webpage_bytes)
        try:
            return webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            return webpage_bytes.decode('utf-8', 'replace')

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages'):
            filename = self._request_dump_filename(urlh.geturl(), video_id)
            self.to_screen(f'Saving request to {filename}')
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        content = self.__decode_webpage(webpage_bytes, encoding, urlh.headers)
        self.__check_blocked(content)

        return content

    def __print_error(self, errnote, fatal, video_id, err):
        if fatal:
            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
        elif errnote:
            self.report_warning(f'{video_id}: {errnote}: {err}')

    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
        if transform_source:
            xml_string = transform_source(xml_string)
        try:
            return compat_etree_fromstring(xml_string.encode('utf-8'))
        except xml.etree.ElementTree.ParseError as ve:
            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
        try:
            return json.loads(
                json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
        except ValueError as ve:
            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_socket_response_as_json(self, data, *args, **kwargs):
        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)

    def __create_download_methods(name, parser, note, errnote, return_value):

        def parse(ie, content, *args, errnote=errnote, **kwargs):
            if parser is None:
                return content
            if errnote is False:
                kwargs['errnote'] = errnote
            # parser is fetched by name so subclasses can override it
            return getattr(ie, parser)(content, *args, **kwargs)

        def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                            fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            res = self._download_webpage_handle(
                url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
                data=data, headers=headers, query=query, expected_status=expected_status)
            if res is False:
                return res
            content, urlh = res
            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh

        def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                             fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            if self.get_param('load_pages'):
                url_or_request = self._create_request(url_or_request, data, headers, query)
                filename = self._request_dump_filename(url_or_request.full_url, video_id)
                self.to_screen(f'Loading request from {filename}')
                try:
                    with open(filename, 'rb') as dumpf:
                        webpage_bytes = dumpf.read()
                except OSError as e:
                    self.report_warning(f'Unable to load request from disk: {e}')
                else:
                    content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
            kwargs = {
                'note': note,
                'errnote': errnote,
                'transform_source': transform_source,
                'fatal': fatal,
                'encoding': encoding,
                'data': data,
                'headers': headers,
                'query': query,
                'expected_status': expected_status,
            }
            if parser is None:
                kwargs.pop('transform_source')
            # The method is fetched by name so subclasses can override _download_..._handle
            res = getattr(self, download_handle.__name__)(url_or_request, video_id, **kwargs)
            return res if res is False else res[0]

        def impersonate(func, name, return_value):
            func.__name__, func.__qualname__ = name, f'InfoExtractor.{name}'
            func.__doc__ = f'''
                @param transform_source     Apply this transformation before parsing
                @returns                    {return_value}

                See _download_webpage_handle docstring for other arguments specification
            '''

        impersonate(download_handle, f'_download_{name}_handle', f'({return_value}, URL handle)')
        impersonate(download_content, f'_download_{name}', f'{return_value}')
        return download_handle, download_content

    _download_xml_handle, _download_xml = __create_download_methods(
        'xml', '_parse_xml', 'Downloading XML', 'Unable to download XML', 'xml as an xml.etree.ElementTree.Element')
    _download_json_handle, _download_json = __create_download_methods(
        'json', '_parse_json', 'Downloading JSON metadata', 'Unable to download JSON metadata', 'JSON object as a dict')
    _download_socket_json_handle, _download_socket_json = __create_download_methods(
        'socket_json', '_parse_socket_response_as_json', 'Polling socket', 'Unable to poll socket', 'JSON object as a dict')
    __download_webpage = __create_download_methods('webpage', None, None, None, 'data of the page as a string')[1]
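
    # A hypothetical use of the generated download helpers from a subclass's
    # _real_extract (endpoint and note are illustrative only):
    #
    #     data = self._download_json(
    #         'https://example.com/api/v1/video.json', video_id,
    #         note='Downloading video metadata', fatal=False)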

    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=NO_DEFAULT, *args, **kwargs):
        """
        Return the data of the page as a string.

        Keyword arguments:
        tries -- number of tries
        timeout -- sleep interval between tries

        See _download_webpage_handle docstring for other arguments specification.
        """

        R''' # NB: These are unused; should they be deprecated?
        if tries != 1:
            self._downloader.deprecation_warning('tries argument is deprecated in InfoExtractor._download_webpage')
        if timeout is NO_DEFAULT:
            timeout = 5
        else:
            self._downloader.deprecation_warning('timeout argument is deprecated in InfoExtractor._download_webpage')
        '''

        try_count = 0
        while True:
            try:
                return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
            except http.client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)

    def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
        idstr = format_field(video_id, None, '%s: ')
        msg = f'[{self.IE_NAME}] {idstr}{msg}'
        if only_once:
            if f'WARNING: {msg}' in self._printed_messages:
                return
            self._printed_messages.add(f'WARNING: {msg}')
        self._downloader.report_warning(msg, *args, **kwargs)

    def to_screen(self, msg, *args, **kwargs):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def write_debug(self, msg, *args, **kwargs):
        self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def get_param(self, name, default=None, *args, **kwargs):
        if self._downloader:
            return self._downloader.params.get(name, default, *args, **kwargs)
        return default

    def report_drm(self, video_id, partial=NO_DEFAULT):
        if partial is not NO_DEFAULT:
            self._downloader.deprecation_warning('InfoExtractor.report_drm no longer accepts the argument partial')
        self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    def raise_login_required(
            self, msg='This video is only available for registered users',
            metadata_available=False, method=NO_DEFAULT):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        msg += format_field(self._login_hint(method), None, '. %s')
        raise ExtractorError(msg, expected=True)

    def raise_geo_restricted(
            self, msg='This video is not available from your location due to geo restriction',
            countries=None, metadata_available=False):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
        else:
            raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
        """Returns a URL that points to a page that should be processed"""
        if ie is not None:
            kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
        if video_id is not None:
            kwargs['id'] = video_id
        if video_title is not None:
            kwargs['title'] = video_title
        return {
            **kwargs,
            '_type': 'url_transparent' if url_transparent else 'url',
            'url': url,
        }

    @classmethod
    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
        return cls.playlist_result(
            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
            playlist_id, playlist_title, **kwargs)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
        """Returns a playlist"""
        if playlist_id:
            kwargs['id'] = playlist_id
        if playlist_title:
            kwargs['title'] = playlist_title
        if playlist_description is not None:
            kwargs['description'] = playlist_description
        return {
            **kwargs,
            '_type': 'multi_video' if multi_video else 'playlist',
            'entries': entries,
        }

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
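
        A hypothetical call from an extractor (pattern and field name
        illustrative only):

            uploader = self._search_regex(
                r'<span class="uploader">([^<]+)</span>',
                webpage, 'uploader', default=None)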
        """
        if string is None:
            mobj = None
        elif isinstance(pattern, (str, re.Pattern)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            elif isinstance(group, (list, tuple)):
                return tuple(mobj.group(g) for g in group)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='',
                     contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT, **kwargs):
        """Searches string for the JSON object specified by start_pattern"""
        # NB: end_pattern is only used to reduce the size of the initial match
        if default is NO_DEFAULT:
            default, has_default = {}, False
        else:
            fatal, has_default = False, True

        json_string = self._search_regex(
            rf'(?:{start_pattern})\s*(?P<json>{contains_pattern})\s*(?:{end_pattern})',
            string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
        if not json_string:
            return default

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
        try:
            return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
        except ExtractorError as e:
            if fatal:
                raise ExtractorError(
                    f'Unable to extract {_name} - Failed to parse JSON', cause=e.cause, video_id=video_id)
            elif not has_default:
                self.report_warning(
                    f'Unable to extract {_name} - Failed to parse JSON: {e}', video_id=video_id)
        return default

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        return clean_html(res).strip()

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self.get_param('usenetrc', False):
            try:
                netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
                if os.path.isdir(netrc_file):
                    netrc_file = os.path.join(netrc_file, '.netrc')
                info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (OSError, netrc.NetrcParseError) as err:
                self.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in params dictionary. If no such credentials
        available look in the netrc file using the netrc_machine or _NETRC_MACHINE
        value.
        If there's no info available, return (None, None)
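
        A hypothetical call for a site that takes separate API credentials
        (option names illustrative only):

            username, password = self._get_login_info(
                username_option='api_user', password_option='api_pass')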
        """

        # Attempt to use provided username and password or .netrc data
        username = self.get_param(username_option)
        if username is not None:
            password = self.get_param(password_option)
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """

        tfa = self.get_param('twofactor')
        if tfa:
            return tfa

        return getpass.getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
                       % {'prop': re.escape(prop), 'sep': '(?::|[:-])'})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        prop = variadic(prop)
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, *, fatal=False, **kargs):
        return self._og_search_property('title', html, fatal=fatal, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)
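
    # A hypothetical use of the OpenGraph helpers from a subclass's
    # _real_extract (variable names illustrative only):
    #
    #     title = self._og_search_title(webpage)
    #     thumbnail = self._og_search_thumbnail(webpage)
    #     description = self._og_search_description(webpage)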

    def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
        return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        name = variadic(name)
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    @staticmethod
    def _rta_search(html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18

        # And then there are the jokers who advertise that they use RTA, but actually don't.
        AGE_LIMIT_MARKERS = [
            r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
        ]
        if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta(
            'isFamilyFriendly', html, default=None)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _yield_json_ld(self, html, video_id, *, fatal=True, default=NO_DEFAULT):
        """Yield all json ld objects in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        for mobj in re.finditer(JSON_LD_RE, html):
            json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
            for json_ld in variadic(json_ld_item):
                if isinstance(json_ld, dict):
                    yield json_ld

    def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, default=NO_DEFAULT):
        """Search for a video in any json ld in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        info = self._json_ld(
            list(self._yield_json_ld(html, video_id, fatal=fatal, default=default)),
            video_id, fatal=fatal, expected_type=expected_type)
        if info:
            return info
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}
1471 def _json_ld(self
, json_ld
, video_id
, fatal
=True, expected_type
=None):
1472 if isinstance(json_ld
, str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}

        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }

        def is_type(e, *expected_types):
            type = variadic(traverse_obj(e, '@type'))
            return any(x in type for x in expected_types)

        def extract_interaction_type(e):
            interaction_type = e.get('interactionType')
            if isinstance(interaction_type, dict):
                interaction_type = interaction_type.get('@type')
            return str_or_none(interaction_type)

        def extract_interaction_statistic(e):
            interaction_statistic = e.get('interactionStatistic')
            if isinstance(interaction_statistic, dict):
                interaction_statistic = [interaction_statistic]
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not is_type(is_e, 'InteractionCounter'):
                    continue
                interaction_type = extract_interaction_type(is_e)
                if not interaction_type:
                    continue
                # For interaction count some sites provide string instead of
                # an integer (as per spec) with non digit characters (e.g. ",")
                # so extracting count with more relaxed str_to_int
                interaction_count = str_to_int(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count

        def extract_chapter_information(e):
            chapters = [{
                'title': part.get('name'),
                'start_time': part.get('startOffset'),
                'end_time': part.get('endOffset'),
            } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
            for idx, (last_c, current_c, next_c) in enumerate(zip(
                    [{'end_time': 0}] + chapters, chapters, chapters[1:])):
                current_c['end_time'] = current_c['end_time'] or next_c['start_time']
                current_c['start_time'] = current_c['start_time'] or last_c['end_time']
                if None in current_c.values():
                    self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
                    return
            if chapters:
                chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
                info['chapters'] = chapters

        def extract_video_object(e):
            author = e.get('author')
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'ext': mimetype2ext(e.get('encodingFormat')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnails': [{'url': unescapeHTML(url)}
                               for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
                               if url_or_none(url)],
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                # author can be an instance of 'Organization' or 'Person' types.
                # both types can have 'name' property (inherited from 'Thing' type). [1]
                # however some websites are using 'Text' type instead.
                # 1. https://schema.org/VideoObject
                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
                'artist': traverse_obj(e, ('byArtist', 'name'), expected_type=str),
                'filesize': int_or_none(float_or_none(e.get('contentSize'))),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
                'tags': try_call(lambda: e.get('keywords').split(',')),
            })
            if is_type(e, 'AudioObject'):
                info.update({
                    'vcodec': 'none',
                    'abr': int_or_none(e.get('bitrate')),
                })
            extract_interaction_statistic(e)
            extract_chapter_information(e)

        def traverse_json_ld(json_ld, at_top_level=True):
            for e in variadic(json_ld):
                if not isinstance(e, dict):
                    continue
                if at_top_level and '@context' not in e:
                    continue
                if at_top_level and set(e.keys()) == {'@context', '@graph'}:
                    traverse_json_ld(e['@graph'], at_top_level=False)
                    continue
                if expected_type is not None and not is_type(e, expected_type):
                    continue
                rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
                if rating is not None:
                    info['average_rating'] = rating
                if is_type(e, 'TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif is_type(e, 'Movie'):
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif is_type(e, 'Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody') or e.get('description')),
                    })
                    if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
                        extract_video_object(e['video'][0])
                    elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
                        extract_video_object(e['subjectOf'][0])
                elif is_type(e, 'VideoObject', 'AudioObject'):
                    extract_video_object(e)
                    if expected_type is None:
                        continue
                    else:
                        break
                video = e.get('video')
                if is_type(video, 'VideoObject'):
                    extract_video_object(video)
                if expected_type is None:
                    continue
                else:
                    break

        traverse_json_ld(json_ld)
        return filter_dict(info)
    def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
        return self._parse_json(
            self._search_regex(
                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
                webpage, 'next.js data', fatal=fatal, **kw),
            video_id, transform_source=transform_source, fatal=fatal)
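    # The tag this targets looks like (hypothetical page):
    #   <script id="__NEXT_DATA__" type="application/json">{"props": {...}}</script>
    # The returned dict is whatever Next.js serialized into that JSON blob.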
    def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)):
        """Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
        rectx = re.escape(context_name)
        FUNCTION_RE = r'\(function\((?P<arg_keys>.*?)\){return\s+(?P<js>{.*?})\s*;?\s*}\((?P<arg_vals>.*?)\)'
        js, arg_keys, arg_vals = self._search_regex(
            (rf'<script>\s*window\.{rectx}={FUNCTION_RE}\s*\)\s*;?\s*</script>', rf'{rectx}\(.*?{FUNCTION_RE}'),
            webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
            default=NO_DEFAULT if fatal else (None, None, None))
        if js is None:
            return {}

        args = dict(zip(arg_keys.split(','), arg_vals.split(',')))

        for key, val in args.items():
            if val in ('undefined', 'void 0'):
                args[key] = 'null'

        ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
        return traverse_obj(ret, traverse) or {}
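    # A minimal sketch of the pattern parsed above (hypothetical page):
    #   <script>window.__NUXT__=(function(a,b){return {data:[{title:a,id:b}]}}("Demo",42));</script>
    # The call arguments ("Demo", 42) are bound to the parameter names (a, b) via
    # js_to_json(vars=...), and traverse=('data', 0) then yields {'title': 'Demo', 'id': 42}.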
    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not input:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs
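    # Example (illustrative): for the fragment
    #   <input type="hidden" name="csrf" value="abc123">
    #   <input type="submit" name="go" value="Play">
    # _hidden_inputs() returns {'csrf': 'abc123', 'go': 'Play'}; inputs of any
    # other type are skipped.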
    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)
    @classproperty(cache=True)
    def FormatSort(cls):
        class FormatSort(FormatSorter):
            def __init__(ie, *args, **kwargs):
                super().__init__(ie._downloader, *args, **kwargs)

        deprecation_warning(
            'yt_dlp.InfoExtractor.FormatSort is deprecated and may be removed in the future. '
            'Use yt_dlp.utils.FormatSorter instead')
        return FormatSort
    def _sort_formats(self, formats, field_preference=[]):
        if not field_preference:
            self._downloader.deprecation_warning(
                'yt_dlp.InfoExtractor._sort_formats is deprecated and is no longer required')
            return
        self._downloader.deprecation_warning(
            'yt_dlp.InfoExtractor._sort_formats is deprecated and no longer works as expected. '
            'Return _format_sort_fields in the info_dict instead')
        if formats:
            formats[0]['__sort_fields'] = field_preference
    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats
    def _is_valid_url(self, url, video_id, item='video', headers={}):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            self.to_screen(
                '%s: %s URL is invalid, skipping: %s'
                % (video_id, item, error_to_compat_str(e.cause)))
            return False
    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self.get_param('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        scheme = scheme or self.http_scheme()
        assert scheme.endswith(':')
        return sanitize_url(url, scheme=scheme[:-1])
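    # Example (illustrative): with default preferences,
    #   self._proto_relative_url('//cdn.example.com/v.mp4') == 'https://cdn.example.com/v.mp4'
    # while URLs that already carry a scheme are returned unchanged (after sanitization).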
    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)
    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return []

        manifest, urlh = res
        manifest_url = urlh.geturl()

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
            return []

        # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from the final formats
        # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = join_nonempty(f4m_id, tbr or i)
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        quality=quality, m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
                'quality': quality,
            })
        return formats
    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
        return {
            'format_id': join_nonempty(m3u8_id, 'meta'),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 100 if preference else -100,
            'quality': quality,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }
    def _report_ignoring_subs(self, name):
        self.report_warning(bug_reports_message(
            f'Ignoring subtitle tracks found in the {name} manifest; '
            'if any subtitle tracks are missing,'
        ), only_once=True)

    def _extract_m3u8_formats(self, *args, **kwargs):
        fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('HLS')
        return fmts
    def _extract_m3u8_formats_and_subtitles(
            self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, note=None,
            errnote=None, fatal=True, live=False, data=None, headers={},
            query={}):
        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note='Downloading m3u8 information' if note is None else note,
            errnote='Failed to download m3u8 information' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)

        if res is False:
            return [], {}

        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        return self._parse_m3u8_formats_and_subtitles(
            m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
            preference=preference, quality=quality, m3u8_id=m3u8_id,
            note=note, errnote=errnote, fatal=fatal, live=live, data=data,
            headers=headers, query=query, video_id=video_id)
    def _parse_m3u8_formats_and_subtitles(
            self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, live=False, note=None,
            errnote=None, fatal=True, data=None, headers={}, query={},
            video_id=None):
        formats, subtitles = [], {}

        has_drm = re.search('|'.join([
            r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
            r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
        ]), m3u8_doc)

        def format_url(url):
            return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)

        if self.get_param('hls_split_discontinuity', False):
            def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
                if not m3u8_doc:
                    if not manifest_url:
                        return []
                    m3u8_doc = self._download_webpage(
                        manifest_url, video_id, fatal=fatal, data=data, headers=headers,
                        note=False, errnote='Failed to download m3u8 playlist information')
                    if m3u8_doc is False:
                        return []
                return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))

        else:
            def _extract_m3u8_playlist_indices(*args, **kwargs):
                return [None]

        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, a master playlist can be easily distinguished from a media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in a master playlist, so we can
        # clearly detect a media playlist with this criterion.

        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            formats = [{
                'format_id': join_nonempty(m3u8_id, idx),
                'format_index': idx,
                'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
                'quality': quality,
                'has_drm': has_drm,
            } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]

            return formats, subtitles

        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
            if media_type == 'SUBTITLES':
                # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
                # EXT-X-MEDIA tag if the media type is SUBTITLES.
                # However, lack of URI has been spotted in the wild.
                # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
                if not media.get('URI'):
                    return
                url = format_url(media['URI'])
                sub_info = {
                    'url': url,
                    'ext': determine_ext(url),
                }
                if sub_info['ext'] == 'm3u8':
                    # Per RFC 8216 §3.1, the only possible subtitle format m3u8
                    # files may contain is WebVTT:
                    # <https://tools.ietf.org/html/rfc8216#section-3.1>
                    sub_info['ext'] = 'vtt'
                    sub_info['protocol'] = 'm3u8_native'
                lang = media.get('LANGUAGE') or 'und'
                subtitles.setdefault(lang, []).append(sub_info)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                manifest_url = format_url(media_url)
                formats.extend({
                    'format_id': join_nonempty(m3u8_id, group_id, name, idx),
                    'format_note': name,
                    'format_index': idx,
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                    'quality': quality,
                    'vcodec': 'none' if media_type == 'AUDIO' else None,
                } for idx in _extract_m3u8_playlist_indices(manifest_url))

        def build_stream_name():
            # Although the specification does not mention a NAME attribute for
            # the EXT-X-STREAM-INF tag, it still sometimes may be present (see [1]
            # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from the corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in HLS manifest such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                manifest_url = format_url(line.strip())

                for idx in _extract_m3u8_playlist_indices(manifest_url):
                    format_id = [m3u8_id, None, idx]
                    # Bandwidth of live streams may differ over time thus making
                    # format_id unpredictable. So it's better to keep provided
                    # format_id intact.
                    if not live:
                        stream_name = build_stream_name()
                        format_id[1] = stream_name or '%d' % (tbr or len(formats))
                    f = {
                        'format_id': join_nonempty(*format_id),
                        'format_index': idx,
                        'url': manifest_url,
                        'manifest_url': m3u8_url,
                        'tbr': tbr,
                        'ext': ext,
                        'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                        'protocol': entry_protocol,
                        'preference': preference,
                        'quality': quality,
                    }
                    resolution = last_stream_inf.get('RESOLUTION')
                    if resolution:
                        mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                        if mobj:
                            f['width'] = int(mobj.group('width'))
                            f['height'] = int(mobj.group('height'))
                    # Unified Streaming Platform
                    mobj = re.search(
                        r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                    if mobj:
                        abr, vbr = mobj.groups()
                        abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                        f.update({
                            'vbr': vbr,
                            'abr': abr,
                        })
                    codecs = parse_codecs(last_stream_inf.get('CODECS'))
                    f.update(codecs)
                    audio_group_id = last_stream_inf.get('AUDIO')
                    # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                    # references a rendition group MUST have a CODECS attribute.
                    # However, this is not always respected. E.g. [2]
                    # contains an EXT-X-STREAM-INF tag which references an AUDIO
                    # rendition group but does not have CODECS and, despite
                    # referencing an audio group, it represents a complete
                    # (with audio and video) format. So, for such cases we will
                    # ignore references to rendition groups and treat them
                    # as complete formats.
                    if audio_group_id and codecs and f.get('vcodec') != 'none':
                        audio_group = groups.get(audio_group_id)
                        if audio_group and audio_group[0].get('URI'):
                            # TODO: update acodec for audio only formats with
                            # the same GROUP-ID
                            f['acodec'] = 'none'
                    if not f.get('ext'):
                        f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
                    formats.append(f)

                    # for DailyMotion
                    progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                    if progressive_uri:
                        http_f = f.copy()
                        del http_f['manifest_url']
                        http_f.update({
                            'format_id': f['format_id'].replace('hls-', 'http-'),
                            'protocol': 'http',
                            'url': progressive_uri,
                        })
                        formats.append(http_f)

                last_stream_inf = {}
        return formats, subtitles
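    # For orientation, a minimal master playlist as handled above (hypothetical):
    #   #EXTM3U
    #   #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="aud",NAME="English",URI="audio.m3u8"
    #   #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360,CODECS="avc1.64001f,mp4a.40.2",AUDIO="aud"
    #   video.m3u8
    # yields an audio-only format from the EXT-X-MEDIA tag plus a video format whose
    # acodec is set to 'none' because it references the "aud" rendition group.
    # A media playlist (one containing #EXT-X-TARGETDURATION) is returned as a single format.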
    def _extract_m3u8_vod_duration(
            self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):

        m3u8_vod = self._download_webpage(
            m3u8_vod_url, video_id,
            note='Downloading m3u8 VOD manifest' if note is None else note,
            errnote='Failed to download VOD manifest' if errnote is None else errnote,
            fatal=False, data=data, headers=headers, query=query)

        return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
    def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
        if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
            return None

        return int(sum(
            float(line[len('#EXTINF:'):].split(',')[0])
            for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
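    # Example (illustrative): a playlist containing
    #   #EXT-X-PLAYLIST-TYPE:VOD
    #   #EXTINF:9.009,
    #   #EXTINF:3.003,
    # gives int(9.009 + 3.003) == 12 seconds.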
    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
    def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
        if res is False:
            assert not fatal
            return [], {}

        smil, urlh = res
        smil_url = urlh.geturl()

        namespace = self._parse_smil_namespace(smil)

        fmts = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subs = self._parse_smil_subtitles(
            smil, namespace=namespace)

        return fmts, subs

    def _extract_smil_formats(self, *args, **kwargs):
        fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('SMIL')
        return fmts
    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal)
        if res is False:
            return {}

        smil, urlh = res
        smil_url = urlh.geturl()

        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml_handle(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        imgs_count = 0

        srcs = set()
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += urllib.parse.urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            imgs_count += 1
            formats.append({
                'format_id': 'imagestream-%d' % (imgs_count),
                'url': src,
                'ext': mimetype2ext(medium.get('type')),
                'acodec': 'none',
                'vcodec': 'none',
                'width': int_or_none(medium.get('width')),
                'height': int_or_none(medium.get('height')),
                'format_note': 'SMIL storyboards',
            })

        return formats
    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles
    def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
        res = self._download_xml_handle(
            xspf_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if res is False:
            return []

        xspf, urlh = res
        xspf_url = urlh.geturl()

        return self._parse_xspf(
            xspf, playlist_id, xspf_url=xspf_url,
            xspf_base_url=base_url(xspf_url))
    def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = []
            for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
                format_url = urljoin(xspf_base_url, location.text)
                if not format_url:
                    continue
                formats.append({
                    'url': format_url,
                    'manifest_url': xspf_url,
                    'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                    'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                    'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
                })

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries
    def _extract_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts
    def _extract_mpd_formats_and_subtitles(
            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
            fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            mpd_url, video_id,
            note='Downloading MPD manifest' if note is None else note,
            errnote='Failed to download MPD manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        mpd_doc, urlh = res
        if mpd_doc is None:
            return [], {}

        # We could have been redirected to a new url when we retrieved our mpd file.
        mpd_url = urlh.geturl()
        mpd_base_url = base_url(mpd_url)

        return self._parse_mpd_formats_and_subtitles(
            mpd_doc, mpd_id, mpd_base_url, mpd_url)
    def _parse_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts
    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        if not self.get_param('dynamic_mpd', True):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract relevant
            # ones for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        stream_numbers = collections.defaultdict(int)
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = representation_attrib.get('contentType', mime_type.split('/')[0])

                    codec_str = representation_attrib.get('codecs', '')
                    # Some kind of binary subtitle found in some youtube livestreams
                    if mime_type == 'application/x-rawcc':
                        codecs = {'scodec': codec_str}
                    else:
                        codecs = parse_codecs(codec_str)
                    if content_type not in ('video', 'audio', 'text'):
                        if mime_type == 'image/jpeg':
                            content_type = mime_type
                        elif codecs.get('vcodec', 'none') != 'none':
                            content_type = 'video'
                        elif codecs.get('acodec', 'none') != 'none':
                            content_type = 'audio'
                        elif codecs.get('scodec', 'none') != 'none':
                            content_type = 'text'
                        elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
                            content_type = 'text'
                        else:
                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
                            continue

                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if try_call(lambda: base_url_e.text) is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and base_url.startswith('/'):
                        base_url = urllib.parse.urljoin(mpd_base_url, base_url)
                    elif mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    if representation_id is not None:
                        format_id = representation_id
                    else:
                        format_id = content_type
                    if mpd_id:
                        format_id = mpd_id + '-' + format_id
                    if content_type in ('video', 'audio'):
                        f = {
                            'format_id': format_id,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                            **codecs
                        }
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        if representation_id is not None:
                            tmpl = tmpl.replace('$RepresentationID$', representation_id)
                        # First off, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t = t.replace('$$', '$')  # unescape any literal '$$'
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(
                                    float_or_none(period_duration, segment_duration, default=0)))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template,
                        # e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/ytdl-org/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                        if not period_duration:
                            period_duration = try_get(
                                representation_ms_info,
                                lambda r: sum(frag['duration'] for frag in r['fragments']), float)
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = base_url
                    if content_type in ('video', 'audio', 'image/jpeg'):
                        f['manifest_stream_number'] = stream_numbers[f['url']]
                        stream_numbers[f['url']] += 1
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles
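    # A sketch of the SegmentTemplate expansion performed above (hypothetical MPD):
    #   <SegmentTemplate media="seg-$RepresentationID$-$Number%05d$.m4s"
    #                    startNumber="1" duration="4" timescale="1"/>
    # prepare_template() rewrites the template to 'seg-<id>-%(Number)05d.m4s' and the
    # fragment list is built by %-formatting it with each segment number in turn,
    # e.g. 'seg-video1-00001.m4s', 'seg-video1-00002.m4s', ...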
    def _extract_ism_formats(self, *args, **kwargs):
        fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('ISM')
        return fmts
    def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            ism_url, video_id,
            note='Downloading ISM manifest' if note is None else note,
            errnote='Failed to download ISM manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        ism_doc, urlh = res
        if ism_doc is None:
            return [], {}

        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
    def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
        """
        Parse formats from ISM manifest.
        References:
         1. [MS-SSTR]: Smooth Streaming Protocol,
            https://msdn.microsoft.com/en-us/library/ff469518.aspx
        """
        if ism_doc.get('IsLive') == 'TRUE':
            return [], {}

        duration = int(ism_doc.attrib['Duration'])
        timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

        formats = []
        subtitles = {}
        for stream in ism_doc.findall('StreamIndex'):
            stream_type = stream.get('Type')
            if stream_type not in ('video', 'audio', 'text'):
                continue
            url_pattern = stream.attrib['Url']
            stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
            stream_name = stream.get('Name')
            stream_language = stream.get('Language', 'und')
            for track in stream.findall('QualityLevel'):
                KNOWN_TAGS = {'255': 'AACL', '65534': 'EC-3'}
                fourcc = track.get('FourCC') or KNOWN_TAGS.get(track.get('AudioTag'))
                # TODO: add support for WVC1 and WMAP
                if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML', 'EC-3'):
                    self.report_warning('%s is not a supported codec' % fourcc)
                    continue
                tbr = int(track.attrib['Bitrate']) // 1000
                # [1] does not mention Width and Height attributes. However,
                # they're often present while MaxWidth and MaxHeight are
                # missing, so should be used as fallbacks
                width = int_or_none(track.get('MaxWidth') or track.get('Width'))
                height = int_or_none(track.get('MaxHeight') or track.get('Height'))
                sampling_rate = int_or_none(track.get('SamplingRate'))

                track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
                track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)

                fragments = []
                fragment_ctx = {
                    'time': 0,
                }
                stream_fragments = stream.findall('c')
                for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                    fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                    fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                    fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                    if not fragment_ctx['duration']:
                        try:
                            next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
                        except IndexError:
                            next_fragment_time = duration
                        fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                    for _ in range(fragment_repeat):
                        fragments.append({
                            'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
                            'duration': fragment_ctx['duration'] / stream_timescale,
                        })
                        fragment_ctx['time'] += fragment_ctx['duration']

                if stream_type == 'text':
                    subtitles.setdefault(stream_language, []).append({
                        'ext': 'ismt',
                        'protocol': 'ism',
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'fragments': fragments,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                        },
                    })
                elif stream_type in ('video', 'audio'):
                    formats.append({
                        'format_id': join_nonempty(ism_id, stream_name, tbr),
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'ext': 'ismv' if stream_type == 'video' else 'isma',
                        'width': width,
                        'height': height,
                        'tbr': tbr,
                        'asr': sampling_rate,
                        'vcodec': 'none' if stream_type == 'audio' else fourcc,
                        'acodec': 'none' if stream_type == 'video' else fourcc,
                        'protocol': 'ism',
                        'fragments': fragments,
                        'has_drm': ism_doc.find('Protection') is not None,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'width': width or 0,
                            'height': height or 0,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                            'sampling_rate': sampling_rate,
                            'channels': int_or_none(track.get('Channels', 2)),
                            'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                            'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                        },
                    })
        return formats, subtitles
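    # Example (illustrative): a StreamIndex Url pattern such as
    #   QualityLevels({bitrate})/Fragments(video={start time})
    # is expanded per track and fragment into e.g.
    #   QualityLevels(1500000)/Fragments(video=0)
    # with {start time} taken from the running fragment timestamp.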
    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
        def absolute_url(item_url):
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info=None):
            type_info = type_info or {}
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, quality=quality, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                    'ext': ext,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979,
            # e.g. http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
        for media_tag, _, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(dict_get(media_attributes, ('src', 'data-video-src', 'data-src', 'data-source')))
            if src:
                f = parse_content_type(media_attributes.get('type'))
                _, formats = _media_formats(src, media_type, f)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src', 'data-source')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
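    # For reference, a tag this method can digest (hypothetical page):
    #   <video poster="/thumb.jpg">
    #     <source src="/v-720.mp4" type="video/mp4" label="720p">
    #     <track kind="subtitles" srclang="en" src="/v.vtt">
    #   </video>
    # producing one entry with a plain-URL format (resolution hints taken from the
    # label) and an English subtitle track; every format gets a Referer header.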
    def _extract_akamai_formats(self, *args, **kwargs):
        fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('akamai')
        return fmts
    def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
        signed = 'hdnea=' in manifest_url
        if not signed:
            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
            manifest_url = re.sub(
                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
                '', manifest_url).strip('?')

        formats = []
        subtitles = {}

        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
        f4m_formats = self._extract_f4m_formats(
            f4m_url, video_id, f4m_id='hds', fatal=False)
        for entry in f4m_formats:
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)

        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
            m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
        m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)
        subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)

        http_host = hosts.get('http')
        if http_host and m3u8_formats and not signed:
            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
            qualities_length = len(qualities)
            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                i = 0
                for f in m3u8_formats:
                    if f['vcodec'] != 'none':
                        for protocol in ('http', 'https'):
                            http_f = f.copy()
                            del http_f['manifest_url']
                            http_url = re.sub(
                                REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
                            http_f.update({
                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                'url': http_url,
                                'protocol': protocol,
                            })
                            formats.append(http_f)
                        i += 1

        return formats, subtitles
    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
        query = urllib.parse.urlparse(url).query
        url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
        mobj = re.search(
            r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
        url_base = mobj.group('url')
        http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
        formats = []

        def manifest_url(manifest):
            m_url = f'{http_base_url}/{manifest}'
            if query:
                m_url += '?%s' % query
            return m_url

        if 'm3u8' not in skip_protocols:
            formats.extend(self._extract_m3u8_formats(
                manifest_url('playlist.m3u8'), video_id, 'mp4',
                m3u8_entry_protocol, m3u8_id='hls', fatal=False))
        if 'f4m' not in skip_protocols:
            formats.extend(self._extract_f4m_formats(
                manifest_url('manifest.f4m'),
                video_id, f4m_id='hds', fatal=False))
        if 'dash' not in skip_protocols:
            formats.extend(self._extract_mpd_formats(
                manifest_url('manifest.mpd'),
                video_id, mpd_id='dash', fatal=False))
        if re.search(r'(?:/smil:|\.smil)', url_base):
            if 'smil' not in skip_protocols:
                rtmp_formats = self._extract_smil_formats(
                    manifest_url('jwplayer.smil'),
                    video_id, fatal=False)
                for rtmp_format in rtmp_formats:
                    rtsp_format = rtmp_format.copy()
                    rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                    del rtsp_format['play_path']
                    del rtsp_format['ext']
                    rtsp_format.update({
                        'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                        'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                        'protocol': 'rtsp',
                    })
                    formats.extend([rtmp_format, rtsp_format])
        else:
            for protocol in ('rtmp', 'rtsp'):
                if protocol not in skip_protocols:
                    formats.append({
                        'url': f'{protocol}:{url_base}',
                        'format_id': protocol,
                        'protocol': protocol,
                    })
        return formats
    def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
        mobj = re.search(
            r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
            webpage)
        if mobj:
            try:
                jwplayer_data = self._parse_json(mobj.group('options'),
                                                 video_id=video_id,
                                                 transform_source=transform_source)
            except ExtractorError:
                pass
            else:
                if isinstance(jwplayer_data, dict):
                    return jwplayer_data
    def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
        jwplayer_data = self._find_jwplayer_data(
            webpage, video_id, transform_source=js_to_json)
        return self._parse_jwplayer_data(
            jwplayer_data, video_id, *args, **kwargs)
    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if 'playlist' not in jwplayer_data:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            subtitles = {}
            tracks = video_data.get('tracks')
            if tracks and isinstance(tracks, list):
                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_kind = track.get('kind')
                    if not track_kind or not isinstance(track_kind, str):
                        continue
                    if track_kind.lower() not in ('captions', 'subtitles'):
                        continue
                    track_url = urljoin(base_url, track.get('file'))
                    if not track_url:
                        continue
                    subtitles.setdefault(track.get('label') or 'en', []).append({
                        'url': self._proto_relative_url(track_url)
                    })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)
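    # Illustrative input sketch (not part of the original source; values are
    # hypothetical): the minimal setup-options shape accepted above. Flattened
    # inputs such as a bare {'file': ...} dict are normalized into this form
    # by the backward-compatibility shims at the top of the method.
    #   self._parse_jwplayer_data({
    #       'playlist': [{
    #           'mediaid': 'abc123',
    #           'title': 'Example video',
    #           'sources': [{'file': 'https://cdn.example.com/master.m3u8', 'type': 'hls'}],
    #       }],
    #   }, video_id)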
    def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                                m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        urls = []
        formats = []
        for source in jwplayer_sources_data:
            if not isinstance(source, dict):
                continue
            source_url = urljoin(
                base_url, self._proto_relative_url(source.get('file')))
            if not source_url or source_url in urls:
                continue
            urls.append(source_url)
            source_type = source.get('type') or ''
            ext = mimetype2ext(source_type) or determine_ext(source_url)
            if source_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=m3u8_id, fatal=False))
            elif source_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    source_url, video_id, mpd_id=mpd_id, fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
            elif source_type.startswith('audio') or ext in (
                    'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
                formats.append({
                    'url': source_url,
                    'vcodec': 'none',
                    'ext': ext,
                })
            else:
                height = int_or_none(source.get('height'))
                if height is None:
                    # Often no height is provided but there is a label in
                    # format like "1080p", "720p SD", or 1080.
                    height = int_or_none(self._search_regex(
                        r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
                        'height', default=None))
                a_format = {
                    'url': source_url,
                    'width': int_or_none(source.get('width')),
                    'height': height,
                    'tbr': int_or_none(source.get('bitrate'), scale=1000),
                    'filesize': int_or_none(source.get('filesize')),
                    'ext': ext,
                }
                if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
                    # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                    # of jwplayer.flash.swf
                    rtmp_url_parts = re.split(
                        r'((?:mp4|mp3|flv):)', source_url, 1)
                    if len(rtmp_url_parts) == 3:
                        rtmp_url, prefix, play_path = rtmp_url_parts
                        a_format.update({
                            'url': rtmp_url,
                            'play_path': prefix + play_path,
                        })
                    if rtmp_params:
                        a_format.update(rtmp_params)
                formats.append(a_format)
        return formats
    def _live_title(self, name):
        self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
        return name
    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res
    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
        cookie = http.cookiejar.Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a http.cookies.SimpleCookie with the cookies for the url """
        return LenientSimpleCookie(self._downloader._calc_cookies(url))
    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
        Apply first Set-Cookie header instead of the last. Experimental.

        Some sites (e.g. [1-3]) may serve two cookies under the same name
        in the Set-Cookie header and expect the first (old) one to be set
        rather than the second (new). However, per RFC 6265 the newer cookie
        should be set into the cookie store, which is what actually happens.
        We work around this issue by manually resetting the cookie to
        the first one.
        1. https://new.vk.com/
        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
        3. https://learning.oreilly.com/
        """
        for header, cookies in url_handle.headers.items():
            if header.lower() != 'set-cookie':
                continue
            cookies = cookies.encode('iso-8859-1').decode('utf-8')
            cookie_value = re.search(
                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
            if cookie_value:
                value, domain = cookie_value.groups()
                self._set_cookie(domain, cookie, value)
                break
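    # Illustrative scenario (not part of the original source; header values are
    # hypothetical): given a response carrying
    #   Set-Cookie: sid=old; Domain=.example.com, sid=new; Domain=.example.com
    # calling self._apply_first_set_cookie_header(url_handle, 'sid') re-sets
    # "sid" to "old", since the regex stops at the first occurrence of the name.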
    @classmethod
    def get_testcases(cls, include_onlymatching=False):
        # Do not look in super classes
        t = vars(cls).get('_TEST')
        if t:
            assert not hasattr(cls, '_TESTS'), f'{cls.ie_key()}IE has _TEST and _TESTS'
            tests = [t]
        else:
            tests = vars(cls).get('_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = cls.ie_key()
            yield t

    @classmethod
    def get_webpage_testcases(cls):
        tests = vars(cls).get('_WEBPAGE_TESTS', [])
        for t in tests:
            t['name'] = cls.ie_key()
        return tests
    @classproperty(cache=True)
    def age_limit(cls):
        """Get age limit from the testcases"""
        return max(traverse_obj(
            (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
            (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])
    @classproperty(cache=True)
    def _RETURN_TYPE(cls):
        """What the extractor returns: "video", "playlist", "any", or None (Unknown)"""
        tests = tuple(cls.get_testcases(include_onlymatching=False))
        if not tests:
            return None
        elif not any(k.startswith('playlist') for test in tests for k in test):
            return 'video'
        elif all(any(k.startswith('playlist') for k in test) for test in tests):
            return 'playlist'
        return 'any'
    @classmethod
    def is_single_video(cls, url):
        """Returns whether the URL is of a single video, None if unknown"""
        assert cls.suitable(url), 'The URL must be suitable for the extractor'
        return {'video': True, 'playlist': False}.get(cls._RETURN_TYPE)

    @classmethod
    def is_suitable(cls, age_limit):
        """Test whether the extractor is generally suitable for the given age limit"""
        return not age_restricted(cls.age_limit, age_limit)
    @classmethod
    def description(cls, *, markdown=True, search_examples=None):
        """Description of the extractor"""
        desc = ''
        if cls._NETRC_MACHINE:
            if markdown:
                desc += f' [<abbr title="netrc machine"><em>{cls._NETRC_MACHINE}</em></abbr>]'
            else:
                desc += f' [{cls._NETRC_MACHINE}]'
        if cls.IE_DESC is False:
            desc += ' [HIDDEN]'
        elif cls.IE_DESC:
            desc += f' {cls.IE_DESC}'
        if cls.SEARCH_KEY:
            desc += f'; "{cls.SEARCH_KEY}:" prefix'
            if search_examples:
                _COUNTS = ('', '5', '10', 'all')
                desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
        if not cls.working():
            desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'

        # Escape emojis. Ref: https://github.com/github/markup/issues/1153
        name = (' - **%s**' % re.sub(r':(\w+:)', ':\u200B\\g<1>', cls.IE_NAME)) if markdown else cls.IE_NAME
        return f'{name}:{desc}' if desc else name
    def extract_subtitles(self, *args, **kwargs):
        if (self.get_param('writesubtitles', False)
                or self.get_param('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    class CommentsDisabled(Exception):
        """Raise in _get_comments if comments are disabled for the video"""

    def extract_comments(self, *args, **kwargs):
        if not self.get_param('getcomments'):
            return None
        generator = self._get_comments(*args, **kwargs)

        def extractor():
            comments = []
            interrupted = True
            try:
                while True:
                    comments.append(next(generator))
            except StopIteration:
                interrupted = False
            except KeyboardInterrupt:
                self.to_screen('Interrupted by user')
            except self.CommentsDisabled:
                return {'comments': None, 'comment_count': None}
            except Exception as e:
                if self.get_param('ignoreerrors') is not True:
                    raise
                self._downloader.report_error(e)
            comment_count = len(comments)
            self.to_screen(f'Extracted {comment_count} comments')
            return {
                'comments': comments,
                'comment_count': None if interrupted else comment_count
            }
        # The actual fetching is deferred: the caller invokes the returned callable
        return extractor

    def _get_comments(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
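    # Illustrative subclass sketch (not part of the original source; the API
    # endpoint and field names are hypothetical): _get_comments is written as a
    # generator so extract_comments can count results and survive interruption.
    #   def _get_comments(self, video_id):
    #       data = self._download_json(f'https://example.com/api/comments/{video_id}', video_id)
    #       for item in data['items']:
    #           yield {'id': item['id'], 'text': item['text']}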
    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs/data
        will be dropped. """
        list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
        ret = list(subtitle_list1)
        ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
        return ret

    @classmethod
    def _merge_subtitles(cls, *dicts, target=None):
        """ Merge subtitle dictionaries, language by language. """
        if target is None:
            target = {}
        for d in dicts:
            for lang, subs in d.items():
                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
        return target
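    # Illustrative example (not part of the original source; URLs are
    # hypothetical): merging is per-language, and exact URL/data duplicates
    # are dropped.
    #   cls._merge_subtitles(
    #       {'en': [{'url': 'https://example.com/a.vtt'}]},
    #       {'en': [{'url': 'https://example.com/a.vtt'}],
    #        'de': [{'url': 'https://example.com/b.vtt'}]})
    #   == {'en': [{'url': 'https://example.com/a.vtt'}],
    #       'de': [{'url': 'https://example.com/b.vtt'}]}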
    def extract_automatic_captions(self, *args, **kwargs):
        if (self.get_param('writeautomaticsub', False)
                or self.get_param('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    @functools.cached_property
    def _cookies_passed(self):
        """Whether cookies have been passed to YoutubeDL"""
        return self.get_param('cookiefile') is not None or self.get_param('cookiesfrombrowser') is not None

    def mark_watched(self, *args, **kwargs):
        if not self.get_param('mark_watched', False):
            return
        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
            self._mark_watched(*args, **kwargs)

    def _mark_watched(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')
    def geo_verification_headers(self):
        headers = {}
        geo_verification_proxy = self.get_param('geo_verification_proxy')
        if geo_verification_proxy:
            headers['Ytdl-request-proxy'] = geo_verification_proxy
        return headers

    @staticmethod
    def _generic_id(url):
        return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
    def _generic_title(self, url='', webpage='', *, default=None):
        return (self._og_search_title(webpage, default=None)
                or self._html_extract_title(webpage, default=None)
                or urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
                or default)
    @staticmethod
    def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
        all_known = all(map(
            lambda x: x is not None,
            (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
        return (
            'private' if is_private
            else 'premium_only' if needs_premium
            else 'subscriber_only' if needs_subscription
            else 'needs_auth' if needs_auth
            else 'unlisted' if is_unlisted
            else 'public' if all_known
            else None)
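    # Illustrative example (not part of the original source): the first truthy
    # flag wins, and 'public' is only returned when every flag was explicitly
    # provided; otherwise the availability stays None (unknown).
    #   _availability(is_private=False, needs_premium=False, needs_subscription=False,
    #                 needs_auth=False, is_unlisted=True)  ->  'unlisted'
    #   _availability(is_private=False)                    ->  None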
    def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
        """
        @returns            A list of values for the extractor argument given by "key"
                            or "default" if no such key is present
        @param default      The default value to return when the key is not present (default: [])
        @param casesense    When false, the values are converted to lower case
        """
        ie_key = ie_key if isinstance(ie_key, str) else (ie_key or self).ie_key()
        val = traverse_obj(self._downloader.params, ('extractor_args', ie_key.lower(), key))
        if val is None:
            return [] if default is NO_DEFAULT else default
        return list(val) if casesense else [x.lower() for x in val]
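    # Illustrative example (not part of the original source): with the CLI
    # option --extractor-args "youtube:player_client=android,web", a YouTube
    # extractor would see
    #   self._configuration_arg('player_client')  ->  ['android', 'web']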
    def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
        if not playlist_id or not video_id:
            return not video_id

        no_playlist = (smuggled_data or {}).get('force_noplaylist')
        if no_playlist is not None:
            return not no_playlist

        video_id = '' if video_id is True else f' {video_id}'
        playlist_id = '' if playlist_id is True else f' {playlist_id}'
        if self.get_param('noplaylist'):
            self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
            return False
        self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
        return True
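    # Illustrative call-site sketch (not part of the original source; both
    # helper names are hypothetical) for a page that contains a video inside
    # a playlist:
    #   if self._yes_playlist(playlist_id, video_id, smuggled_data):
    #       return self._extract_playlist(playlist_id)
    #   return self._extract_video(video_id)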
    def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
        RetryManager.report_retry(
            err, _count or int(fatal), _retries,
            info=self.to_screen, warn=self.report_warning, error=None if fatal else self.report_warning,
            sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))

    def RetryManager(self, **kwargs):
        return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)

    def _extract_generic_embeds(self, url, *args, info_dict={}, note='Extracting generic embeds', **kwargs):
        display_id = traverse_obj(info_dict, 'display_id', 'id')
        self.to_screen(f'{format_field(display_id, None, "%s: ")}{note}')
        return self._downloader.get_info_extractor('Generic')._extract_embeds(
            smuggle_url(url, {'block_ies': [self.ie_key()]}), *args, **kwargs)
    @classmethod
    def extract_from_webpage(cls, ydl, url, webpage):
        ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
              else ydl.get_info_extractor(cls.ie_key()))
        for info in ie._extract_from_webpage(url, webpage) or []:
            # url = None since we do not want to set (webpage/original)_url
            ydl.add_default_extra_info(info, ie, None)
            yield info

    @classmethod
    def _extract_from_webpage(cls, url, webpage):
        for embed_url in orderedSet(
                cls._extract_embed_urls(url, webpage) or [], lazy=True):
            yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)
    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """@returns all the embed urls on the webpage"""
        if '_EMBED_URL_RE' not in cls.__dict__:
            assert isinstance(cls._EMBED_REGEX, (list, tuple))
            for idx, regex in enumerate(cls._EMBED_REGEX):
                assert regex.count('(?P<url>') == 1, \
                    f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
            cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))

        for regex in cls._EMBED_URL_RE:
            for mobj in regex.finditer(webpage):
                embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
                if cls._VALID_URL is False or cls.suitable(embed_url):
                    yield embed_url
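    # Illustrative example (not part of the original source; the pattern is
    # hypothetical): a concrete extractor opts into this machinery by defining
    # _EMBED_REGEX with exactly one named "url" group per pattern.
    #   _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>https?://player\.example\.com/embed/[^"\']+)']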
    class StopExtraction(Exception):
        pass

    @classmethod
    def _extract_url(cls, webpage):  # TODO: Remove
        """Only for compatibility with some older extractors"""
        return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
    def __init_subclass__(cls, *, plugin_name=None, **kwargs):
        if plugin_name:
            mro = inspect.getmro(cls)
            super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
            cls.IE_NAME, cls.ie_key = f'{super_class.IE_NAME}+{plugin_name}', super_class.ie_key
            while getattr(super_class, '__wrapped__', None):
                super_class = super_class.__wrapped__
            setattr(sys.modules[super_class.__module__], super_class.__name__, cls)

        return super().__init_subclass__(**kwargs)
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search query extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
    """

    _MAX_RESULTS = float('inf')
    _RETURN_TYPE = 'playlist'

    @classproperty
    def _VALID_URL(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
    def _real_extract(self, query):
        prefix, query = self._match_valid_url(query).group('prefix', 'query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(f'invalid download number {n} for query "{query}"')
            elif n > self._MAX_RESULTS:
                self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query.
        Either this function or _search_results must be overridden by subclasses """
        return self.playlist_result(
            itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
            query, query)

    def _search_results(self, query):
        """Returns an iterator of search results"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @classproperty
    def SEARCH_KEY(cls):
        return cls._SEARCH_KEY
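
# Illustrative subclass sketch (not part of the original source; all names are
# hypothetical): a minimal search extractor only needs _SEARCH_KEY and a
# _search_results generator; URLs like "exsearch5:kittens" then just work.
#
#   class ExampleSearchIE(SearchInfoExtractor):
#       IE_NAME = 'example:search'
#       _SEARCH_KEY = 'exsearch'
#
#       def _search_results(self, query):
#           data = self._download_json(
#               'https://example.com/api/search', query, query={'q': query})
#           for entry in data['results']:
#               yield self.url_result(entry['url'])
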
class UnsupportedURLIE(InfoExtractor):
    _VALID_URL = '.*'
    _ENABLED = False
    IE_DESC = False

    def _real_extract(self, url):
        raise UnsupportedError(url)