import xml.etree.ElementTree

from ..compat import functools  # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..cookies import LenientSimpleCookie
from ..downloader.f4m import get_base_url, remove_encrypted_media
from ..utils import (  # most of this import list is elided in this excerpt
    parse_m3u8_attributes,
)
93 """Information Extractor class.
95 Information extractors are the classes that, given a URL, extract
96 information about the video (or videos) the URL refers to. This
97 information includes the real video URL, the video title, author and
98 others. The information is stored in a dictionary which is then
99 passed to the YoutubeDL. The YoutubeDL processes this
100 information possibly downloading the video to the file system, among
101 other possible outcomes.
103 The type field determines the type of the result.
104 By far the most common value (and the default if _type is missing) is
105 "video", which indicates a single video.
107 For a video, the dictionaries must include the following fields:
109 id: Video identifier.
110 title: Video title, unescaped. Set to an empty string if video has
111 no title as opposed to "None" which signifies that the
112 extractor failed to obtain a title
114 Additionally, it must contain either a formats entry or a url one:
116 formats: A list of dictionaries for each format available, ordered
117 from worst to best quality.
120 * url The mandatory URL representing the media:
121 for plain file media - HTTP URL of this file,
123 for HLS - URL of the M3U8 media playlist,
124 for HDS - URL of the F4M manifest,
126 - HTTP URL to plain file media (in case of
128 - URL of the MPD manifest or base URL
129 representing the media if MPD manifest
130 is parsed from a string (in case of
132 for MSS - URL of the ISM manifest.
134 The URL of the manifest file in case of
136 for HLS - URL of the M3U8 master playlist,
137 for HDS - URL of the F4M manifest,
138 for DASH - URL of the MPD manifest,
139 for MSS - URL of the ISM manifest.
140 * manifest_stream_number (For internal use only)
141 The index of the stream in the manifest file
142 * ext Will be calculated from URL if missing
143 * format A human-readable description of the format
144 ("mp4 container with h264/opus").
145 Calculated from the format_id, width, height.
146 and format_note fields if missing.
147 * format_id A short description of the format
148 ("mp4_h264_opus" or "19").
149 Technically optional, but strongly recommended.
150 * format_note Additional info about the format
151 ("3D" or "DASH video")
152 * width Width of the video, if known
153 * height Height of the video, if known
154 * aspect_ratio Aspect ratio of the video, if known
155 Automatically calculated from width and height
156 * resolution Textual description of width and height
157 Automatically calculated from width and height
158 * dynamic_range The dynamic range of the video. One of:
159 "SDR" (None), "HDR10", "HDR10+, "HDR12", "HLG, "DV"
160 * tbr Average bitrate of audio and video in KBit/s
161 * abr Average audio bitrate in KBit/s
162 * acodec Name of the audio codec in use
163 * asr Audio sampling rate in Hertz
164 * audio_channels Number of audio channels
165 * vbr Average video bitrate in KBit/s
167 * vcodec Name of the video codec in use
168 * container Name of the container format
169 * filesize The number of bytes, if known in advance
170 * filesize_approx An estimate for the number of bytes
171 * player_url SWF Player URL (used for rtmpdump).
172 * protocol The protocol that will be used for the actual
173 download, lower-case. One of "http", "https" or
174 one of the protocols defined in downloader.PROTOCOL_MAP
176 Base URL for fragments. Each fragment's path
177 value (if present) will be relative to
179 * fragments A list of fragments of a fragmented media.
180 Each fragment entry must contain either an url
181 or a path. If an url is present it should be
182 considered by a client. Otherwise both path and
183 fragment_base_url must be present. Here is
184 the list of all potential fields:
185 * "url" - fragment's URL
186 * "path" - fragment's path relative to
188 * "duration" (optional, int or float)
189 * "filesize" (optional, int)
190 * is_from_start Is a live format that can be downloaded
191 from the start. Boolean
192 * preference Order number of this format. If this field is
193 present and not None, the formats get sorted
194 by this field, regardless of all other values.
195 -1 for default (order by other properties),
196 -2 or smaller for less than default.
197 < -1000 to hide the format (if there is
198 another one which is strictly better)
199 * language Language code, e.g. "de" or "en-US".
200 * language_preference Is this in the language mentioned in
202 10 if it's what the URL is about,
203 -1 for default (don't know),
204 -10 otherwise, other values reserved for now.
205 * quality Order number of the video quality of this
206 format, irrespective of the file format.
207 -1 for default (order by other properties),
208 -2 or smaller for less than default.
209 * source_preference Order number for this video source
210 (quality takes higher priority)
211 -1 for default (order by other properties),
212 -2 or smaller for less than default.
213 * http_headers A dictionary of additional HTTP headers
214 to add to the request.
215 * stretched_ratio If given and not 1, indicates that the
216 video's pixels are not square.
217 width : height ratio as float.
218 * no_resume The server does not support resuming the
219 (HTTP or RTMP) download. Boolean.
220 * has_drm The format has DRM and cannot be downloaded. Boolean
221 * downloader_options A dictionary of downloader options
222 (For internal use only)
223 * http_chunk_size Chunk size for HTTP downloads
224 * ffmpeg_args Extra arguments for ffmpeg downloader
225 RTMP formats can also have the additional fields: page_url,
226 app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
227 rtmp_protocol, rtmp_real_time
229 url: Final video URL.
230 ext: Video filename extension.
231 format: The video format, defaults to ext (used for --get-format)
232 player_url: SWF Player URL (used for rtmpdump).
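
    As an illustration only (not any real extractor's output), a minimal
    info_dict with two formats might look like this; all values below are
    hypothetical:

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://cdn.example.com/video_360p.mp4',
                'format_id': 'mp4-360',
                'ext': 'mp4',
                'width': 640,
                'height': 360,
            }, {
                'url': 'https://cdn.example.com/video_720p.mp4',
                'format_id': 'mp4-720',
                'ext': 'mp4',
                'width': 1280,
                'height': 720,
            }],
        }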

    The following fields are optional:

    direct:         True if a direct video file was given (must only be set by GenericIE)
    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
                        * "http_headers" (dict) - HTTP headers for the request
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    timestamp:      UNIX timestamp of the moment the video was uploaded.
    upload_date:    Video upload date in UTC (YYYYMMDD).
                    If not explicitly set, calculated from timestamp
    release_timestamp: UNIX timestamp of the moment the video was released.
                    If it is not clear whether to use timestamp or this, use the former
    release_date:   The date (YYYYMMDD) when the video was released in UTC.
                    If not explicitly set, calculated from release_timestamp
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date:  The date (YYYYMMDD) when the video was last modified in UTC.
                    If not explicitly set, calculated from modified_timestamp
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    channel_follower_count: Number of followers of the channel.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    It can optionally also have:
                        * "name": Name or description of the subtitles
                        * "http_headers": A dictionary of additional HTTP headers
                                          to add to the request.
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles'; contains automatically generated
                    captions instead of normal subtitles
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    concurrent_view_count: How many users are currently watching the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "author_thumbnail" - The thumbnail of the comment author
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
                        * "like_count" - Number of positive ratings of the comment
                        * "dislike_count" - Number of negative ratings of the comment
                        * "is_favorited" - Whether the comment is marked as
                                           favorite by the video uploader
                        * "author_is_uploader" - Whether the comment is made by
                                                 the video uploader
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to yt-dlp it
                    should allow to get the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    cast:           A list of the video cast
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    was_live:       True, False, or None (=unknown). Whether this video was
                    originally a live stream.
    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
                    or 'post_live' (was live, but VOD is not yet processed)
                    If absent, automatically set from is_live, was_live
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)
    playable_in_embed: Whether this video is allowed to play in embedded
                    players on other sites. Can be True (=always allowed),
                    False (=never allowed), None (=unknown), or a string
                    specifying the criteria for embedability; e.g. 'whitelist'
    availability:   Under what condition the video is available. One of
                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
                    to set it
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    _format_sort_fields: A list of fields to use for sorting formats
    __post_extractor: A function to be called just before the metadata is
                    written to either disk, logger or console. The function
                    must return a dict which will be added to the info_dict.
                    This is useful for additional information that is
                    time-consuming to extract. Note that the fields thus
                    extracted will not be available to output template and
                    match_filter. So, only "comments" and "comment_count" are
                    currently allowed to be extracted via this method.
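
    For illustration, optional fields are added alongside the mandatory ones;
    the values below are hypothetical:

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [...],
            'thumbnails': [{'url': 'https://cdn.example.com/thumb.jpg', 'width': 1280, 'height': 720}],
            'subtitles': {'en': [{'url': 'https://cdn.example.com/subs.en.vtt', 'ext': 'vtt'}]},
            'duration': 123.4,
            'chapters': [{'start_time': 0, 'end_time': 60, 'title': 'Intro'}],
        }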

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    series_id:      Id of the series or programme the video episode belongs to, as a unicode string.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike the mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.
    composer:       Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

    section_start:  Start time of the section in seconds
    section_end:    End time of the section in seconds

    The following fields should only be set for storyboards:
    rows:           Number of rows in each storyboard fragment, as an integer
    columns:        Number of columns in each storyboard fragment, as an integer

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.

    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", and any other relevant
    attributes with the same semantics as videos (see above).

    It can also have the following optional fields:

    playlist_count: The total number of videos in a playlist. If not given,
                    YoutubeDL tries to calculate it from "entries"
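
    As a hypothetical illustration, a playlist result might look like:

        {
            '_type': 'playlist',
            'id': 'dancing-naked-mole-rats-s01',
            'title': 'Dancing naked mole rats: Season 1',
            'entries': [video_info_dict_1, video_info_dict_2],
        }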

    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.

    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.

    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.
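
    For example (all values hypothetical), an extractor that knows a better
    title than the embedded video service could return:

        {
            '_type': 'url_transparent',
            'url': 'https://videoservice.example.com/embed/4234987',
            'ie_key': 'VideoService',
            'title': 'Dancing naked mole rats',
        }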

    Subclasses of this should also be added to the list of extractors and
    should define a _VALID_URL regexp and re-define the _real_extract() and
    (optionally) _real_initialize() methods.

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.

    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
    the HTML of Generic webpages. It may also override _extract_embed_urls
    or _extract_from_webpage as necessary. While these are normally classmethods,
    _extract_from_webpage is allowed to be an instance method.

    _extract_from_webpage may raise self.StopExtraction() to stop further
    processing of the webpage and obtain exclusive rights to it. This is useful
    when the extractor cannot reliably be matched using just the URL,
    e.g. invidious/peertube instances.

    Embed-only extractors can be defined by setting _VALID_URL = False.

    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will
    be called between _initialize_pre_login and _real_initialize if credentials
    are passed by the user. In cases where it is necessary to have the login
    process as part of the extraction rather than initialization, _perform_login
    can be left undefined.

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    Though it won't disable explicit geo restriction bypass based on
    country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by
    geo restriction bypass mechanism right away in order to bypass
    geo restriction, of course, if the mechanism is not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    The _ENABLED attribute should be set to False for IEs that
    are disabled by default and must be explicitly enabled.

    The _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """
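
    # The sketch below is illustrative only and is not part of this module:
    # a minimal subclass following the docstring above. The site name, URL
    # pattern and login flow are hypothetical.
    #
    #   class ExampleIE(InfoExtractor):
    #       _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
    #       _NETRC_MACHINE = 'example'  # enables --username/--password/netrc login
    #
    #       def _perform_login(self, username, password):
    #           pass  # POST the credentials to the (hypothetical) site here
    #
    #       def _real_extract(self, url):
    #           video_id = self._match_id(url)
    #           webpage = self._download_webpage(url, video_id)
    #           return {
    #               'id': video_id,
    #               'title': self._html_extract_title(webpage),
    #               'url': self._og_search_video_url(webpage),
    #           }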

    _x_forwarded_for_ip = None
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _NETRC_MACHINE = None

    def _login_hint(self, method=NO_DEFAULT, netrc=None):
        password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
        return {
            'any': f'Use --cookies, --cookies-from-browser, {password_hint}',
            'password': f'Use {password_hint}',
            'cookies': (
                'Use --cookies-from-browser or --cookies for the authentication. '
                'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies'),
        }[method if method is not NO_DEFAULT else 'any' if self.supports_login() else 'cookies']

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance).
        If a downloader is not passed during initialization,
        it must be set using "set_downloader()" before "extract()" is called"""
        self._x_forwarded_for_ip = None
        self._printed_messages = set()
        self.set_downloader(downloader)

    @classmethod
    def _match_valid_url(cls, url):
        if cls._VALID_URL is False:
            return None
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls._match_valid_url(url) is not None

    @classmethod
    def _match_id(cls, url):
        return cls._match_valid_url(url).group('id')

    @classmethod
    def get_temp_id(cls, url):
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    @classmethod
    def supports_login(cls):
        return bool(cls._NETRC_MACHINE)

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._printed_messages = set()
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        self._initialize_pre_login()
        if self.supports_login():
            username, password = self._get_login_info()
            if username:
                self._perform_login(username, password)
        elif self.get_param('username') and False not in (self.IE_DESC, self._NETRC_MACHINE):
            self.report_warning(f'Login with password is not supported for this website. {self._login_hint("cookies")}')
        self._real_initialize()

    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in geo bypass context passed as first argument. It may
        contain following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)
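
        For example, a hypothetical extractor that only learns the allowed
        countries during extraction could call:

            self._initialize_geo_bypass({'countries': ['DE', 'FR']})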
        """
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self.get_param('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self.get_param('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self.get_param('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                self._downloader.write_debug(
                    f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    self.to_screen('Extracting URL: %s' % (
                        url if self.get_param('verbose') else truncate_string(url, 100, 20)))
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles') or {}
                    if 'no-live-chat' in self.get_param('compat_opts'):
                        for lang in ('live_chat', 'comments', 'danmaku'):
                            subtitles.pop(lang, None)
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except UnsupportedError:
            raise
        except ExtractorError as e:
            e.video_id = e.video_id or self.get_temp_id(url)
            e.ie = e.ie or self.IE_NAME
            e.traceback = e.traceback or sys.exc_info()[2]
            raise
        except http.client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))

    def __maybe_fake_ip_and_retry(self, countries):
        if (not self.get_param('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self.get_param('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets a YoutubeDL instance as the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _initialize_pre_login(self):
        """ Initialization before login. Redefine in subclasses."""
        pass

    def _perform_login(self, username, password):
        """ Login with username and password. Redefine in subclasses."""
        pass

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')
751 """A string for getting the InfoExtractor with get_info_extractor"""
752 return cls
.__name
__[:-2]
756 return cls
.__name
__[:-2]

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)

    def _create_request(self, url_or_request, data=None, headers=None, query=None):
        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
        return sanitized_Request(url_or_request, data, headers or {})

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None,
                         fatal=True, data=None, headers=None, query=None, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if not self._downloader._first_webpage_request:
            sleep_interval = self.get_param('sleep_interval_requests') or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        else:
            self._downloader._first_webpage_request = False

        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen(str(note))
            else:
                self.to_screen(f'{video_id}: {note}')

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            headers = (headers or {}).copy()
            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)

        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = f'{errnote}: {error_to_compat_str(err)}'
            if fatal:
                raise ExtractorError(errmsg, cause=err)
            else:
                self.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
                                 encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        Arguments:
        url_or_request -- plain text URL as a string or
            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractionError to be raised,
            otherwise a warning will be reported and extraction continued
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows to accept failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
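
        For example, to also treat a 404 response as a valid page (usage
        illustrative only):

            webpage, urlh = self._download_webpage_handle(
                url, video_id, expected_status=404)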
        """

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, str):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal,
                                     data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _request_dump_filename(self, url, video_id):
        basen = f'{video_id}_{url}'
        trim_length = self.get_param('trim_file_name') or 240
        if len(basen) > trim_length:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:trim_length - len(h)] + h
        filename = sanitize_filename(f'{basen}.dump', restricted=True)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = fR'\\?\{absfilepath}'
        return filename

    def __decode_webpage(self, webpage_bytes, encoding, headers):
        if not encoding:
            encoding = self._guess_encoding_from_content(headers.get('Content-Type', ''), webpage_bytes)
        try:
            return webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            return webpage_bytes.decode('utf-8', 'replace')

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True,
                              prefix=None, encoding=None):
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages'):
            filename = self._request_dump_filename(urlh.geturl(), video_id)
            self.to_screen(f'Saving request to {filename}')
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        content = self.__decode_webpage(webpage_bytes, encoding, urlh.headers)
        self.__check_blocked(content)

        return content

    def __print_error(self, errnote, fatal, video_id, err):
        if fatal:
            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
        elif errnote:
            self.report_warning(f'{video_id}: {errnote}: {err}')

    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
        if transform_source:
            xml_string = transform_source(xml_string)
        try:
            return compat_etree_fromstring(xml_string.encode('utf-8'))
        except xml.etree.ElementTree.ParseError as ve:
            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
        try:
            return json.loads(
                json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
        except ValueError as ve:
            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_socket_response_as_json(self, data, *args, **kwargs):
        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)

    def __create_download_methods(name, parser, note, errnote, return_value):

        def parse(ie, content, *args, errnote=errnote, **kwargs):
            if parser is None:
                return content
            if errnote is False:
                kwargs['errnote'] = errnote
            # parser is fetched by name so subclasses can override it
            return getattr(ie, parser)(content, *args, **kwargs)

        def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                            fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            res = self._download_webpage_handle(
                url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
                data=data, headers=headers, query=query, expected_status=expected_status)
            if res is False:
                return res
            content, urlh = res
            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh

        def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                             fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            if self.get_param('load_pages'):
                url_or_request = self._create_request(url_or_request, data, headers, query)
                filename = self._request_dump_filename(url_or_request.full_url, video_id)
                self.to_screen(f'Loading request from {filename}')
                try:
                    with open(filename, 'rb') as dumpf:
                        webpage_bytes = dumpf.read()
                except OSError as e:
                    self.report_warning(f'Unable to load request from disk: {e}')
                else:
                    content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
            kwargs = {
                'note': note,
                'errnote': errnote,
                'transform_source': transform_source,
                'fatal': fatal,
                'encoding': encoding,
                'data': data,
                'headers': headers,
                'query': query,
                'expected_status': expected_status,
            }
            if parser is None:
                kwargs.pop('transform_source')
            # The method is fetched by name so subclasses can override _download_..._handle
            res = getattr(self, download_handle.__name__)(url_or_request, video_id, **kwargs)
            return res if res is False else res[0]

        def impersonate(func, name, return_value):
            func.__name__, func.__qualname__ = name, f'InfoExtractor.{name}'
            func.__doc__ = f'''
                @param transform_source     Apply this transformation before parsing
                @returns                    {return_value}

                See _download_webpage_handle docstring for other arguments specification
            '''

        impersonate(download_handle, f'_download_{name}_handle', f'({return_value}, URL handle)')
        impersonate(download_content, f'_download_{name}', f'{return_value}')
        return download_handle, download_content

    _download_xml_handle, _download_xml = __create_download_methods(
        'xml', '_parse_xml', 'Downloading XML', 'Unable to download XML', 'xml as an xml.etree.ElementTree.Element')
    _download_json_handle, _download_json = __create_download_methods(
        'json', '_parse_json', 'Downloading JSON metadata', 'Unable to download JSON metadata', 'JSON object as a dict')
    _download_socket_json_handle, _download_socket_json = __create_download_methods(
        'socket_json', '_parse_socket_response_as_json', 'Polling socket', 'Unable to poll socket', 'JSON object as a dict')
    __download_webpage = __create_download_methods('webpage', None, None, None, 'data of the page as a string')[1]
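
    # Illustrative usage of the generated helpers (the URL here is
    # hypothetical, not part of this module):
    #   data = self._download_json(
    #       f'https://example.com/api/video/{video_id}', video_id,
    #       note='Downloading video metadata', fatal=False)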

    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=NO_DEFAULT, *args, **kwargs):
        """
        Return the data of the page as a string.

        Keyword arguments:
        tries -- number of tries
        timeout -- sleep interval between tries

        See _download_webpage_handle docstring for other arguments specification.
        """

        R''' # NB: These are unused; should they be deprecated?
        if tries != 1:
            self._downloader.deprecation_warning('tries argument is deprecated in InfoExtractor._download_webpage')
        if timeout is NO_DEFAULT:
            timeout = 5
        else:
            self._downloader.deprecation_warning('timeout argument is deprecated in InfoExtractor._download_webpage')
        '''

        try_count = 0
        while True:
            try:
                return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
            except http.client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)

    def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
        idstr = format_field(video_id, None, '%s: ')
        msg = f'[{self.IE_NAME}] {idstr}{msg}'
        if only_once:
            if f'WARNING: {msg}' in self._printed_messages:
                return
            self._printed_messages.add(f'WARNING: {msg}')
        self._downloader.report_warning(msg, *args, **kwargs)

    def to_screen(self, msg, *args, **kwargs):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def write_debug(self, msg, *args, **kwargs):
        self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)

    def get_param(self, name, default=None, *args, **kwargs):
        if self._downloader:
            return self._downloader.params.get(name, default, *args, **kwargs)
        return default

    def report_drm(self, video_id, partial=NO_DEFAULT):
        if partial is not NO_DEFAULT:
            self._downloader.deprecation_warning('InfoExtractor.report_drm no longer accepts the argument partial')
        self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    def raise_login_required(
            self, msg='This video is only available for registered users',
            metadata_available=False, method=NO_DEFAULT):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        msg += format_field(self._login_hint(method), None, '. %s')
        raise ExtractorError(msg, expected=True)

    def raise_geo_restricted(
            self, msg='This video is not available from your location due to geo restriction',
            countries=None, metadata_available=False):
        if metadata_available and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg)
            return
        raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
        """Returns a URL that points to a page that should be processed"""
        if ie is not None:
            kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
        if video_id is not None:
            kwargs['id'] = video_id
        if video_title is not None:
            kwargs['title'] = video_title
        return {
            **kwargs,
            '_type': 'url_transparent' if url_transparent else 'url',
            'url': url,
        }

    @classmethod
    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
        return cls.playlist_result(
            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
            playlist_id, playlist_title, **kwargs)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
        """Returns a playlist"""
        if playlist_id:
            kwargs['id'] = playlist_id
        if playlist_title:
            kwargs['title'] = playlist_title
        if playlist_description is not None:
            kwargs['description'] = playlist_description
        return {
            **kwargs,
            '_type': 'multi_video' if multi_video else 'playlist',
            'entries': entries,
        }

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or a
        RegexNotFoundError, depending on fatal, specifying the field name.
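
        For example (the pattern and field name are purely illustrative):

            title = self._search_regex(
                r'<h1 class="title">([^<]+)</h1>', webpage, 'title', default=None)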
        """
        if string is None:
            mobj = None
        elif isinstance(pattern, (str, re.Pattern)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            elif isinstance(group, (list, tuple)):
                return tuple(mobj.group(g) for g in group)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None

    def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='',
                     contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT, **kwargs):
        """Searches string for the JSON object specified by start_pattern"""
        # NB: end_pattern is only used to reduce the size of the initial match
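        # Illustrative usage (the pattern and variable names are hypothetical):
        #   config = self._search_json(r'var\s+config\s*=', webpage, 'config', video_id)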
        if default is NO_DEFAULT:
            default, has_default = {}, False
        else:
            fatal, has_default = False, True

        json_string = self._search_regex(
            rf'(?:{start_pattern})\s*(?P<json>{contains_pattern})\s*(?:{end_pattern})',
            string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
        if not json_string:
            return default

        _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
        try:
            return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
        except ExtractorError as e:
            if fatal:
                raise ExtractorError(
                    f'Unable to extract {_name} - Failed to parse JSON', cause=e.cause, video_id=video_id)
            elif not has_default:
                self.report_warning(
                    f'Unable to extract {_name} - Failed to parse JSON: {e}', video_id=video_id)
        return default

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if res:
            return clean_html(res).strip()
        return res

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None
        netrc_machine = netrc_machine or self._NETRC_MACHINE

        if self.get_param('usenetrc', False):
            try:
                netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
                if os.path.isdir(netrc_file):
                    netrc_file = os.path.join(netrc_file, '.netrc')
                info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (OSError, netrc.NetrcParseError) as err:
                self.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password)
        First look for the manually specified credentials using username_option
        and password_option as keys in params dictionary. If no such credentials
        are available, look in the netrc file using the netrc_machine or
        _NETRC_MACHINE value.
        If there's no info available, return (None, None)
        """

        # Attempt to use provided username and password or .netrc data
        username = self.get_param(username_option)
        if username is not None:
            password = self.get_param(password_option)
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info
        TODO - asking the user will be required for sms/phone verify
        currently just uses the command line option
        If there's no info available, return None
        """

        tfa = self.get_param('twofactor')
        if tfa:
            return tfa

        return getpass.getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
        property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
                       % {'prop': re.escape(prop), 'sep': '(?::|[:-])'})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        prop = variadic(prop)
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, *, fatal=False, **kargs):
        return self._og_search_property('title', html, fatal=fatal, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
        return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        name = variadic(name)
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    @staticmethod
    def _rta_search(html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18

        # And then there are the jokers who advertise that they use RTA, but actually don't.
        AGE_LIMIT_MARKERS = [
            r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
        ]
        if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta(
            'isFamilyFriendly', html, default=None)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _yield_json_ld(self, html, video_id, *, fatal=True, default=NO_DEFAULT):
        """Yield all json ld objects in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        for mobj in re.finditer(JSON_LD_RE, html):
            json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
            for json_ld in variadic(json_ld_item):
                if isinstance(json_ld, dict):
                    yield json_ld

    def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, default=NO_DEFAULT):
        """Search for a video in any json ld in the html"""
        if default is not NO_DEFAULT:
            fatal = False
        info = self._json_ld(
            list(self._yield_json_ld(html, video_id, fatal=fatal, default=default)),
            video_id, fatal=fatal, expected_type=expected_type)
        if info:
            return info
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}

    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        if isinstance(json_ld, str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}

        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }

        def is_type(e, *expected_types):
            type = variadic(traverse_obj(e, '@type'))
            return any(x in type for x in expected_types)

        def extract_interaction_type(e):
            interaction_type = e.get('interactionType')
            if isinstance(interaction_type, dict):
                interaction_type = interaction_type.get('@type')
            return str_or_none(interaction_type)

        def extract_interaction_statistic(e):
            interaction_statistic = e.get('interactionStatistic')
            if isinstance(interaction_statistic, dict):
                interaction_statistic = [interaction_statistic]
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not is_type(is_e, 'InteractionCounter'):
                    continue
                interaction_type = extract_interaction_type(is_e)
                if not interaction_type:
                    continue
                # For interaction count some sites provide string instead of
                # an integer (as per spec) with non digit characters (e.g. ",")
                # so extracting count with more relaxed str_to_int
                interaction_count = str_to_int(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count

        def extract_chapter_information(e):
            chapters = [{
                'title': part.get('name'),
                'start_time': part.get('startOffset'),
                'end_time': part.get('endOffset'),
            } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
            for idx, (last_c, current_c, next_c) in enumerate(zip(
                    [{'end_time': 0}] + chapters, chapters, chapters[1:])):
                current_c['end_time'] = current_c['end_time'] or next_c['start_time']
                current_c['start_time'] = current_c['start_time'] or last_c['end_time']
                if None in current_c.values():
                    self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
                    return
            if chapters:
                chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
                info['chapters'] = chapters

        def extract_video_object(e):
            author = e.get('author')
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'ext': mimetype2ext(e.get('encodingFormat')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnails': [{'url': unescapeHTML(url)}
                               for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
                               if url_or_none(url)],
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                # author can be an instance of 'Organization' or 'Person' types.
                # both types can have 'name' property (inherited from 'Thing' type). [1]
                # however some websites are using 'Text' type instead.
                # 1. https://schema.org/VideoObject
                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
                'artist': traverse_obj(e, ('byArtist', 'name'), expected_type=str),
                'filesize': int_or_none(float_or_none(e.get('contentSize'))),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
                'tags': try_call(lambda: e.get('keywords').split(',')),
            })
            if is_type(e, 'AudioObject'):
                info.update({
                    'vcodec': 'none',
                    'abr': int_or_none(e.get('bitrate')),
                })
            extract_interaction_statistic(e)
            extract_chapter_information(e)

        def traverse_json_ld(json_ld, at_top_level=True):
            for e in variadic(json_ld):
                if not isinstance(e, dict):
                    continue
                if at_top_level and '@context' not in e:
                    continue
                if at_top_level and set(e.keys()) == {'@context', '@graph'}:
                    traverse_json_ld(e['@graph'], at_top_level=False)
                    continue
                if expected_type is not None and not is_type(e, expected_type):
                    continue
                rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
                if rating is not None:
                    info['average_rating'] = rating
                if is_type(e, 'TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif is_type(e, 'Movie'):
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif is_type(e, 'Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody') or e.get('description')),
                    })
                    if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
                        extract_video_object(e['video'][0])
                    elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
                        extract_video_object(e['subjectOf'][0])
                elif is_type(e, 'VideoObject', 'AudioObject'):
                    extract_video_object(e)
                    if expected_type is None:
                        continue
                    else:
                        break
                video = e.get('video')
                if is_type(video, 'VideoObject'):
                    extract_video_object(video)
                if expected_type is None:
                    continue
                else:
                    break

        traverse_json_ld(json_ld)
        return filter_dict(info)
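
    # A minimal sketch of how a site extractor typically consumes the JSON-LD
    # machinery above (assuming a JSON-LD <script> block is present in
    # `webpage` and the `_search_json_ld` wrapper defined earlier in this
    # class; names are illustrative, not a prescribed pattern):
    #
    #     info.update(self._search_json_ld(webpage, video_id, default={}))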

    def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
        return self._parse_json(
            self._search_regex(
                r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
                webpage, 'next.js data', fatal=fatal, **kw),
            video_id, transform_source=transform_source, fatal=fatal)
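
    # Hypothetical usage in a site extractor (the traverse_obj path depends
    # entirely on the page in question and is an assumption here):
    #
    #     nextjs = self._search_nextjs_data(webpage, video_id)
    #     title = traverse_obj(nextjs, ('props', 'pageProps', 'video', 'title'))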

    def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)):
        """Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
        rectx = re.escape(context_name)
        FUNCTION_RE = r'\(function\((?P<arg_keys>.*?)\){return\s+(?P<js>{.*?})\s*;?\s*}\((?P<arg_vals>.*?)\)'
        js, arg_keys, arg_vals = self._search_regex(
            (rf'<script>\s*window\.{rectx}={FUNCTION_RE}\s*\)\s*;?\s*</script>', rf'{rectx}\(.*?{FUNCTION_RE}'),
            webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
            default=NO_DEFAULT if fatal else (None, None, None))
        if js is None:
            return {}

        args = dict(zip(arg_keys.split(','), arg_vals.split(',')))

        for key, val in args.items():
            if val in ('undefined', 'void 0'):
                args[key] = 'null'

        ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
        return traverse_obj(ret, traverse) or {}
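
    # Sketch of the mechanism: a page embedding
    #     window.__NUXT__=(function(a,b){return {data:[{title:a}]}}("x","y"))
    # is unpacked by zipping arg_keys ('a,b') with arg_vals ('"x","y"'),
    # mapping `undefined`/`void 0` to null, and letting js_to_json substitute
    # the variables before JSON parsing (values here are illustrative).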

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not input:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)
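
    # Typical use (a sketch; the form id and field names are hypothetical):
    #
    #     data = self._form_hidden_inputs('login-form', webpage)
    #     data.update({'username': username, 'password': password})
    #     # ...then POST `data` back via _download_webpage/_request_webpage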

    @classproperty(cache=True)
    def FormatSort(cls):
        class FormatSort(FormatSorter):
            def __init__(ie, *args, **kwargs):
                super().__init__(ie._downloader, *args, **kwargs)

        deprecation_warning(
            'yt_dlp.InfoExtractor.FormatSort is deprecated and may be removed in the future. '
            'Use yt_dlp.utils.FormatSorter instead')
        return FormatSort

    def _sort_formats(self, formats, field_preference=[]):
        if not field_preference:
            self._downloader.deprecation_warning(
                'yt_dlp.InfoExtractor._sort_formats is deprecated and is no longer required')
            return
        self._downloader.deprecation_warning(
            'yt_dlp.InfoExtractor._sort_formats is deprecated and no longer works as expected. '
            'Return _format_sort_fields in the info_dict instead')
        formats[0]['__sort_fields'] = field_preference

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video', headers={}):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            self.to_screen(
                '%s: %s URL is invalid, skipping: %s'
                % (video_id, item, error_to_compat_str(e.cause)))
            return False

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self.get_param('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        scheme = scheme or self.http_scheme()
        assert scheme.endswith(':')
        return sanitize_url(url, scheme=scheme[:-1])

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return []

        manifest, urlh = res
        manifest_url = urlh.geturl()

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
            return []

        # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM protected media from final formats
        # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'base URL', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = join_nonempty(f4m_id, tbr or i)
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        quality=quality, m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
                'quality': quality,
            })
        return formats

    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
        return {
            'format_id': join_nonempty(m3u8_id, 'meta'),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 100 if preference else -100,
            'quality': quality,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }

    def _report_ignoring_subs(self, name):
        self.report_warning(bug_reports_message(
            f'Ignoring subtitle tracks found in the {name} manifest; '
            'if any subtitle tracks are missing,',
        ), only_once=True)

    def _extract_m3u8_formats(self, *args, **kwargs):
        fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('HLS')
        return fmts

    def _extract_m3u8_formats_and_subtitles(
            self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, note=None,
            errnote=None, fatal=True, live=False, data=None, headers={},
            query={}):
        if not m3u8_url:
            if errnote is not False:
                errnote = errnote or 'Failed to obtain m3u8 URL'
                if fatal:
                    raise ExtractorError(errnote, video_id=video_id)
                self.report_warning(f'{errnote}{bug_reports_message()}')
            return [], {}

        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note='Downloading m3u8 information' if note is None else note,
            errnote='Failed to download m3u8 information' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)

        if res is False:
            return [], {}

        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        return self._parse_m3u8_formats_and_subtitles(
            m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
            preference=preference, quality=quality, m3u8_id=m3u8_id,
            note=note, errnote=errnote, fatal=fatal, live=live, data=data,
            headers=headers, query=query, video_id=video_id)

    def _parse_m3u8_formats_and_subtitles(
            self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
            preference=None, quality=None, m3u8_id=None, live=False, note=None,
            errnote=None, fatal=True, data=None, headers={}, query={},
            video_id=None):
        formats, subtitles = [], {}
        has_drm = re.search('|'.join([
            r'#EXT-X-FAXS-CM:',  # Adobe Flash Access
            r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://',  # Apple FairPlay
        ]), m3u8_doc)

        def format_url(url):
            return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)

        if self.get_param('hls_split_discontinuity', False):
            def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
                if not m3u8_doc:
                    if not manifest_url:
                        return []
                    m3u8_doc = self._download_webpage(
                        manifest_url, video_id, fatal=fatal, data=data, headers=headers,
                        note=False, errnote='Failed to download m3u8 playlist information')
                    if m3u8_doc is False:
                        return []
                return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))

        else:
            def _extract_m3u8_playlist_indices(*args, **kwargs):
                return [None]

        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without qualities renditions.
        # Fortunately, master playlist can be easily distinguished from media
        # playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in master playlist thus we can
        # clearly detect media playlist with this criterion.

        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            formats = [{
                'format_id': join_nonempty(m3u8_id, idx),
                'format_index': idx,
                'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
                'quality': quality,
                'has_drm': has_drm,
            } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]

            return formats, subtitles
        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
            if media_type == 'SUBTITLES':
                # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
                # EXT-X-MEDIA tag if the media type is SUBTITLES.
                # However, lack of URI has been spotted in the wild.
                # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
                if not media.get('URI'):
                    return
                url = format_url(media['URI'])
                sub_info = {
                    'url': url,
                    'ext': determine_ext(url),
                }
                if sub_info['ext'] == 'm3u8':
                    # Per RFC 8216 §3.1, the only possible subtitle format m3u8
                    # files may contain is WebVTT:
                    # <https://tools.ietf.org/html/rfc8216#section-3.1>
                    sub_info['ext'] = 'vtt'
                    sub_info['protocol'] = 'm3u8_native'
                lang = media.get('LANGUAGE') or 'und'
                subtitles.setdefault(lang, []).append(sub_info)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                manifest_url = format_url(media_url)
                formats.extend({
                    'format_id': join_nonempty(m3u8_id, group_id, name, idx),
                    'format_note': name,
                    'format_index': idx,
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                    'quality': quality,
                    'vcodec': 'none' if media_type == 'AUDIO' else None,
                } for idx in _extract_m3u8_playlist_indices(manifest_url))

        def build_stream_name():
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF tag it still sometimes may be present (see [1]
            # or vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in HLS manifest such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                manifest_url = format_url(line.strip())

                for idx in _extract_m3u8_playlist_indices(manifest_url):
                    format_id = [m3u8_id, None, idx]
                    # Bandwidth of live streams may differ over time thus making
                    # format_id unpredictable. So it's better to keep provided
                    # format_id intact.
                    if not live:
                        stream_name = build_stream_name()
                        format_id[1] = stream_name or '%d' % (tbr or len(formats))
                    f = {
                        'format_id': join_nonempty(*format_id),
                        'format_index': idx,
                        'url': manifest_url,
                        'manifest_url': m3u8_url,
                        'tbr': tbr,
                        'ext': ext,
                        'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                        'protocol': entry_protocol,
                        'preference': preference,
                        'quality': quality,
                        'has_drm': has_drm,
                    }
                    resolution = last_stream_inf.get('RESOLUTION')
                    if resolution:
                        mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                        if mobj:
                            f['width'] = int(mobj.group('width'))
                            f['height'] = int(mobj.group('height'))
                    # Unified Streaming Platform
                    mobj = re.search(
                        r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                    if mobj:
                        abr, vbr = mobj.groups()
                        abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                        f.update({
                            'vbr': vbr,
                            'abr': abr,
                        })
                    codecs = parse_codecs(last_stream_inf.get('CODECS'))
                    f.update(codecs)
                    audio_group_id = last_stream_inf.get('AUDIO')
                    # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                    # references a rendition group MUST have a CODECS attribute.
                    # However, this is not always respected. E.g. [2]
                    # contains EXT-X-STREAM-INF tag which references AUDIO
                    # rendition group but does not have CODECS and despite
                    # referencing an audio group it represents a complete
                    # (with audio and video) format. So, for such cases we will
                    # ignore references to rendition groups and treat them
                    # as complete formats.
                    if audio_group_id and codecs and f.get('vcodec') != 'none':
                        audio_group = groups.get(audio_group_id)
                        if audio_group and audio_group[0].get('URI'):
                            # TODO: update acodec for audio only formats with
                            # the same GROUP-ID
                            f['acodec'] = 'none'
                    if not f.get('ext'):
                        f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
                    formats.append(f)

                    # for DailyMotion
                    progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                    if progressive_uri:
                        http_f = f.copy()
                        del http_f['manifest_url']
                        http_f.update({
                            'format_id': f['format_id'].replace('hls-', 'http-'),
                            'protocol': 'http',
                            'url': progressive_uri,
                        })
                        formats.append(http_f)

                last_stream_inf = {}
        return formats, subtitles
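
    # Master vs media playlist detection in a nutshell: a document such as
    #
    #     #EXTM3U
    #     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360
    #     low/index.m3u8
    #
    # has no #EXT-X-TARGETDURATION and is therefore walked as a master
    # playlist above, while one carrying #EXT-X-TARGETDURATION is returned
    # early as a single media-playlist format (illustrative snippet, not a
    # real stream).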

    def _extract_m3u8_vod_duration(
            self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):

        m3u8_vod = self._download_webpage(
            m3u8_vod_url, video_id,
            note='Downloading m3u8 VOD manifest' if note is None else note,
            errnote='Failed to download VOD manifest' if errnote is None else errnote,
            fatal=False, data=data, headers=headers, query=query)

        return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)

    def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
        if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
            return None

        return int(sum(
            float(line[len('#EXTINF:'):].split(',')[0])
            for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
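
    # For instance, a VOD media playlist containing the segment lines
    #     #EXTINF:10.0,
    #     #EXTINF:9.5,
    # sums to int(10.0 + 9.5) == 19 seconds (a sketch; real playlists carry
    # one #EXTINF per segment).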

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)
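
    # e.g. _xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL') returns
    # './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta',
    # the Clark-notation form that ElementTree expects for namespaced lookups.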

    def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
        if res is False:
            assert not fatal
            return [], {}

        smil, urlh = res
        smil_url = urlh.geturl()

        namespace = self._parse_smil_namespace(smil)

        fmts = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subs = self._parse_smil_subtitles(
            smil, namespace=namespace)

        return fmts, subs

    def _extract_smil_formats(self, *args, **kwargs):
        fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('SMIL')
        return fmts

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        res = self._download_smil(smil_url, video_id, fatal=fatal)
        if res is False:
            return {}

        smil, urlh = res
        smil_url = urlh.geturl()

        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml_handle(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }
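
    # An illustrative (made-up) SMIL head that the loop above would map to
    # title/description/upload_date:
    #
    #     <head>
    #       <meta name="title" content="Some title"/>
    #       <meta name="abstract" content="Some description"/>
    #       <meta name="date" content="2020-01-01"/>
    #     </head>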

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0
        imgs_count = 0

        srcs = set()
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += urllib.parse.urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.add(src)

            imgs_count += 1
            formats.append({
                'format_id': 'imagestream-%d' % (imgs_count),
                'url': src,
                'ext': mimetype2ext(medium.get('type')),
                'acodec': 'none',
                'vcodec': 'none',
                'width': int_or_none(medium.get('width')),
                'height': int_or_none(medium.get('height')),
                'format_note': 'SMIL storyboards',
            })

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
        res = self._download_xml_handle(
            xspf_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if res is False:
            return []

        xspf, urlh = res
        xspf_url = urlh.geturl()

        return self._parse_xspf(
            xspf, playlist_id, xspf_url=xspf_url,
            xspf_base_url=base_url(xspf_url))

    def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = []
            for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
                format_url = urljoin(xspf_base_url, location.text)
                if not format_url:
                    continue
                formats.append({
                    'url': format_url,
                    'manifest_url': xspf_url,
                    'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                    'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                    'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
                })

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries
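
    # A minimal XSPF track as consumed above (hypothetical values; the s1:*
    # attributes are the StreamOne extension covered by NS_MAP):
    #
    #     <track>
    #       <title>Sample</title>
    #       <location>video.mp4</location>
    #     </track>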

    def _extract_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _extract_mpd_formats_and_subtitles(
            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
            fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            mpd_url, video_id,
            note='Downloading MPD manifest' if note is None else note,
            errnote='Failed to download MPD manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        mpd_doc, urlh = res
        if mpd_doc is None:
            return [], {}

        # We could have been redirected to a new url when we retrieved our mpd file.
        mpd_url = urlh.geturl()
        mpd_base_url = base_url(mpd_url)

        return self._parse_mpd_formats_and_subtitles(
            mpd_doc, mpd_id, mpd_base_url, mpd_url)

    def _parse_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        if not self.get_param('dynamic_mpd', True):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract relevant
            # for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.attrib['sourceURL']

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                if segment_urls_e:
                    ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        stream_numbers = collections.defaultdict(int)
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = representation_attrib.get('contentType', mime_type.split('/')[0])

                    codec_str = representation_attrib.get('codecs', '')
                    # Some kind of binary subtitle found in some youtube livestreams
                    if mime_type == 'application/x-rawcc':
                        codecs = {'scodec': codec_str}
                    else:
                        codecs = parse_codecs(codec_str)
                    if content_type not in ('video', 'audio', 'text'):
                        if mime_type == 'image/jpeg':
                            content_type = mime_type
                        elif codecs.get('vcodec', 'none') != 'none':
                            content_type = 'video'
                        elif codecs.get('acodec', 'none') != 'none':
                            content_type = 'audio'
                        elif codecs.get('scodec', 'none') != 'none':
                            content_type = 'text'
                        elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
                            content_type = 'text'
                        else:
                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
                            continue

                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if try_call(lambda: base_url_e.text) is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and base_url.startswith('/'):
                        base_url = urllib.parse.urljoin(mpd_base_url, base_url)
                    elif mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    if representation_id is not None:
                        format_id = representation_id
                    else:
                        format_id = content_type
                    if mpd_id:
                        format_id = mpd_id + '-' + format_id
                    if content_type in ('video', 'audio'):
                        f = {
                            'format_id': format_id,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                            **codecs,
                        }
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        if representation_id is not None:
                            tmpl = tmpl.replace('$RepresentationID$', representation_id)
                        # First of, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t.replace('$$', '$')
                        return t

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(
                                    float_or_none(period_duration, segment_duration, default=0)))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
                        # No media template,
                        # e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
                        # or any YouTube dashsegments video
                        fragments = []
                        segment_index = 0
                        timescale = representation_ms_info['timescale']
                        for s in representation_ms_info['s']:
                            duration = float_or_none(s['d'], timescale)
                            for r in range(s.get('r', 0) + 1):
                                segment_uri = representation_ms_info['segment_urls'][segment_index]
                                fragments.append({
                                    location_key(segment_uri): segment_uri,
                                    'duration': duration,
                                })
                                segment_index += 1
                        representation_ms_info['fragments'] = fragments
                    elif 'segment_urls' in representation_ms_info:
                        # Segment URLs with no SegmentTimeline
                        # E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                        # https://github.com/ytdl-org/youtube-dl/pull/14844
                        fragments = []
                        segment_duration = float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
                        for segment_url in representation_ms_info['segment_urls']:
                            fragment = {
                                location_key(segment_url): segment_url,
                            }
                            if segment_duration:
                                fragment['duration'] = segment_duration
                            fragments.append(fragment)
                        representation_ms_info['fragments'] = fragments
                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        f['fragments'].extend(representation_ms_info['fragments'])
                        if not period_duration:
                            period_duration = try_get(
                                representation_ms_info,
                                lambda r: sum(frag['duration'] for frag in r['fragments']), float)
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = base_url
                    if content_type in ('video', 'audio', 'image/jpeg'):
                        f['manifest_stream_number'] = stream_numbers[f['url']]
                        stream_numbers[f['url']] += 1
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles
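
    # A sketch of what prepare_template does to a SegmentTemplate@media value
    # (illustrative input, not from any real manifest):
    #
    #     'seg-$RepresentationID$-$Number%05d$.m4s'
    #         -> 'seg-video_1-%(Number)05d.m4s'
    #
    # which, applied with % {'Number': 3}, yields 'seg-video_1-00003.m4s'.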

    def _extract_ism_formats(self, *args, **kwargs):
        fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('ISM')
        return fmts

    def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            ism_url, video_id,
            note='Downloading ISM manifest' if note is None else note,
            errnote='Failed to download ISM manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return [], {}
        ism_doc, urlh = res
        if ism_doc is None:
            return [], {}

        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)

    def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
        """
        Parse formats from ISM manifest.
        References:
         1. [MS-SSTR]: Smooth Streaming Protocol,
            https://msdn.microsoft.com/en-us/library/ff469518.aspx
        """
        if ism_doc.get('IsLive') == 'TRUE':
            return [], {}

        duration = int(ism_doc.attrib['Duration'])
        timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

        formats = []
        subtitles = {}
        for stream in ism_doc.findall('StreamIndex'):
            stream_type = stream.get('Type')
            if stream_type not in ('video', 'audio', 'text'):
                continue
            url_pattern = stream.attrib['Url']
            stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
            stream_name = stream.get('Name')
            stream_language = stream.get('Language', 'und')
            for track in stream.findall('QualityLevel'):
                KNOWN_TAGS = {'255': 'AACL', '65534': 'EC-3'}
                fourcc = track.get('FourCC') or KNOWN_TAGS.get(track.get('AudioTag'))
                # TODO: add support for WVC1 and WMAP
                if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML', 'EC-3'):
                    self.report_warning('%s is not a supported codec' % fourcc)
                    continue
                tbr = int(track.attrib['Bitrate']) // 1000
                # [1] does not mention Width and Height attributes. However,
                # they're often present while MaxWidth and MaxHeight are
                # missing, so should be used as fallbacks
                width = int_or_none(track.get('MaxWidth') or track.get('Width'))
                height = int_or_none(track.get('MaxHeight') or track.get('Height'))
                sampling_rate = int_or_none(track.get('SamplingRate'))

                track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
                track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)

                fragments = []
                fragment_ctx = {
                    'time': 0,
                }
                stream_fragments = stream.findall('c')
                for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                    fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                    fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                    fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                    if not fragment_ctx['duration']:
                        try:
                            next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
                        except IndexError:
                            next_fragment_time = duration
                        fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                    for _ in range(fragment_repeat):
                        fragments.append({
                            'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
                            'duration': fragment_ctx['duration'] / stream_timescale,
                        })
                        fragment_ctx['time'] += fragment_ctx['duration']

                if stream_type == 'text':
                    subtitles.setdefault(stream_language, []).append({
                        'ext': 'ismt',
                        'protocol': 'ism',
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'fragments': fragments,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                        },
                    })
                elif stream_type in ('video', 'audio'):
                    formats.append({
                        'format_id': join_nonempty(ism_id, stream_name, tbr),
                        'url': ism_url,
                        'manifest_url': ism_url,
                        'ext': 'ismv' if stream_type == 'video' else 'isma',
                        'width': width,
                        'height': height,
                        'tbr': tbr,
                        'asr': sampling_rate,
                        'vcodec': 'none' if stream_type == 'audio' else fourcc,
                        'acodec': 'none' if stream_type == 'video' else fourcc,
                        'protocol': 'ism',
                        'fragments': fragments,
                        'has_drm': ism_doc.find('Protection') is not None,
                        '_download_params': {
                            'stream_type': stream_type,
                            'duration': duration,
                            'timescale': stream_timescale,
                            'width': width or 0,
                            'height': height or 0,
                            'fourcc': fourcc,
                            'language': stream_language,
                            'codec_private_data': track.get('CodecPrivateData'),
                            'sampling_rate': sampling_rate,
                            'channels': int_or_none(track.get('Channels', 2)),
                            'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                            'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                        },
                    })
        return formats, subtitles
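
    # Fragment timing sketch: a StreamIndex with <c t="0" d="20000000" r="3"/>
    # and the default 10000000 timescale expands to three 2-second fragments
    # whose URLs substitute {start time}=0, 20000000 and 40000000 into the Url
    # pattern (illustrative numbers, not from a real manifest).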

    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
        def absolute_url(item_url):
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info=None):
            type_info = type_info or {}
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, quality=quality, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                    'ext': ext,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979,
            # e.g. http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
        for media_tag, _, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(dict_get(media_attributes, ('src', 'data-video-src', 'data-src', 'data-source')))
            if src:
                f = parse_content_type(media_attributes.get('type'))
                _, formats = _media_formats(src, media_type, f)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src', 'data-source')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries
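
    # Illustrative input: given webpage markup like
    #
    #     <video poster="/thumb.jpg">
    #       <source src="clip.m3u8" type="application/x-mpegURL">
    #       <track kind="subtitles" srclang="en" src="clip.vtt">
    #     </video>
    #
    # this helper returns one entry whose formats come from the HLS manifest
    # and whose subtitles dict maps 'en' to clip.vtt (a sketch, not a test case).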

    def _extract_akamai_formats(self, *args, **kwargs):
        fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('akamai')
        return fmts

    def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
        signed = 'hdnea=' in manifest_url
        if not signed:
            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
            manifest_url = re.sub(
                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
                '', manifest_url).strip('?')

        formats = []
        subtitles = {}

        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
        f4m_formats = self._extract_f4m_formats(
            f4m_url, video_id, f4m_id='hds', fatal=False)
        for entry in f4m_formats:
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)

        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
            m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
        m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)
        subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)

        http_host = hosts.get('http')
        if http_host and m3u8_formats and not signed:
            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
            qualities_length = len(qualities)
            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                i = 0
                for f in m3u8_formats:
                    if f['vcodec'] != 'none':
                        for protocol in ('http', 'https'):
                            http_f = f.copy()
                            del http_f['manifest_url']
                            http_url = re.sub(
                                REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
                            http_f.update({
                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                'url': http_url,
                                'protocol': protocol,
                            })
                            formats.append(http_f)
                        i += 1

        return formats, subtitles

    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
        query = urllib.parse.urlparse(url).query
        url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
        mobj = re.search(
            r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
        url_base = mobj.group('url')
        http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
        formats = []

        def manifest_url(manifest):
            m_url = f'{http_base_url}/{manifest}'
            if query:
                m_url += '?%s' % query
            return m_url

        if 'm3u8' not in skip_protocols:
            formats.extend(self._extract_m3u8_formats(
                manifest_url('playlist.m3u8'), video_id, 'mp4',
                m3u8_entry_protocol, m3u8_id='hls', fatal=False))
        if 'f4m' not in skip_protocols:
            formats.extend(self._extract_f4m_formats(
                manifest_url('manifest.f4m'),
                video_id, f4m_id='hds', fatal=False))
        if 'dash' not in skip_protocols:
            formats.extend(self._extract_mpd_formats(
                manifest_url('manifest.mpd'),
                video_id, mpd_id='dash', fatal=False))
        if re.search(r'(?:/smil:|\.smil)', url_base):
            if 'smil' not in skip_protocols:
                rtmp_formats = self._extract_smil_formats(
                    manifest_url('jwplayer.smil'),
                    video_id, fatal=False)
                for rtmp_format in rtmp_formats:
                    rtsp_format = rtmp_format.copy()
                    rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                    del rtsp_format['play_path']
                    del rtsp_format['ext']
                    rtsp_format.update({
                        'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                        'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                        'protocol': 'rtsp',
                    })
                    formats.extend([rtmp_format, rtsp_format])
        else:
            for protocol in ('rtmp', 'rtsp'):
                if protocol not in skip_protocols:
                    formats.append({
                        'url': f'{protocol}:{url_base}',
                        'format_id': protocol,
                        'protocol': protocol,
                    })
        return formats
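
    # e.g. a typical Wowza URL such as
    #     http://example.com/app/mp4:clip.mp4/playlist.m3u8
    # is stripped to its base and re-probed as playlist.m3u8, manifest.f4m,
    # manifest.mpd and (for /smil: paths) jwplayer.smil; the example.com host
    # here is, of course, hypothetical.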
    def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
        mobj = re.search(
            r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
            webpage)
        if mobj:
            try:
                jwplayer_data = self._parse_json(mobj.group('options'),
                                                 video_id=video_id,
                                                 transform_source=transform_source)
            except ExtractorError:
                pass
            else:
                if isinstance(jwplayer_data, dict):
                    return jwplayer_data

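    # The regex above targets inline setups of this shape (hypothetical page):
    #
    #     <script>
    #         jwplayer("myplayer").setup({file: "video.mp4", image: "poster.jpg"});
    #     </script>
    #
    # The captured 'options' blob is JavaScript rather than JSON, hence the
    # js_to_json transform before parsing.
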
3216 def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
3217 jwplayer_data = self._find_jwplayer_data(
3218 webpage, video_id, transform_source=js_to_json)
3219 return self._parse_jwplayer_data(
3220 jwplayer_data, video_id, *args, **kwargs)
    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if 'playlist' not in jwplayer_data:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            subtitles = {}
            tracks = video_data.get('tracks')
            if tracks and isinstance(tracks, list):
                for track in tracks:
                    if not isinstance(track, dict):
                        continue
                    track_kind = track.get('kind')
                    if not track_kind or not isinstance(track_kind, str):
                        continue
                    if track_kind.lower() not in ('captions', 'subtitles'):
                        continue
                    track_url = urljoin(base_url, track.get('file'))
                    if not track_url:
                        continue
                    subtitles.setdefault(track.get('label') or 'en', []).append({
                        'url': self._proto_relative_url(track_url)
                    })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            if len(formats) == 1 and re.search(
                    r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)

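    # A minimal, already-normalized input for _parse_jwplayer_data (all
    # values illustrative):
    #
    #     {
    #         'playlist': [{
    #             'mediaid': 'abc123',
    #             'title': 'Clip title',
    #             'sources': [{'file': '/videos/abc123.m3u8', 'type': 'hls'}],
    #             'tracks': [{'kind': 'captions', 'file': '/videos/abc123.vtt', 'label': 'en'}],
    #         }],
    #     }
    #
    # The compatibility shims above also accept the same item flattened,
    # i.e. without the 'playlist' and 'sources' wrappers.
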
    def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
                                m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        urls = []
        formats = []
        for source in jwplayer_sources_data:
            if not isinstance(source, dict):
                continue
            source_url = urljoin(
                base_url, self._proto_relative_url(source.get('file')))
            if not source_url or source_url in urls:
                continue
            urls.append(source_url)
            source_type = source.get('type') or ''
            ext = mimetype2ext(source_type) or determine_ext(source_url)
            if source_type == 'hls' or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=m3u8_id, fatal=False))
            elif source_type == 'dash' or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    source_url, video_id, mpd_id=mpd_id, fatal=False))
            elif ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
            elif source_type.startswith('audio') or ext in (
                    'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
                formats.append({
                    'url': source_url,
                    'vcodec': 'none',
                    'ext': ext,
                })
            else:
                height = int_or_none(source.get('height'))
                if height is None:
                    # Often no height is provided but there is a label in
                    # format like "1080p", "720p SD", or 1080.
                    height = int_or_none(self._search_regex(
                        r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
                        'height', default=None))
                a_format = {
                    'url': source_url,
                    'width': int_or_none(source.get('width')),
                    'height': height,
                    'tbr': int_or_none(source.get('bitrate'), scale=1000),
                    'filesize': int_or_none(source.get('filesize')),
                    'ext': ext,
                }
                if source_url.startswith('rtmp'):
                    a_format['ext'] = 'flv'
                    # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
                    # of jwplayer.flash.swf
                    rtmp_url_parts = re.split(
                        r'((?:mp4|mp3|flv):)', source_url, 1)
                    if len(rtmp_url_parts) == 3:
                        rtmp_url, prefix, play_path = rtmp_url_parts
                        a_format.update({
                            'url': rtmp_url,
                            'play_path': prefix + play_path,
                        })
                    if rtmp_params:
                        a_format.update(rtmp_params)
                formats.append(a_format)
        return formats

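    # For reference, the split above turns a hypothetical
    #     rtmp://media.example.com/vod/mp4:folder/clip.mp4
    # into url='rtmp://media.example.com/vod/' and
    # play_path='mp4:folder/clip.mp4', mirroring the old Flash provider.
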
    def _live_title(self, name):
        self._downloader.deprecation_warning(
            'yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
        return name

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = f'Failed to extract {name}: Could not parse value {v!r}'
            if fatal:
                raise ExtractorError(msg)
            else:
                self.report_warning(msg)
        return res

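    # Example: self._int('1080', 'height') returns 1080, while
    # self._int('N/A', 'height') warns and returns None; with fatal=True
    # the same failure raises ExtractorError instead.
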
    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
        cookie = http.cookiejar.Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self.cookiejar.set_cookie(cookie)

    def _get_cookies(self, url):
        """ Return a http.cookies.SimpleCookie with the cookies for the url """
        return LenientSimpleCookie(self._downloader._calc_cookies(url))

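    # Example round trip (hypothetical domain):
    #
    #     self._set_cookie('.example.com', 'session', 'abc123')
    #     self._get_cookies('https://www.example.com')['session'].value == 'abc123'
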
    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
        Apply first Set-Cookie header instead of the last. Experimental.

        Some sites (e.g. [1-3]) may serve two cookies under the same name
        in the Set-Cookie header and expect the first (old) one to be set
        rather than the second (new) one. However, per RFC 6265 the newer
        cookie should be stored, which is what actually happens. Work around
        this issue by manually resetting the cookie to the first one.

        1. https://new.vk.com/
        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
        3. https://learning.oreilly.com/
        """
        for header, cookies in url_handle.headers.items():
            if header.lower() != 'set-cookie':
                continue
            cookies = cookies.encode('iso-8859-1').decode('utf-8')
            cookie_value = re.search(
                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
            if cookie_value:
                value, domain = cookie_value.groups()
                self._set_cookie(domain, cookie, value)
                break

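    # Example: for a response carrying (hypothetical values)
    #     Set-Cookie: remixsid=old; Domain=.example.com
    #     Set-Cookie: remixsid=new; Domain=.example.com
    # the cookie jar would normally keep 'new';
    # self._apply_first_set_cookie_header(urlh, 'remixsid') re-sets 'old'.
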
    @classmethod
    def get_testcases(cls, include_onlymatching=False):
        # Do not look in super classes
        t = vars(cls).get('_TEST')
        if t:
            assert not hasattr(cls, '_TESTS'), f'{cls.ie_key()}IE has _TEST and _TESTS'
            tests = [t]
        else:
            tests = vars(cls).get('_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = cls.ie_key()
            yield t

    @classmethod
    def get_webpage_testcases(cls):
        tests = vars(cls).get('_WEBPAGE_TESTS', [])
        for t in tests:
            t['name'] = cls.ie_key()
        return tests

    @classproperty(cache=True)
    def age_limit(cls):
        """Get age limit from the testcases"""
        return max(traverse_obj(
            (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
            (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])

    @classproperty(cache=True)
    def _RETURN_TYPE(cls):
        """What the extractor returns: "video", "playlist", "any", or None (Unknown)"""
        tests = tuple(cls.get_testcases(include_onlymatching=False))
        if not tests:
            return None
        elif not any(k.startswith('playlist') for test in tests for k in test):
            return 'video'
        elif all(any(k.startswith('playlist') for k in test) for test in tests):
            return 'playlist'
        return 'any'

    @classmethod
    def is_single_video(cls, url):
        """Returns whether the URL is of a single video, None if unknown"""
        assert cls.suitable(url), 'The URL must be suitable for the extractor'
        return {'video': True, 'playlist': False}.get(cls._RETURN_TYPE)

    @classmethod
    def is_suitable(cls, age_limit):
        """Test whether the extractor is generally suitable for the given age limit"""
        return not age_restricted(cls.age_limit, age_limit)

    @classmethod
    def description(cls, *, markdown=True, search_examples=None):
        """Description of the extractor"""
        desc = ''
        if cls._NETRC_MACHINE:
            if markdown:
                desc += f' [<abbr title="netrc machine"><em>{cls._NETRC_MACHINE}</em></abbr>]'
            else:
                desc += f' [{cls._NETRC_MACHINE}]'
        if cls.IE_DESC is False:
            desc += ' [HIDDEN]'
        elif cls.IE_DESC:
            desc += f' {cls.IE_DESC}'
        if cls.SEARCH_KEY:
            desc += f'; "{cls.SEARCH_KEY}:" prefix'
            if search_examples:
                _COUNTS = ('', '5', '10', 'all')
                desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
        if not cls.working():
            desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'

        # Escape emojis. Ref: https://github.com/github/markup/issues/1153
        name = (' - **%s**' % re.sub(r':(\w+:)', ':\u200B\\g<1>', cls.IE_NAME)) if markdown else cls.IE_NAME
        return f'{name}:{desc}' if desc else name

    def extract_subtitles(self, *args, **kwargs):
        if (self.get_param('writesubtitles', False)
                or self.get_param('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    class CommentsDisabled(Exception):
        """Raise in _get_comments if comments are disabled for the video"""

    def extract_comments(self, *args, **kwargs):
        if not self.get_param('getcomments'):
            return None
        generator = self._get_comments(*args, **kwargs)

        def extractor():
            comments = []
            interrupted = True
            try:
                while True:
                    comments.append(next(generator))
            except StopIteration:
                interrupted = False
            except KeyboardInterrupt:
                self.to_screen('Interrupted by user')
            except self.CommentsDisabled:
                return {'comments': None, 'comment_count': None}
            except Exception as e:
                if self.get_param('ignoreerrors') is not True:
                    raise
                self._downloader.report_error(e)
            comment_count = len(comments)
            self.to_screen(f'Extracted {comment_count} comments')
            return {
                'comments': comments,
                'comment_count': None if interrupted else comment_count
            }
        return extractor

    def _get_comments(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs/data
        will be dropped. """
        list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
        ret = list(subtitle_list1)
        ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
        return ret

    @classmethod
    def _merge_subtitles(cls, *dicts, target=None):
        """ Merge subtitle dictionaries, language by language. """
        if target is None:
            target = {}
        for d in dicts:
            for lang, subs in d.items():
                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
        return target

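    # Example (illustrative URLs):
    #
    #     cls._merge_subtitles(
    #         {'en': [{'url': 'https://example.com/en.vtt'}]},
    #         {'en': [{'url': 'https://example.com/en.vtt'}],
    #          'de': [{'url': 'https://example.com/de.vtt'}]})
    #
    # keeps a single 'en' item (the duplicate URL is dropped) and adds 'de'.
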
    def extract_automatic_captions(self, *args, **kwargs):
        if (self.get_param('writeautomaticsub', False)
                or self.get_param('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    @functools.cached_property
    def _cookies_passed(self):
        """Whether cookies have been passed to YoutubeDL"""
        return self.get_param('cookiefile') is not None or self.get_param('cookiesfrombrowser') is not None

    def mark_watched(self, *args, **kwargs):
        if not self.get_param('mark_watched', False):
            return
        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
            self._mark_watched(*args, **kwargs)

    def _mark_watched(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    def geo_verification_headers(self):
        headers = {}
        geo_verification_proxy = self.get_param('geo_verification_proxy')
        if geo_verification_proxy:
            headers['Ytdl-request-proxy'] = geo_verification_proxy
        return headers

    @staticmethod
    def _generic_id(url):
        return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])

    def _generic_title(self, url='', webpage='', *, default=None):
        return (self._og_search_title(webpage, default=None)
                or self._html_extract_title(webpage, default=None)
                or urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
                or default)

    @staticmethod
    def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
        all_known = all(map(
            lambda x: x is not None,
            (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
        return (
            'private' if is_private
            else 'premium_only' if needs_premium
            else 'subscriber_only' if needs_subscription
            else 'needs_auth' if needs_auth
            else 'unlisted' if is_unlisted
            else 'public' if all_known
            else None)

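    # Example: _availability(is_private=False, needs_premium=False,
    # needs_subscription=False, needs_auth=False, is_unlisted=True) returns
    # 'unlisted', while calling it with all five flags left as None returns
    # None, since nothing is known about the video.
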
    def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
        """
        @returns            A list of values for the extractor argument given by "key"
                            or "default" if no such key is present
        @param default      The default value to return when the key is not present (default: [])
        @param casesense    When false, the values are converted to lower case
        """
        ie_key = ie_key if isinstance(ie_key, str) else (ie_key or self).ie_key()
        val = traverse_obj(self._downloader.params, ('extractor_args', ie_key.lower(), key))
        if val is None:
            return [] if default is NO_DEFAULT else default
        return list(val) if casesense else [x.lower() for x in val]

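    # Example: with '--extractor-args "youtube:player_client=android,web"'
    # on the command line, self._configuration_arg('player_client') inside
    # the YouTube extractor returns ['android', 'web']; without the option
    # it returns [] (or the given default).
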
    def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
        if not playlist_id or not video_id:
            return not video_id

        no_playlist = (smuggled_data or {}).get('force_noplaylist')
        if no_playlist is not None:
            return not no_playlist

        video_id = '' if video_id is True else f' {video_id}'
        playlist_id = '' if playlist_id is True else f' {playlist_id}'
        if self.get_param('noplaylist'):
            self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
            return False
        self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
        return True

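    # Typical use in an extractor whose URLs carry both a playlist and a
    # video id (FooIE/FooPlaylistIE are hypothetical):
    #
    #     if self._yes_playlist(playlist_id, video_id, smuggled_data):
    #         return self.url_result(playlist_url, FooPlaylistIE)
    #     return self.url_result(video_url, FooIE)
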
    def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
        RetryManager.report_retry(
            err, _count or int(fatal), _retries,
            info=self.to_screen, warn=self.report_warning, error=None if fatal else self.report_warning,
            sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))

    def RetryManager(self, **kwargs):
        return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)

    def _extract_generic_embeds(self, url, *args, info_dict={}, note='Extracting generic embeds', **kwargs):
        display_id = traverse_obj(info_dict, 'display_id', 'id')
        self.to_screen(f'{format_field(display_id, None, "%s: ")}{note}')
        return self._downloader.get_info_extractor('Generic')._extract_embeds(
            smuggle_url(url, {'block_ies': [self.ie_key()]}), *args, **kwargs)

    @classmethod
    def extract_from_webpage(cls, ydl, url, webpage):
        ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
              else ydl.get_info_extractor(cls.ie_key()))
        for info in ie._extract_from_webpage(url, webpage) or []:
            # url = None since we do not want to set (webpage/original)_url
            ydl.add_default_extra_info(info, ie, None)
            yield info

    @classmethod
    def _extract_from_webpage(cls, url, webpage):
        for embed_url in orderedSet(
                cls._extract_embed_urls(url, webpage) or [], lazy=True):
            yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """@returns all the embed urls on the webpage"""
        if '_EMBED_URL_RE' not in cls.__dict__:
            assert isinstance(cls._EMBED_REGEX, (list, tuple))
            for idx, regex in enumerate(cls._EMBED_REGEX):
                assert regex.count('(?P<url>') == 1, \
                    f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
            cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))

        for regex in cls._EMBED_URL_RE:
            for mobj in regex.finditer(webpage):
                embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
                if cls._VALID_URL is False or cls.suitable(embed_url):
                    yield embed_url

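    # A subclass normally only declares the pattern (hypothetical extractor):
    #
    #     class FooIE(InfoExtractor):
    #         _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>https?://foo\.example/embed/[^"\']+)']
    #
    # _extract_from_webpage above then yields a url_result for every match
    # that also satisfies the extractor's _VALID_URL.
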
    class StopExtraction(Exception):
        pass

    @classmethod
    def _extract_url(cls, webpage):  # TODO: Remove
        """Only for compatibility with some older extractors"""
        return next(iter(cls._extract_embed_urls(None, webpage) or []), None)

    def __init_subclass__(cls, *, plugin_name=None, **kwargs):
        if plugin_name:
            mro = inspect.getmro(cls)
            super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
            cls.IE_NAME, cls.ie_key = f'{super_class.IE_NAME}+{plugin_name}', super_class.ie_key
            while getattr(super_class, '__wrapped__', None):
                super_class = super_class.__wrapped__
            setattr(sys.modules[super_class.__module__], super_class.__name__, cls)

        return super().__init_subclass__(**kwargs)

class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
    """

    _MAX_RESULTS = float('inf')
    _RETURN_TYPE = 'playlist'

    @classproperty
    def _VALID_URL(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    def _real_extract(self, query):
        prefix, query = self._match_valid_url(query).group('prefix', 'query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(f'invalid download number {n} for query "{query}"')
            elif n > self._MAX_RESULTS:
                self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

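    # For instance, with _SEARCH_KEY = 'ytsearch' (the YouTube search
    # extractor), 'ytsearch:cats' fetches 1 result, 'ytsearch5:cats'
    # fetches 5, and 'ytsearchall:cats' fetches up to _MAX_RESULTS.
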
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query.
        Either this function or _search_results must be overridden by subclasses """
        return self.playlist_result(
            itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
            query, query)

    def _search_results(self, query):
        """Returns an iterator of search results"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @classproperty
    def SEARCH_KEY(cls):
        return cls._SEARCH_KEY

class UnsupportedURLIE(InfoExtractor):
    _VALID_URL = '.*'
    _ENABLED = False
    IE_DESC = False

    def _real_extract(self, url):
        raise UnsupportedError(url)