import base64
import collections
import getpass
import hashlib
import http.client
import http.cookiejar
import http.cookies
import inspect
import itertools
import json
import math
import netrc
import os
import random
import re
import sys
import time
import types
import urllib.parse
import urllib.request
import xml.etree.ElementTree

from ..compat import functools  # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..cookies import LenientSimpleCookie
from ..downloader.f4m import get_base_url, remove_encrypted_media
from ..utils import (
    IDENTITY,
    JSON_LD_RE,
    NO_DEFAULT,
    ExtractorError,
    FormatSorter,
    GeoRestrictedError,
    GeoUtils,
    LenientJSONDecoder,
    RegexNotFoundError,
    RetryManager,
    UnsupportedError,
    age_restricted,
    base_url,
    bug_reports_message,
    classproperty,
    clean_html,
    deprecation_warning,
    determine_ext,
    dict_get,
    encode_data_uri,
    error_to_compat_str,
    extract_attributes,
    filter_dict,
    fix_xml_ampersands,
    float_or_none,
    format_field,
    int_or_none,
    join_nonempty,
    js_to_json,
    mimetype2ext,
    network_exceptions,
    orderedSet,
    parse_bitrate,
    parse_codecs,
    parse_duration,
    parse_iso8601,
    parse_m3u8_attributes,
    parse_resolution,
    sanitize_filename,
    sanitize_url,
    sanitized_Request,
    smuggle_url,
    str_or_none,
    str_to_int,
    strip_or_none,
    traverse_obj,
    try_call,
    try_get,
    unescapeHTML,
    unified_strdate,
    unified_timestamp,
    update_Request,
    update_url_query,
    url_basename,
    url_or_none,
    urljoin,
    variadic,
    xpath_element,
    xpath_text,
    xpath_with_ns,
)


class InfoExtractor:
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped. Set to an empty string if video has
                    no title as opposed to "None" which signifies that the
                    extractor failed to obtain a title

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        The mandatory URL representing the media:
                                 for plain file media - HTTP URL of this file,
                                 for RTMP - RTMP URL,
                                 for HLS - URL of the M3U8 media playlist,
                                 for HDS - URL of the F4M manifest,
                                 for DASH
                                   - HTTP URL to plain file media (in case of
                                     unfragmented media)
                                   - URL of the MPD manifest or base URL
                                     representing the media if MPD manifest
                                     is parsed from a string (in case of
                                     fragmented media)
                                 for MSS - URL of the ISM manifest.
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media:
                                 for HLS - URL of the M3U8 master playlist,
                                 for HDS - URL of the F4M manifest,
                                 for DASH - URL of the MPD manifest,
                                 for MSS - URL of the ISM manifest.
                    * manifest_stream_number (For internal use only)
                                 The index of the stream in the manifest file
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * aspect_ratio Aspect ratio of the video, if known
                                 Automatically calculated from width and height
                    * resolution Textual description of width and height
                                 Automatically calculated from width and height
                    * dynamic_range The dynamic range of the video. One of:
                                 "SDR" (None), "HDR10", "HDR10+", "HDR12", "HLG", "DV"
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * audio_channels Number of audio channels
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case. One of "http", "https" or
                                 one of the protocols defined in downloader.PROTOCOL_MAP
                    * fragment_base_url
                                 Base URL for fragments. Each fragment's path
                                 value (if present) will be relative to
                                 this URL.
                    * fragments  A list of fragments of a fragmented media.
                                 Each fragment entry must contain either an url
                                 or a path. If an url is present it should be
                                 considered by a client. Otherwise both path and
                                 fragment_base_url must be present. Here is
                                 the list of all potential fields:
                                 * "url" - fragment's URL
                                 * "path" - fragment's path relative to
                                            fragment_base_url
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                    * is_from_start Is a live format that can be downloaded
                                 from the start. Boolean
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                 another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
                    * has_drm    The format has DRM and cannot be downloaded. Boolean
                    * downloader_options A dictionary of downloader options
                                 (For internal use only)
                                 * http_chunk_size Chunk size for HTTP downloads
                                 * ffmpeg_args Extra arguments for ffmpeg downloader
                    RTMP formats can also have the additional fields: page_url,
                    app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
                    rtmp_protocol, rtmp_real_time

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    direct:         True if a direct video file was given (must only be set by GenericIE)
    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
                        * "http_headers" (dict) - HTTP headers for the request
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    timestamp:      UNIX timestamp of the moment the video was uploaded
    upload_date:    Video upload date in UTC (YYYYMMDD).
                    If not explicitly set, calculated from timestamp
    release_timestamp: UNIX timestamp of the moment the video was released.
                    If it is not clear whether to use timestamp or this, use the former
    release_date:   The date (YYYYMMDD) when the video was released in UTC.
                    If not explicitly set, calculated from release_timestamp
    modified_timestamp: UNIX timestamp of the moment the video was last modified.
    modified_date:  The date (YYYYMMDD) when the video was last modified in UTC.
                    If not explicitly set, calculated from modified_timestamp
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    channel_follower_count: Number of followers of the channel.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    It can optionally also have:
                        * "name": Name or description of the subtitles
                        * "http_headers": A dictionary of additional HTTP headers
                          to add to the request.
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles'; contains automatically generated
                    captions instead of normal subtitles
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    concurrent_view_count: How many users are currently watching the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "author_thumbnail" - The thumbnail of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
                        * "like_count" - Number of positive ratings of the comment
                        * "dislike_count" - Number of negative ratings of the comment
                        * "is_favorited" - Whether the comment is marked as
                                           favorite by the video uploader
                        * "author_is_uploader" - Whether the comment is made by
                                                 the video uploader
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to yt-dlp it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    cast:           A list of the video cast
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    was_live:       True, False, or None (=unknown). Whether this video was
                    originally a live stream.
    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
                    or 'post_live' (was live, but VOD is not yet processed)
                    If absent, automatically set from is_live, was_live
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)
    playable_in_embed: Whether this video is allowed to play in embedded
                    players on other sites. Can be True (=always allowed),
                    False (=never allowed), None (=unknown), or a string
                    specifying the criteria for embedability; e.g. 'whitelist'
    availability:   Under what condition the video is available. One of
                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
                    to set it
    _old_archive_ids: A list of old archive ids needed for backward compatibility
    _format_sort_fields: A list of fields to use for sorting formats
    __post_extractor: A function to be called just before the metadata is
                    written to either disk, logger or console. The function
                    must return a dict which will be added to the info_dict.
                    This is useful for additional information that is
                    time-consuming to extract. Note that the fields thus
                    extracted will not be available to output template and
                    match_filter. So, only "comments" and "comment_count" are
                    currently allowed to be extracted via this method.

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    series_id:      Id of the series or programme the video episode belongs to, as a unicode string.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.
    composer:       Composer of the piece

    The following fields should only be set for clips that should be cut from the original video:

    section_start:  Start time of the section in seconds
    section_end:    End time of the section in seconds

    The following fields should only be set for storyboards:
    rows:           Number of rows in each storyboard fragment, as an integer
    columns:        Number of columns in each storyboard fragment, as an integer

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.


    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", and any other relevant
    attributes with the same semantics as videos (see above).

    It can also have the following optional fields:

    playlist_count: The total number of videos in a playlist. If not given,
                    YoutubeDL tries to calculate it from "entries"


    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example, multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.


    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.


    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.


    Subclasses of this should also be added to the list of extractors and
    should define a _VALID_URL regexp and re-define the _real_extract() and
    (optionally) _real_initialize() methods.

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.

    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
    the HTML of Generic webpages. It may also override _extract_embed_urls
    or _extract_from_webpage as necessary. While these are normally classmethods,
    _extract_from_webpage is allowed to be an instance method.

    _extract_from_webpage may raise self.StopExtraction() to stop further
    processing of the webpage and obtain exclusive rights to it. This is useful
    when the extractor cannot reliably be matched using just the URL,
    e.g. invidious/peertube instances

    Embed-only extractors can be defined by setting _VALID_URL = False.

    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will
    be called between _initialize_pre_login and _real_initialize if credentials
    are passed by the user. In cases where it is necessary to have the login
    process as part of the extraction rather than initialization, _perform_login
    can be left undefined.

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    Though it won't disable explicit geo restriction bypass based on
    country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by
    geo restriction bypass mechanism right away in order to bypass
    geo restriction, of course, if the mechanism is not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    The _ENABLED attribute should be set to False for IEs that
    are disabled by default and must be explicitly enabled.

    The _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """
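
    # An illustrative, hypothetical minimal subclass satisfying the contract
    # described in the docstring above (the site name, URL pattern and regexes
    # are made up):
    #
    #   class ExampleIE(InfoExtractor):
    #       _VALID_URL = r'https?://example\.com/videos/(?P<id>\d+)'
    #
    #       def _real_extract(self, url):
    #           video_id = self._match_id(url)
    #           webpage = self._download_webpage(url, video_id)
    #           return {
    #               'id': video_id,
    #               'title': self._html_search_regex(
    #                   r'<h1>([^<]+)</h1>', webpage, 'title'),
    #               'url': self._html_search_regex(
    #                   r'data-video-url="([^"]+)"', webpage, 'video URL'),
    #               'ext': 'mp4',
    #           }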

    _ready = False
    _downloader = None
    _x_forwarded_for_ip = None
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _WORKING = True
    _ENABLED = True
    _NETRC_MACHINE = None
    IE_DESC = None
    SEARCH_KEY = None
    _VALID_URL = None
    _EMBED_REGEX = []

    def _login_hint(self, method=NO_DEFAULT, netrc=None):
        password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'
        return {
            None: '',
            'any': f'Use --cookies, --cookies-from-browser, {password_hint}',
            'password': f'Use {password_hint}',
            'cookies': (
                'Use --cookies-from-browser or --cookies for the authentication. '
                'See https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp for how to manually pass cookies'),
        }[method if method is not NO_DEFAULT else 'any' if self.supports_login() else 'cookies']

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader (a YoutubeDL instance).
        If a downloader is not passed during initialization,
        it must be set using "set_downloader()" before "extract()" is called"""
        self._ready = False
        self._x_forwarded_for_ip = None
        self._printed_messages = set()
        self.set_downloader(downloader)

    @classmethod
    def _match_valid_url(cls, url):
        if cls._VALID_URL is False:
            return None
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls._match_valid_url(url) is not None

    @classmethod
    def _match_id(cls, url):
        return cls._match_valid_url(url).group('id')

    @classmethod
    def get_temp_id(cls, url):
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    @classmethod
    def supports_login(cls):
        return bool(cls._NETRC_MACHINE)

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._printed_messages = set()
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        if not self._ready:
            self._initialize_pre_login()
            if self.supports_login():
                username, password = self._get_login_info()
                if username:
                    self._perform_login(username, password)
            elif self.get_param('username') and False not in (self.IE_DESC, self._NETRC_MACHINE):
                self.report_warning(f'Login with password is not supported for this website. {self._login_hint("cookies")}')
            self._real_initialize()
            self._ready = True

    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from provided country list
        is selected and a random IP belonging to this country is generated. This
        IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in the geo bypass context passed as first argument. It
        may contain the following fields:

        countries:  List of geo unrestricted countries (similar
                    to _GEO_COUNTRIES)
        ip_blocks:  List of geo unrestricted IP blocks in CIDR notation
                    (similar to _GEO_IP_BLOCKS)

        """
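        # For example (illustrative values), an extractor that only learns of a
        # geo restriction during extraction could call:
        #     self._initialize_geo_bypass({'countries': ['DE', 'FR']})
        # or, using CIDR IP blocks instead of country codes:
        #     self._initialize_geo_bypass({'ip_blocks': ['203.0.113.0/24']})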
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self.get_param('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self.get_param('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                self.write_debug(f'Using fake IP {self._x_forwarded_for_ip} as X-Forwarded-For')
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self.get_param('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                self._downloader.write_debug(
                    f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    self.write_debug('Extracting URL: %s' % url)
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles') or {}
                    if 'no-live-chat' in self.get_param('compat_opts'):
                        for lang in ('live_chat', 'comments', 'danmaku'):
                            subtitles.pop(lang, None)
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except UnsupportedError:
            raise
        except ExtractorError as e:
            kwargs = {
                'video_id': e.video_id or self.get_temp_id(url),
                'ie': self.IE_NAME,
                'tb': e.traceback or sys.exc_info()[2],
                'expected': e.expected,
                'cause': e.cause
            }
            if hasattr(e, 'countries'):
                kwargs['countries'] = e.countries
            raise type(e)(e.orig_msg, **kwargs)
        except http.client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))

    def __maybe_fake_ip_and_retry(self, countries):
        if (not self.get_param('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self.get_param('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets a YoutubeDL instance as the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _initialize_pre_login(self):
        """ Initialization before login. Redefine in subclasses."""
        pass

    def _perform_login(self, username, password):
        """ Login with username and password. Redefine in subclasses."""
        pass

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return cls.__name__[:-2]

    @classproperty
    def IE_NAME(cls):
        return cls.__name__[:-2]

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)

    def _create_request(self, url_or_request, data=None, headers=None, query=None):
        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
        return sanitized_Request(url_or_request, data, headers or {})

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if not self._downloader._first_webpage_request:
            sleep_interval = self.get_param('sleep_interval_requests') or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        else:
            self._downloader._first_webpage_request = False

        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen(str(note))
            else:
                self.to_screen(f'{video_id}: {note}')

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            headers = (headers or {}).copy()
            headers.setdefault('X-Forwarded-For', self._x_forwarded_for_ip)

        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = f'{errnote}: {error_to_compat_str(err)}'
            if fatal:
                raise ExtractorError(errmsg, cause=err)
            else:
                self.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True,
                                  encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        Arguments:
        url_or_request -- plain text URL as a string or
            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractorError to be raised,
            otherwise a warning will be reported and extraction continued
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
        """
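        # Illustrative (hypothetical) values for expected_status:
        #     expected_status=404                          accept a 404 response
        #     expected_status=(402, 403)                   accept either of these codes
        #     expected_status=lambda x: 400 <= x < 500     accept any 4xx response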

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, str):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _request_dump_filename(self, url, video_id):
        basen = f'{video_id}_{url}'
        trim_length = self.get_param('trim_file_name') or 240
        if len(basen) > trim_length:
            h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
            basen = basen[:trim_length - len(h)] + h
        filename = sanitize_filename(f'{basen}.dump', restricted=True)
        # Working around MAX_PATH limitation on Windows (see
        # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
        if compat_os_name == 'nt':
            absfilepath = os.path.abspath(filename)
            if len(absfilepath) > 259:
                filename = fR'\\?\{absfilepath}'
        return filename

    def __decode_webpage(self, webpage_bytes, encoding, headers):
        if not encoding:
            encoding = self._guess_encoding_from_content(headers.get('Content-Type', ''), webpage_bytes)
        try:
            return webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            return webpage_bytes.decode('utf-8', 'replace')

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if self.get_param('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self.get_param('write_pages'):
            filename = self._request_dump_filename(urlh.geturl(), video_id)
            self.to_screen(f'Saving request to {filename}')
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        content = self.__decode_webpage(webpage_bytes, encoding, urlh.headers)
        self.__check_blocked(content)

        return content

    def __print_error(self, errnote, fatal, video_id, err):
        if fatal:
            raise ExtractorError(f'{video_id}: {errnote}', cause=err)
        elif errnote:
            self.report_warning(f'{video_id}: {errnote}: {err}')

    def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
        if transform_source:
            xml_string = transform_source(xml_string)
        try:
            return compat_etree_fromstring(xml_string.encode('utf-8'))
        except xml.etree.ElementTree.ParseError as ve:
            self.__print_error('Failed to parse XML' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_json(self, json_string, video_id, transform_source=None, fatal=True, errnote=None, **parser_kwargs):
        try:
            return json.loads(
                json_string, cls=LenientJSONDecoder, strict=False, transform_source=transform_source, **parser_kwargs)
        except ValueError as ve:
            self.__print_error('Failed to parse JSON' if errnote is None else errnote, fatal, video_id, ve)

    def _parse_socket_response_as_json(self, data, *args, **kwargs):
        return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)

    def __create_download_methods(name, parser, note, errnote, return_value):

        def parse(ie, content, *args, errnote=errnote, **kwargs):
            if parser is None:
                return content
            if errnote is False:
                kwargs['errnote'] = errnote
            # parser is fetched by name so subclasses can override it
            return getattr(ie, parser)(content, *args, **kwargs)

        def download_handle(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                            fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            res = self._download_webpage_handle(
                url_or_request, video_id, note=note, errnote=errnote, fatal=fatal, encoding=encoding,
                data=data, headers=headers, query=query, expected_status=expected_status)
            if res is False:
                return res
            content, urlh = res
            return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote), urlh

        def download_content(self, url_or_request, video_id, note=note, errnote=errnote, transform_source=None,
                             fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
            if self.get_param('load_pages'):
                url_or_request = self._create_request(url_or_request, data, headers, query)
                filename = self._request_dump_filename(url_or_request.full_url, video_id)
                self.to_screen(f'Loading request from {filename}')
                try:
                    with open(filename, 'rb') as dumpf:
                        webpage_bytes = dumpf.read()
                except OSError as e:
                    self.report_warning(f'Unable to load request from disk: {e}')
                else:
                    content = self.__decode_webpage(webpage_bytes, encoding, url_or_request.headers)
                    return parse(self, content, video_id, transform_source=transform_source, fatal=fatal, errnote=errnote)
            kwargs = {
                'note': note,
                'errnote': errnote,
                'transform_source': transform_source,
                'fatal': fatal,
                'encoding': encoding,
                'data': data,
                'headers': headers,
                'query': query,
                'expected_status': expected_status,
            }
            if parser is None:
                kwargs.pop('transform_source')
            # The method is fetched by name so subclasses can override _download_..._handle
            res = getattr(self, download_handle.__name__)(url_or_request, video_id, **kwargs)
            return res if res is False else res[0]

        def impersonate(func, name, return_value):
            func.__name__, func.__qualname__ = name, f'InfoExtractor.{name}'
            func.__doc__ = f'''
                @param transform_source    Apply this transformation before parsing
                @returns                   {return_value}

                See _download_webpage_handle docstring for other arguments specification
            '''

        impersonate(download_handle, f'_download_{name}_handle', f'({return_value}, URL handle)')
        impersonate(download_content, f'_download_{name}', f'{return_value}')
        return download_handle, download_content

    _download_xml_handle, _download_xml = __create_download_methods(
        'xml', '_parse_xml', 'Downloading XML', 'Unable to download XML', 'xml as an xml.etree.ElementTree.Element')
    _download_json_handle, _download_json = __create_download_methods(
        'json', '_parse_json', 'Downloading JSON metadata', 'Unable to download JSON metadata', 'JSON object as a dict')
    _download_socket_json_handle, _download_socket_json = __create_download_methods(
        'socket_json', '_parse_socket_response_as_json', 'Polling socket', 'Unable to poll socket', 'JSON object as a dict')
    __download_webpage = __create_download_methods('webpage', None, None, None, 'data of the page as a string')[1]
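
    # The generated methods above are called like _download_webpage; for instance
    # (an illustrative call -- the URL and note are placeholders):
    #     data = self._download_json(
    #         'https://example.com/api/video', video_id,
    #         note='Downloading video metadata', fatal=False)
    # With fatal=False, failures are reported as warnings and a false-y value
    # is returned instead of raising.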
adddc50c 1061
617f658b 1062 def _download_webpage(
1063 self, url_or_request, video_id, note=None, errnote=None,
1064 fatal=True, tries=1, timeout=NO_DEFAULT, *args, **kwargs):
adddc50c 1065 """
617f658b 1066 Return the data of the page as a string.
adddc50c 1067
617f658b 1068 Keyword arguments:
1069 tries -- number of tries
1070 timeout -- sleep interval between tries
1071
1072 See _download_webpage_handle docstring for other arguments specification.
adddc50c 1073 """
617f658b 1074
1075 R''' # NB: These are unused; should they be deprecated?
1076 if tries != 1:
1077 self._downloader.deprecation_warning('tries argument is deprecated in InfoExtractor._download_webpage')
1078 if timeout is NO_DEFAULT:
1079 timeout = 5
1080 else:
1081 self._downloader.deprecation_warning('timeout argument is deprecated in InfoExtractor._download_webpage')
1082 '''
1083
1084 try_count = 0
1085 while True:
1086 try:
1087 return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
ac668111 1088 except http.client.IncompleteRead as e:
617f658b 1089 try_count += 1
1090 if try_count >= tries:
1091 raise e
1092 self._sleep(timeout, video_id)
adddc50c 1093
28f436ba 1094 def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs):
a70635b8 1095 idstr = format_field(video_id, None, '%s: ')
28f436ba 1096 msg = f'[{self.IE_NAME}] {idstr}{msg}'
1097 if only_once:
1098 if f'WARNING: {msg}' in self._printed_messages:
1099 return
1100 self._printed_messages.add(f'WARNING: {msg}')
1101 self._downloader.report_warning(msg, *args, **kwargs)
f45f96f8 1102
a06916d9 1103 def to_screen(self, msg, *args, **kwargs):
d6983cb4 1104 """Print msg to screen, prefixing it with '[ie_name]'"""
86e5f3ed 1105 self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)
a06916d9 1106
1107 def write_debug(self, msg, *args, **kwargs):
86e5f3ed 1108 self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)
a06916d9 1109
1110 def get_param(self, name, default=None, *args, **kwargs):
1111 if self._downloader:
1112 return self._downloader.params.get(name, default, *args, **kwargs)
1113 return default
d6983cb4 1114
d5d1df8a 1115 def report_drm(self, video_id, partial=NO_DEFAULT):
1116 if partial is not NO_DEFAULT:
1117 self._downloader.deprecation_warning('InfoExtractor.report_drm no longer accepts the argument partial')
88acdbc2 1118 self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)
1119
d6983cb4
PH
1120 def report_extraction(self, id_or_name):
1121 """Report information extraction."""
f1a9d64e 1122 self.to_screen('%s: Extracting information' % id_or_name)
d6983cb4
PH
1123
1124 def report_download_webpage(self, video_id):
1125 """Report webpage download."""
f1a9d64e 1126 self.to_screen('%s: Downloading webpage' % video_id)
d6983cb4
PH
1127
1128 def report_age_confirmation(self):
1129 """Report attempt to confirm age."""
f1a9d64e 1130 self.to_screen('Confirming age')
d6983cb4 1131
fc79158d
JMF
1132 def report_login(self):
1133 """Report attempt to log in."""
f1a9d64e 1134 self.to_screen('Logging in')
fc79158d 1135
b7da73eb 1136 def raise_login_required(
9d5d4d64 1137 self, msg='This video is only available for registered users',
52efa4b3 1138 metadata_available=False, method=NO_DEFAULT):
f2ebc5c7 1139 if metadata_available and (
1140 self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
b7da73eb 1141 self.report_warning(msg)
7265a219 1142 return
a70635b8 1143 msg += format_field(self._login_hint(method), None, '. %s')
46890374 1144 raise ExtractorError(msg, expected=True)
43e7d3c9 1145
b7da73eb 1146 def raise_geo_restricted(
1147 self, msg='This video is not available from your location due to geo restriction',
1148 countries=None, metadata_available=False):
f2ebc5c7 1149 if metadata_available and (
1150 self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
b7da73eb 1151 self.report_warning(msg)
1152 else:
1153 raise GeoRestrictedError(msg, countries=countries)
1154
1155 def raise_no_formats(self, msg, expected=False, video_id=None):
f2ebc5c7 1156 if expected and (
1157 self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
b7da73eb 1158 self.report_warning(msg, video_id)
68f5867c
L
1159 elif isinstance(msg, ExtractorError):
1160 raise msg
b7da73eb 1161 else:
1162 raise ExtractorError(msg, expected=expected, video_id=video_id)
c430802e 1163
5f6a1245 1164 # Methods for following #608
c0d0b01f 1165 @staticmethod
311b6615 1166 def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):
10952eb2 1167 """Returns a URL that points to a page that should be processed"""
311b6615 1168 if ie is not None:
1169 kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()
7012b23c 1170 if video_id is not None:
311b6615 1171 kwargs['id'] = video_id
830d53bf 1172 if video_title is not None:
311b6615 1173 kwargs['title'] = video_title
1174 return {
1175 **kwargs,
1176 '_type': 'url_transparent' if url_transparent else 'url',
1177 'url': url,
1178 }
1179
8f97a15d 1180 @classmethod
1181 def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
1182 getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
1183 return cls.playlist_result(
1184 (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
1185 playlist_id, playlist_title, **kwargs)
46b18f23 1186
c0d0b01f 1187 @staticmethod
311b6615 1188 def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):
d6983cb4 1189 """Returns a playlist"""
d6983cb4 1190 if playlist_id:
311b6615 1191 kwargs['id'] = playlist_id
d6983cb4 1192 if playlist_title:
311b6615 1193 kwargs['title'] = playlist_title
ecc97af3 1194 if playlist_description is not None:
311b6615 1195 kwargs['description'] = playlist_description
1196 return {
1197 **kwargs,
1198 '_type': 'multi_video' if multi_video else 'playlist',
1199 'entries': entries,
1200 }
d6983cb4 1201
c342041f 1202 def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
d6983cb4
PH
1203 """
1204 Perform a regex search on the given string, using a single or a list of
1205 patterns returning the first matching group.
1206 In case of failure return a default value or raise a WARNING or a
55b3e45b 1207 RegexNotFoundError, depending on fatal, specifying the field name.
d6983cb4 1208 """
61d3665d 1209 if string is None:
1210 mobj = None
77f90330 1211 elif isinstance(pattern, (str, re.Pattern)):
d6983cb4
PH
1212 mobj = re.search(pattern, string, flags)
1213 else:
1214 for p in pattern:
1215 mobj = re.search(p, string, flags)
c3415d1b
PH
1216 if mobj:
1217 break
d6983cb4 1218
ec11a9f4 1219 _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
d6983cb4
PH
1220
1221 if mobj:
711ede6e
PH
1222 if group is None:
1223 # return the first matching group
1224 return next(g for g in mobj.groups() if g is not None)
198f7ea8 1225 elif isinstance(group, (list, tuple)):
1226 return tuple(mobj.group(g) for g in group)
711ede6e
PH
1227 else:
1228 return mobj.group(group)
c342041f 1229 elif default is not NO_DEFAULT:
d6983cb4
PH
1230 return default
1231 elif fatal:
f1a9d64e 1232 raise RegexNotFoundError('Unable to extract %s' % _name)
d6983cb4 1233 else:
6a39ee13 1234 self.report_warning('unable to extract %s' % _name + bug_reports_message())
d6983cb4
PH
1235 return None
1236
f0bc6e20 1237 def _search_json(self, start_pattern, string, name, video_id, *, end_pattern='',
8b7fb8b6 1238 contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT, **kwargs):
b7c47b74 1239 """Searches string for the JSON object specified by start_pattern"""
1240 # NB: end_pattern is only used to reduce the size of the initial match
f0bc6e20 1241 if default is NO_DEFAULT:
1242 default, has_default = {}, False
1243 else:
1244 fatal, has_default = False, True
1245
1246 json_string = self._search_regex(
8b7fb8b6 1247 rf'(?:{start_pattern})\s*(?P<json>{contains_pattern})\s*(?:{end_pattern})',
f0bc6e20 1248 string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
1249 if not json_string:
1250 return default
1251
1252 _name = self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
1253 try:
1254 return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
1255 except ExtractorError as e:
1256 if fatal:
1257 raise ExtractorError(
1258 f'Unable to extract {_name} - Failed to parse JSON', cause=e.cause, video_id=video_id)
1259 elif not has_default:
1260 self.report_warning(
1261 f'Unable to extract {_name} - Failed to parse JSON: {e}', video_id=video_id)
1262 return default
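# Illustrative sketch only (start_pattern is hypothetical): start_pattern
# anchors the match, contains_pattern captures the object itself, and
# end_pattern merely trims the initial match:
#
#     player_config = self._search_json(
#         r'window\.playerConfig\s*=', webpage, 'player config', video_id,
#         end_pattern=';', default={})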
b7c47b74 1263
c342041f 1264 def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
d6983cb4
PH
1265 """
1266 Like _search_regex, but strips HTML tags and unescapes entities.
1267 """
711ede6e 1268 res = self._search_regex(pattern, string, name, default, fatal, flags, group)
d6983cb4
PH
1269 if res:
1270 return clean_html(res).strip()
1271 else:
1272 return res
1273
2118fdd1
RA
1274 def _get_netrc_login_info(self, netrc_machine=None):
1275 username = None
1276 password = None
1277 netrc_machine = netrc_machine or self._NETRC_MACHINE
1278
a06916d9 1279 if self.get_param('usenetrc', False):
2118fdd1 1280 try:
0001fcb5 1281 netrc_file = compat_expanduser(self.get_param('netrc_location') or '~')
1282 if os.path.isdir(netrc_file):
1283 netrc_file = os.path.join(netrc_file, '.netrc')
1284 info = netrc.netrc(file=netrc_file).authenticators(netrc_machine)
2118fdd1
RA
1285 if info is not None:
1286 username = info[0]
1287 password = info[2]
1288 else:
dcce092e
S
1289 raise netrc.NetrcParseError(
1290 'No authenticators for %s' % netrc_machine)
86e5f3ed 1291 except (OSError, netrc.NetrcParseError) as err:
6a39ee13 1292 self.report_warning(
dcce092e 1293 'parsing .netrc: %s' % error_to_compat_str(err))
2118fdd1 1294
dcce092e 1295 return username, password
2118fdd1 1296
1b6712ab 1297 def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
fc79158d 1298 """
cf0649f8 1299 Get the login info as (username, password)
32443dd3
S
1300 First look for the manually specified credentials using username_option
1301 and password_option as keys in params dictionary. If no such credentials
1302 available look in the netrc file using the netrc_machine or _NETRC_MACHINE
1303 value.
fc79158d
JMF
1304 If there's no info available, return (None, None)
1305 """
fc79158d
JMF
1306
1307 # Attempt to use provided username and password or .netrc data
a06916d9 1308 username = self.get_param(username_option)
1309 if username is not None:
1310 password = self.get_param(password_option)
2118fdd1 1311 else:
1b6712ab 1312 username, password = self._get_netrc_login_info(netrc_machine)
5f6a1245 1313
2133565c 1314 return username, password
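# Illustrative sketch: --username/--password (or the extractor-specific
# options) take precedence; otherwise, with --netrc, the credentials for
# self._NETRC_MACHINE are read from a standard entry such as
#     machine <netrc_machine> login someuser password somepass
#
#     username, password = self._get_login_info()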
fc79158d 1315
e64b7569 1316 def _get_tfa_info(self, note='two-factor verification code'):
83317f69 1317 """
1318 Get the two-factor authentication info
1319 TODO - asking the user will be required for SMS/phone verification;
1320 currently this just uses the command line option.
1321 If there's no info available, return None
1322 """
83317f69 1323
a06916d9 1324 tfa = self.get_param('twofactor')
1325 if tfa is not None:
1326 return tfa
83317f69 1327
ac668111 1328 return getpass.getpass('Type %s and press [Return]: ' % note)
83317f69 1329
46720279
JMF
1330 # Helper functions for extracting OpenGraph info
1331 @staticmethod
ab2d5247 1332 def _og_regexes(prop):
448ef1f3 1333 content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
fbfde1c3
F
1334 property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
1335 % {'prop': re.escape(prop), 'sep': '(?:&#x3A;|[:-])'})
78fb87b2 1336 template = r'<meta[^>]+?%s[^>]+?%s'
ab2d5247 1337 return [
78fb87b2
JMF
1338 template % (property_re, content_re),
1339 template % (content_re, property_re),
ab2d5247 1340 ]
46720279 1341
864f24bd
S
1342 @staticmethod
1343 def _meta_regex(prop):
1344 return r'''(?isx)<meta
8b9848ac 1345 (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
864f24bd
S
1346 [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
1347
3c4e6d83 1348 def _og_search_property(self, prop, html, name=None, **kargs):
6606817a 1349 prop = variadic(prop)
46720279 1350 if name is None:
b070564e
S
1351 name = 'OpenGraph %s' % prop[0]
1352 og_regexes = []
1353 for p in prop:
1354 og_regexes.extend(self._og_regexes(p))
1355 escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
eb0a8398
PH
1356 if escaped is None:
1357 return None
1358 return unescapeHTML(escaped)
46720279
JMF
1359
1360 def _og_search_thumbnail(self, html, **kargs):
10952eb2 1361 return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
46720279
JMF
1362
1363 def _og_search_description(self, html, **kargs):
1364 return self._og_search_property('description', html, fatal=False, **kargs)
1365
04f3fd2c 1366 def _og_search_title(self, html, *, fatal=False, **kargs):
1367 return self._og_search_property('title', html, fatal=fatal, **kargs)
46720279 1368
8ffa13e0 1369 def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
a3681973
PH
1370 regexes = self._og_regexes('video') + self._og_regexes('video:url')
1371 if secure:
1372 regexes = self._og_regexes('video:secure_url') + regexes
8ffa13e0 1373 return self._html_search_regex(regexes, html, name, **kargs)
46720279 1374
78338f71
JMF
1375 def _og_search_url(self, html, **kargs):
1376 return self._og_search_property('url', html, **kargs)
1377
04f3fd2c 1378 def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
21633673 1379 return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)
77cc7c6e 1380
40c696e5 1381 def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
6606817a 1382 name = variadic(name)
59040888 1383 if display_name is None:
88d9f6c0 1384 display_name = name[0]
59040888 1385 return self._html_search_regex(
88d9f6c0 1386 [self._meta_regex(n) for n in name],
711ede6e 1387 html, display_name, fatal=fatal, group='content', **kwargs)
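# Illustrative sketch only (the meta names are hypothetical examples):
#
#     title = self._og_search_title(webpage) or self._html_extract_title(webpage)
#     description = self._og_search_description(webpage)
#     uploader = self._html_search_meta(
#         ('author', 'article:author'), webpage, 'uploader', default=None)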
59040888
PH
1388
1389 def _dc_search_uploader(self, html):
1390 return self._html_search_meta('dc.creator', html, 'uploader')
1391
8f97a15d 1392 @staticmethod
1393 def _rta_search(html):
8dbe9899
PH
1394 # See http://www.rtalabel.org/index.php?content=howtofaq#single
1395 if re.search(r'(?ix)<meta\s+name="rating"\s+'
1396 r' content="RTA-5042-1996-1400-1577-RTA"',
1397 html):
1398 return 18
8f97a15d 1399
1400 # And then there are the jokers who advertise that they use RTA, but actually don't.
1401 AGE_LIMIT_MARKERS = [
1402 r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
1403 ]
1404 if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
1405 return 18
8dbe9899
PH
1406 return 0
1407
59040888
PH
1408 def _media_rating_search(self, html):
1409 # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
1410 rating = self._html_search_meta('rating', html)
1411
1412 if not rating:
1413 return None
1414
1415 RATING_TABLE = {
1416 'safe for kids': 0,
1417 'general': 8,
1418 '14 years': 14,
1419 'mature': 17,
1420 'restricted': 19,
1421 }
d800609c 1422 return RATING_TABLE.get(rating.lower())
59040888 1423
69319969 1424 def _family_friendly_search(self, html):
6ca7732d 1425 # See http://schema.org/VideoObject
ac8491fc
S
1426 family_friendly = self._html_search_meta(
1427 'isFamilyFriendly', html, default=None)
69319969
NJ
1428
1429 if not family_friendly:
1430 return None
1431
1432 RATING_TABLE = {
1433 '1': 0,
1434 'true': 0,
1435 '0': 18,
1436 'false': 18,
1437 }
d800609c 1438 return RATING_TABLE.get(family_friendly.lower())
69319969 1439
0c708f11
JMF
1440 def _twitter_search_player(self, html):
1441 return self._html_search_meta('twitter:player', html,
9e1a5b84 1442 'twitter card player')
0c708f11 1443
0c36dc00 1444 def _yield_json_ld(self, html, video_id, *, fatal=True, default=NO_DEFAULT):
1445 """Yield all json ld objects in the html"""
1446 if default is not NO_DEFAULT:
1447 fatal = False
1448 for mobj in re.finditer(JSON_LD_RE, html):
1449 json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
1450 for json_ld in variadic(json_ld_item):
1451 if isinstance(json_ld, dict):
1452 yield json_ld
1453
1454 def _search_json_ld(self, html, video_id, expected_type=None, *, fatal=True, default=NO_DEFAULT):
1455 """Search for a video in any json ld in the html"""
1456 if default is not NO_DEFAULT:
1457 fatal = False
1458 info = self._json_ld(
1459 list(self._yield_json_ld(html, video_id, fatal=fatal, default=default)),
1460 video_id, fatal=fatal, expected_type=expected_type)
1461 if info:
1462 return info
4433bb02
S
1463 if default is not NO_DEFAULT:
1464 return default
1465 elif fatal:
1466 raise RegexNotFoundError('Unable to extract JSON-LD')
1467 else:
6a39ee13 1468 self.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
4433bb02 1469 return {}
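# Illustrative sketch: JSON-LD data is typically merged with site-specific
# fields, e.g. letting any extracted VideoObject values take precedence:
#
#     info = self._search_json_ld(
#         webpage, video_id, expected_type='VideoObject', default={})
#     return {
#         'id': video_id,
#         'title': self._og_search_title(webpage),
#         **info,
#     }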
4ca2a3cf 1470
95b31e26 1471 def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
14f25df2 1472 if isinstance(json_ld, str):
4ca2a3cf
S
1473 json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
1474 if not json_ld:
1475 return {}
1476 info = {}
bae14048 1477
e7e4a6e0
S
1478 INTERACTION_TYPE_MAP = {
1479 'CommentAction': 'comment',
1480 'AgreeAction': 'like',
1481 'DisagreeAction': 'dislike',
1482 'LikeAction': 'like',
1483 'DislikeAction': 'dislike',
1484 'ListenAction': 'view',
1485 'WatchAction': 'view',
1486 'ViewAction': 'view',
1487 }
1488
f3c0c773 1489 def is_type(e, *expected_types):
1490 type = variadic(traverse_obj(e, '@type'))
1491 return any(x in type for x in expected_types)
1492
29f7c58a 1493 def extract_interaction_type(e):
1494 interaction_type = e.get('interactionType')
1495 if isinstance(interaction_type, dict):
1496 interaction_type = interaction_type.get('@type')
1497 return str_or_none(interaction_type)
1498
e7e4a6e0
S
1499 def extract_interaction_statistic(e):
1500 interaction_statistic = e.get('interactionStatistic')
29f7c58a 1501 if isinstance(interaction_statistic, dict):
1502 interaction_statistic = [interaction_statistic]
e7e4a6e0
S
1503 if not isinstance(interaction_statistic, list):
1504 return
1505 for is_e in interaction_statistic:
f3c0c773 1506 if not is_type(is_e, 'InteractionCounter'):
e7e4a6e0 1507 continue
29f7c58a 1508 interaction_type = extract_interaction_type(is_e)
1509 if not interaction_type:
e7e4a6e0 1510 continue
ce5b9040
S
1511 # For the interaction count, some sites provide a string instead of
1512 # an integer (as per the spec), with non-digit characters (e.g. ","),
1513 # so extract the count with the more relaxed str_to_int
1514 interaction_count = str_to_int(is_e.get('userInteractionCount'))
e7e4a6e0
S
1515 if interaction_count is None:
1516 continue
1517 count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
1518 if not count_kind:
1519 continue
1520 count_key = '%s_count' % count_kind
1521 if info.get(count_key) is not None:
1522 continue
1523 info[count_key] = interaction_count
1524
f5225737 1525 def extract_chapter_information(e):
1526 chapters = [{
1527 'title': part.get('name'),
1528 'start_time': part.get('startOffset'),
1529 'end_time': part.get('endOffset'),
85553414 1530 } for part in variadic(e.get('hasPart') or []) if part.get('@type') == 'Clip']
f5225737 1531 for idx, (last_c, current_c, next_c) in enumerate(zip(
1532 [{'end_time': 0}] + chapters, chapters, chapters[1:])):
1533 current_c['end_time'] = current_c['end_time'] or next_c['start_time']
1534 current_c['start_time'] = current_c['start_time'] or last_c['end_time']
1535 if None in current_c.values():
1536 self.report_warning(f'Chapter {idx} contains broken data. Not extracting chapters')
1537 return
1538 if chapters:
1539 chapters[-1]['end_time'] = chapters[-1]['end_time'] or info['duration']
1540 info['chapters'] = chapters
1541
bae14048 1542 def extract_video_object(e):
f7ad7160 1543 author = e.get('author')
bae14048 1544 info.update({
0c36dc00 1545 'url': url_or_none(e.get('contentUrl')),
0f60ba6e 1546 'ext': mimetype2ext(e.get('encodingFormat')),
bae14048
S
1547 'title': unescapeHTML(e.get('name')),
1548 'description': unescapeHTML(e.get('description')),
eb2333bc 1549 'thumbnails': [{'url': unescapeHTML(url)}
21633673 1550 for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
1551 if url_or_none(url)],
bae14048
S
1552 'duration': parse_duration(e.get('duration')),
1553 'timestamp': unified_timestamp(e.get('uploadDate')),
f7ad7160 1554 # author can be an instance of the 'Organization' or 'Person' types.
1555 # Both types can have a 'name' property (inherited from the 'Thing' type). [1]
1556 # However, some websites use the 'Text' type instead.
1557 # 1. https://schema.org/VideoObject
14f25df2 1558 'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
0f60ba6e 1559 'artist': traverse_obj(e, ('byArtist', 'name'), expected_type=str),
56ba69e4 1560 'filesize': int_or_none(float_or_none(e.get('contentSize'))),
bae14048
S
1561 'tbr': int_or_none(e.get('bitrate')),
1562 'width': int_or_none(e.get('width')),
1563 'height': int_or_none(e.get('height')),
33a81c2c 1564 'view_count': int_or_none(e.get('interactionCount')),
0f60ba6e 1565 'tags': try_call(lambda: e.get('keywords').split(',')),
bae14048 1566 })
0f60ba6e 1567 if is_type(e, 'AudioObject'):
1568 info.update({
1569 'vcodec': 'none',
1570 'abr': int_or_none(e.get('bitrate')),
1571 })
e7e4a6e0 1572 extract_interaction_statistic(e)
f5225737 1573 extract_chapter_information(e)
bae14048 1574
d5c32548 1575 def traverse_json_ld(json_ld, at_top_level=True):
1d55ebab
SS
1576 for e in variadic(json_ld):
1577 if not isinstance(e, dict):
1578 continue
d5c32548
ZM
1579 if at_top_level and '@context' not in e:
1580 continue
1581 if at_top_level and set(e.keys()) == {'@context', '@graph'}:
1d55ebab 1582 traverse_json_ld(e['@graph'], at_top_level=False)
c13a301a 1583 continue
f3c0c773 1584 if expected_type is not None and not is_type(e, expected_type):
4433bb02 1585 continue
8f122fa0 1586 rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
1587 if rating is not None:
1588 info['average_rating'] = rating
f3c0c773 1589 if is_type(e, 'TVEpisode', 'Episode'):
440863ad 1590 episode_name = unescapeHTML(e.get('name'))
46933a15 1591 info.update({
440863ad 1592 'episode': episode_name,
46933a15
S
1593 'episode_number': int_or_none(e.get('episodeNumber')),
1594 'description': unescapeHTML(e.get('description')),
1595 })
440863ad
S
1596 if not info.get('title') and episode_name:
1597 info['title'] = episode_name
46933a15 1598 part_of_season = e.get('partOfSeason')
f3c0c773 1599 if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
458fd30f
S
1600 info.update({
1601 'season': unescapeHTML(part_of_season.get('name')),
1602 'season_number': int_or_none(part_of_season.get('seasonNumber')),
1603 })
d16b3c66 1604 part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
f3c0c773 1605 if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
46933a15 1606 info['series'] = unescapeHTML(part_of_series.get('name'))
f3c0c773 1607 elif is_type(e, 'Movie'):
391256dc
S
1608 info.update({
1609 'title': unescapeHTML(e.get('name')),
1610 'description': unescapeHTML(e.get('description')),
1611 'duration': parse_duration(e.get('duration')),
1612 'timestamp': unified_timestamp(e.get('dateCreated')),
1613 })
f3c0c773 1614 elif is_type(e, 'Article', 'NewsArticle'):
46933a15
S
1615 info.update({
1616 'timestamp': parse_iso8601(e.get('datePublished')),
1617 'title': unescapeHTML(e.get('headline')),
d5c32548 1618 'description': unescapeHTML(e.get('articleBody') or e.get('description')),
46933a15 1619 })
f3c0c773 1620 if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
2edb38e8 1621 extract_video_object(e['video'][0])
f3c0c773 1622 elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
e50c3500 1623 extract_video_object(e['subjectOf'][0])
0f60ba6e 1624 elif is_type(e, 'VideoObject', 'AudioObject'):
bae14048 1625 extract_video_object(e)
4433bb02
S
1626 if expected_type is None:
1627 continue
1628 else:
1629 break
c69701c6 1630 video = e.get('video')
f3c0c773 1631 if is_type(video, 'VideoObject'):
c69701c6 1632 extract_video_object(video)
4433bb02
S
1633 if expected_type is None:
1634 continue
1635 else:
1636 break
d5c32548 1637
1d55ebab 1638 traverse_json_ld(json_ld)
90137ca4 1639 return filter_dict(info)
4ca2a3cf 1640
135dfa2c 1641 def _search_nextjs_data(self, webpage, video_id, *, transform_source=None, fatal=True, **kw):
f98709af
LL
1642 return self._parse_json(
1643 self._search_regex(
1644 r'(?s)<script[^>]+id=[\'"]__NEXT_DATA__[\'"][^>]*>([^<]+)</script>',
135dfa2c 1645 webpage, 'next.js data', fatal=fatal, **kw),
1646 video_id, transform_source=transform_source, fatal=fatal)
f98709af 1647
8072ef2b 1648 def _search_nuxt_data(self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)):
1649 """Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
66f4c04e 1650 rectx = re.escape(context_name)
8072ef2b 1651 FUNCTION_RE = r'\(function\((?P<arg_keys>.*?)\){return\s+(?P<js>{.*?})\s*;?\s*}\((?P<arg_vals>.*?)\)'
66f4c04e 1652 js, arg_keys, arg_vals = self._search_regex(
8072ef2b 1653 (rf'<script>\s*window\.{rectx}={FUNCTION_RE}\s*\)\s*;?\s*</script>', rf'{rectx}\(.*?{FUNCTION_RE}'),
f7fc8d39 1654 webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
1655 default=NO_DEFAULT if fatal else (None, None, None))
1656 if js is None:
1657 return {}
66f4c04e
THD
1658
1659 args = dict(zip(arg_keys.split(','), arg_vals.split(',')))
1660
1661 for key, val in args.items():
1662 if val in ('undefined', 'void 0'):
1663 args[key] = 'null'
1664
8072ef2b 1665 ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
1666 return traverse_obj(ret, traverse) or {}
66f4c04e 1667
27713812 1668 @staticmethod
f8da79f8 1669 def _hidden_inputs(html):
586f1cc5 1670 html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
201ea3ee 1671 hidden_inputs = {}
c8498368
S
1672 for input in re.findall(r'(?i)(<input[^>]+>)', html):
1673 attrs = extract_attributes(input)
1674 if not input:
201ea3ee 1675 continue
c8498368 1676 if attrs.get('type') not in ('hidden', 'submit'):
201ea3ee 1677 continue
c8498368
S
1678 name = attrs.get('name') or attrs.get('id')
1679 value = attrs.get('value')
1680 if name and value is not None:
1681 hidden_inputs[name] = value
201ea3ee 1682 return hidden_inputs
27713812 1683
cf61d96d
S
1684 def _form_hidden_inputs(self, form_id, html):
1685 form = self._search_regex(
73eb13df 1686 r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
cf61d96d
S
1687 html, '%s form' % form_id, group='form')
1688 return self._hidden_inputs(form)
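# Illustrative sketch of a typical login flow (form id, URL and field names
# are hypothetical; urlencode_postdata comes from ..utils):
#
#     login_form = self._form_hidden_inputs('login-form', login_page)
#     login_form.update({'username': username, 'password': password})
#     self._download_webpage(
#         login_url, None, 'Logging in', data=urlencode_postdata(login_form))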
1689
d0d74b71 1690 @classproperty(cache=True)
1691 def FormatSort(cls):
1692 class FormatSort(FormatSorter):
1693 def __init__(ie, *args, **kwargs):
1694 super().__init__(ie._downloader, *args, **kwargs)
eb8a4433 1695
d0d74b71 1696 deprecation_warning(
1697 'yt_dlp.InfoExtractor.FormatSort is deprecated and may be removed in the future. '
1698 'Use yt_dlp.utils.FormatSorter instead')
1699 return FormatSort
eb8a4433 1700
1701 def _sort_formats(self, formats, field_preference=[]):
9f14daf2 1702 if not field_preference:
1703 self._downloader.deprecation_warning(
1704 'yt_dlp.InfoExtractor._sort_formats is deprecated and is no longer required')
1705 return
1706 self._downloader.deprecation_warning(
1707 'yt_dlp.InfoExtractor._sort_formats is deprecated and no longer works as expected. '
1708 'Return _format_sort_fields in the info_dict instead')
1709 if formats:
784320c9 1710 formats[0]['__sort_fields'] = field_preference
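# Since _sort_formats is deprecated, the preference is instead declared in the
# returned info dict (field names are the same ones accepted by --format-sort):
#
#     return {
#         'id': video_id,
#         'title': title,
#         'formats': formats,
#         '_format_sort_fields': ('res', 'tbr', 'proto'),
#     }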
59040888 1711
96a53167
S
1712 def _check_formats(self, formats, video_id):
1713 if formats:
1714 formats[:] = filter(
1715 lambda f: self._is_valid_url(
1716 f['url'], video_id,
1717 item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
1718 formats)
1719
f5bdb444
S
1720 @staticmethod
1721 def _remove_duplicate_formats(formats):
1722 format_urls = set()
1723 unique_formats = []
1724 for f in formats:
1725 if f['url'] not in format_urls:
1726 format_urls.add(f['url'])
1727 unique_formats.append(f)
1728 formats[:] = unique_formats
1729
45024183 1730 def _is_valid_url(self, url, video_id, item='video', headers={}):
2f0f6578
S
1731 url = self._proto_relative_url(url, scheme='http:')
1732 # For now assume non HTTP(S) URLs always valid
1733 if not (url.startswith('http://') or url.startswith('https://')):
1734 return True
96a53167 1735 try:
45024183 1736 self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
96a53167 1737 return True
8bdd16b4 1738 except ExtractorError as e:
25e911a9 1739 self.to_screen(
8bdd16b4 1740 '%s: %s URL is invalid, skipping: %s'
1741 % (video_id, item, error_to_compat_str(e.cause)))
25e911a9 1742 return False
96a53167 1743
20991253 1744 def http_scheme(self):
1ede5b24 1745 """ Either "http:" or "https:", depending on the user's preferences """
20991253
PH
1746 return (
1747 'http:'
a06916d9 1748 if self.get_param('prefer_insecure', False)
20991253
PH
1749 else 'https:')
1750
57c7411f 1751 def _proto_relative_url(self, url, scheme=None):
8f97a15d 1752 scheme = scheme or self.http_scheme()
1753 assert scheme.endswith(':')
1754 return sanitize_url(url, scheme=scheme[:-1])
57c7411f 1755
4094b6e3
PH
1756 def _sleep(self, timeout, video_id, msg_template=None):
1757 if msg_template is None:
f1a9d64e 1758 msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
4094b6e3
PH
1759 msg = msg_template % {'video_id': video_id, 'timeout': timeout}
1760 self.to_screen(msg)
1761 time.sleep(timeout)
1762
f983b875 1763 def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
4de61310 1764 transform_source=lambda s: fix_xml_ampersands(s).strip(),
7360c06f 1765 fatal=True, m3u8_id=None, data=None, headers={}, query={}):
a076c1f9 1766 res = self._download_xml_handle(
f036a632 1767 manifest_url, video_id, 'Downloading f4m manifest',
97f4aecf
S
1768 'Unable to download f4m manifest',
1769 # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
067aa17e 1770 # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
4de61310 1771 transform_source=transform_source,
7360c06f 1772 fatal=fatal, data=data, headers=headers, query=query)
a076c1f9 1773 if res is False:
8d29e47f 1774 return []
31bb8d3f 1775
a076c1f9
E
1776 manifest, urlh = res
1777 manifest_url = urlh.geturl()
1778
0fdbb332 1779 return self._parse_f4m_formats(
f983b875 1780 manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
448bb5f3 1781 transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
0fdbb332 1782
f983b875 1783 def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
0fdbb332 1784 transform_source=lambda s: fix_xml_ampersands(s).strip(),
448bb5f3 1785 fatal=True, m3u8_id=None):
f9934b96 1786 if not isinstance(manifest, xml.etree.ElementTree.Element) and not fatal:
d9eb580a
S
1787 return []
1788
7a5c1cfe 1789 # currently yt-dlp cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
fb72ec58 1790 akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
1791 if akamai_pv is not None and ';' in akamai_pv.text:
1792 playerVerificationChallenge = akamai_pv.text.split(';')[0]
1793 if playerVerificationChallenge.strip() != '':
1794 return []
1795
31bb8d3f 1796 formats = []
7a47d07c 1797 manifest_version = '1.0'
b2527359 1798 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
34e48bed 1799 if not media_nodes:
7a47d07c 1800 manifest_version = '2.0'
34e48bed 1801 media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
b22ca762 1802 # Remove unsupported DRM protected media from final formats
067aa17e 1803 # rendition (see https://github.com/ytdl-org/youtube-dl/issues/8573).
b22ca762
S
1804 media_nodes = remove_encrypted_media(media_nodes)
1805 if not media_nodes:
1806 return formats
48107c19
S
1807
1808 manifest_base_url = get_base_url(manifest)
0a5685b2 1809
a6571f10 1810 bootstrap_info = xpath_element(
0a5685b2
YCH
1811 manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
1812 'bootstrap info', default=None)
1813
edd6074c
RA
1814 vcodec = None
1815 mime_type = xpath_text(
1816 manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
1817 'base URL', default=None)
1818 if mime_type and mime_type.startswith('audio/'):
1819 vcodec = 'none'
1820
b2527359 1821 for i, media_el in enumerate(media_nodes):
77b8b4e6
S
1822 tbr = int_or_none(media_el.attrib.get('bitrate'))
1823 width = int_or_none(media_el.attrib.get('width'))
1824 height = int_or_none(media_el.attrib.get('height'))
34921b43 1825 format_id = join_nonempty(f4m_id, tbr or i)
448bb5f3
YCH
1826 # If <bootstrapInfo> is present, the specified f4m is a
1827 # stream-level manifest, and only set-level manifests may refer to
1828 # external resources. See section 11.4 and section 4 of F4M spec
1829 if bootstrap_info is None:
1830 media_url = None
1831 # @href is introduced in 2.0, see section 11.6 of F4M spec
1832 if manifest_version == '2.0':
1833 media_url = media_el.attrib.get('href')
1834 if media_url is None:
1835 media_url = media_el.attrib.get('url')
31c746e5
S
1836 if not media_url:
1837 continue
cc357c4d
S
1838 manifest_url = (
1839 media_url if media_url.startswith('http://') or media_url.startswith('https://')
48107c19 1840 else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
70f0f5a8
S
1841 # If media_url is itself a f4m manifest do the recursive extraction
1842 # since bitrates in parent manifest (this one) and media_url manifest
1843 # may differ leading to inability to resolve the format by requested
1844 # bitrate in f4m downloader
240b6045
YCH
1845 ext = determine_ext(manifest_url)
1846 if ext == 'f4m':
77b8b4e6 1847 f4m_formats = self._extract_f4m_formats(
f983b875 1848 manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
77b8b4e6
S
1849 transform_source=transform_source, fatal=fatal)
1850 # Sometimes a stream-level manifest contains a single media entry that
1851 # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
1852 # At the same time, the parent's media entry in the set-level manifest may
1853 # contain it. We will copy it from the parent in such cases.
1854 if len(f4m_formats) == 1:
1855 f = f4m_formats[0]
1856 f.update({
1857 'tbr': f.get('tbr') or tbr,
1858 'width': f.get('width') or width,
1859 'height': f.get('height') or height,
1860 'format_id': f.get('format_id') if not tbr else format_id,
edd6074c 1861 'vcodec': vcodec,
77b8b4e6
S
1862 })
1863 formats.extend(f4m_formats)
70f0f5a8 1864 continue
240b6045
YCH
1865 elif ext == 'm3u8':
1866 formats.extend(self._extract_m3u8_formats(
1867 manifest_url, video_id, 'mp4', preference=preference,
f983b875 1868 quality=quality, m3u8_id=m3u8_id, fatal=fatal))
240b6045 1869 continue
31bb8d3f 1870 formats.append({
77b8b4e6 1871 'format_id': format_id,
31bb8d3f 1872 'url': manifest_url,
30d0b549 1873 'manifest_url': manifest_url,
a6571f10 1874 'ext': 'flv' if bootstrap_info is not None else None,
187ee66c 1875 'protocol': 'f4m',
b2527359 1876 'tbr': tbr,
77b8b4e6
S
1877 'width': width,
1878 'height': height,
edd6074c 1879 'vcodec': vcodec,
60ca389c 1880 'preference': preference,
f983b875 1881 'quality': quality,
31bb8d3f 1882 })
31bb8d3f
JMF
1883 return formats
1884
f983b875 1885 def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None):
16da9bbc 1886 return {
34921b43 1887 'format_id': join_nonempty(m3u8_id, 'meta'),
704df56d
PH
1888 'url': m3u8_url,
1889 'ext': ext,
1890 'protocol': 'm3u8',
37768f92 1891 'preference': preference - 100 if preference else -100,
f983b875 1892 'quality': quality,
704df56d
PH
1893 'resolution': 'multiple',
1894 'format_note': 'Quality selection URL',
16da9bbc
YCH
1895 }
1896
b5ae35ee 1897 def _report_ignoring_subs(self, name):
1898 self.report_warning(bug_reports_message(
1899 f'Ignoring subtitle tracks found in the {name} manifest; '
1900 'if any subtitle tracks are missing,'
1901 ), only_once=True)
1902
a0c3b2d5
F
1903 def _extract_m3u8_formats(self, *args, **kwargs):
1904 fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs)
1905 if subs:
b5ae35ee 1906 self._report_ignoring_subs('HLS')
a0c3b2d5
F
1907 return fmts
1908
1909 def _extract_m3u8_formats_and_subtitles(
177877c5 1910 self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native',
a0c3b2d5
F
1911 preference=None, quality=None, m3u8_id=None, note=None,
1912 errnote=None, fatal=True, live=False, data=None, headers={},
1913 query={}):
1914
dbd82a1d 1915 res = self._download_webpage_handle(
81515ad9 1916 m3u8_url, video_id,
37a3bb66 1917 note='Downloading m3u8 information' if note is None else note,
1918 errnote='Failed to download m3u8 information' if errnote is None else errnote,
7360c06f 1919 fatal=fatal, data=data, headers=headers, query=query)
cb252080 1920
dbd82a1d 1921 if res is False:
a0c3b2d5 1922 return [], {}
cb252080 1923
dbd82a1d 1924 m3u8_doc, urlh = res
37113045 1925 m3u8_url = urlh.geturl()
9cdffeeb 1926
a0c3b2d5 1927 return self._parse_m3u8_formats_and_subtitles(
cb252080 1928 m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
310c2ed2 1929 preference=preference, quality=quality, m3u8_id=m3u8_id,
1930 note=note, errnote=errnote, fatal=fatal, live=live, data=data,
1931 headers=headers, query=query, video_id=video_id)
cb252080 1932
a0c3b2d5 1933 def _parse_m3u8_formats_and_subtitles(
42676437 1934 self, m3u8_doc, m3u8_url=None, ext=None, entry_protocol='m3u8_native',
a0c3b2d5
F
1935 preference=None, quality=None, m3u8_id=None, live=False, note=None,
1936 errnote=None, fatal=True, data=None, headers={}, query={},
1937 video_id=None):
60755938 1938 formats, subtitles = [], {}
a0c3b2d5 1939
6b993ca7 1940 has_drm = re.search('|'.join([
1941 r'#EXT-X-FAXS-CM:', # Adobe Flash Access
1942 r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', # Apple FairPlay
1943 ]), m3u8_doc)
a0c3b2d5 1944
60755938 1945 def format_url(url):
14f25df2 1946 return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)
60755938 1947
1948 if self.get_param('hls_split_discontinuity', False):
1949 def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
1950 if not m3u8_doc:
1951 if not manifest_url:
1952 return []
1953 m3u8_doc = self._download_webpage(
1954 manifest_url, video_id, fatal=fatal, data=data, headers=headers,
1955 note=False, errnote='Failed to download m3u8 playlist information')
1956 if m3u8_doc is False:
1957 return []
1958 return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines()))
0def7587 1959
60755938 1960 else:
1961 def _extract_m3u8_playlist_indices(*args, **kwargs):
1962 return [None]
310c2ed2 1963
cb252080
S
1964 # References:
1965 # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
067aa17e
S
1966 # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
1967 # 3. https://github.com/ytdl-org/youtube-dl/issues/18923
cb252080
S
1968
1969 # We should try extracting formats only from master playlists [1, 4.3.4],
1970 # i.e. playlists that describe the available qualities. On the other hand,
1971 # media playlists [1, 4.3.3] should be returned as is, since they contain
1972 # just the media without quality renditions.
9cdffeeb 1973 # Fortunately, a master playlist can easily be distinguished from a media
cb252080 1974 # playlist based on the availability of particular tags. As per [1, 4.3.3, 4.3.4],
a0566bbf 1975 # master playlist tags MUST NOT appear in a media playlist and vice versa.
cb252080
S
1976 # As per [1, 4.3.3.1], the #EXT-X-TARGETDURATION tag is REQUIRED for every
1977 # media playlist and MUST NOT appear in a master playlist, so we can
1978 # reliably detect a media playlist with this criterion.
1979
9cdffeeb 1980 if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
60755938 1981 formats = [{
34921b43 1982 'format_id': join_nonempty(m3u8_id, idx),
60755938 1983 'format_index': idx,
42676437 1984 'url': m3u8_url or encode_data_uri(m3u8_doc.encode('utf-8'), 'application/x-mpegurl'),
60755938 1985 'ext': ext,
1986 'protocol': entry_protocol,
1987 'preference': preference,
1988 'quality': quality,
88acdbc2 1989 'has_drm': has_drm,
60755938 1990 } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)]
310c2ed2 1991
a0c3b2d5 1992 return formats, subtitles
cb252080
S
1993
1994 groups = {}
1995 last_stream_inf = {}
1996
1997 def extract_media(x_media_line):
1998 media = parse_m3u8_attributes(x_media_line)
1999 # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
2000 media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
2001 if not (media_type and group_id and name):
2002 return
2003 groups.setdefault(group_id, []).append(media)
a0c3b2d5
F
2004 # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1>
2005 if media_type == 'SUBTITLES':
3907333c 2006 # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the
2007 # EXT-X-MEDIA tag if the media type is SUBTITLES.
2008 # However, lack of URI has been spotted in the wild.
2009 # e.g. NebulaIE; see https://github.com/yt-dlp/yt-dlp/issues/339
2010 if not media.get('URI'):
2011 return
a0c3b2d5
F
2012 url = format_url(media['URI'])
2013 sub_info = {
2014 'url': url,
2015 'ext': determine_ext(url),
2016 }
4a2f19ab
F
2017 if sub_info['ext'] == 'm3u8':
2018 # Per RFC 8216 §3.1, the only possible subtitle format m3u8
2019 # files may contain is WebVTT:
2020 # <https://tools.ietf.org/html/rfc8216#section-3.1>
2021 sub_info['ext'] = 'vtt'
2022 sub_info['protocol'] = 'm3u8_native'
37a3bb66 2023 lang = media.get('LANGUAGE') or 'und'
a0c3b2d5 2024 subtitles.setdefault(lang, []).append(sub_info)
cb252080
S
2025 if media_type not in ('VIDEO', 'AUDIO'):
2026 return
2027 media_url = media.get('URI')
2028 if media_url:
310c2ed2 2029 manifest_url = format_url(media_url)
60755938 2030 formats.extend({
34921b43 2031 'format_id': join_nonempty(m3u8_id, group_id, name, idx),
60755938 2032 'format_note': name,
2033 'format_index': idx,
2034 'url': manifest_url,
2035 'manifest_url': m3u8_url,
2036 'language': media.get('LANGUAGE'),
2037 'ext': ext,
2038 'protocol': entry_protocol,
2039 'preference': preference,
2040 'quality': quality,
2041 'vcodec': 'none' if media_type == 'AUDIO' else None,
2042 } for idx in _extract_m3u8_playlist_indices(manifest_url))
cb252080
S
2043
2044 def build_stream_name():
2045 # Although the specification does not mention a NAME attribute for the
3019cb0c
S
2046 # EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
2047 # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
ddd258f9 2048 # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
cb252080
S
2049 stream_name = last_stream_inf.get('NAME')
2050 if stream_name:
2051 return stream_name
2052 # If there is no NAME in EXT-X-STREAM-INF it will be obtained
2053 # from corresponding rendition group
2054 stream_group_id = last_stream_inf.get('VIDEO')
2055 if not stream_group_id:
2056 return
2057 stream_group = groups.get(stream_group_id)
2058 if not stream_group:
2059 return stream_group_id
2060 rendition = stream_group[0]
2061 return rendition.get('NAME') or stream_group_id
2062
379306ef 2063 # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
2bfc1d9d
RA
2064 # chance to detect video only formats when EXT-X-STREAM-INF tags
2065 # precede EXT-X-MEDIA tags in HLS manifest such as [3].
2066 for line in m3u8_doc.splitlines():
2067 if line.startswith('#EXT-X-MEDIA:'):
2068 extract_media(line)
2069
704df56d
PH
2070 for line in m3u8_doc.splitlines():
2071 if line.startswith('#EXT-X-STREAM-INF:'):
cb252080 2072 last_stream_inf = parse_m3u8_attributes(line)
704df56d
PH
2073 elif line.startswith('#') or not line.strip():
2074 continue
2075 else:
9c99bef7 2076 tbr = float_or_none(
3089bc74
S
2077 last_stream_inf.get('AVERAGE-BANDWIDTH')
2078 or last_stream_inf.get('BANDWIDTH'), scale=1000)
30d0b549 2079 manifest_url = format_url(line.strip())
5ef62fc4 2080
60755938 2081 for idx in _extract_m3u8_playlist_indices(manifest_url):
2082 format_id = [m3u8_id, None, idx]
310c2ed2 2083 # The bandwidth of live streams may differ over time, thus making
2084 # format_id unpredictable. So it's better to keep the provided
2085 # format_id intact.
2086 if not live:
60755938 2087 stream_name = build_stream_name()
34921b43 2088 format_id[1] = stream_name or '%d' % (tbr or len(formats))
310c2ed2 2089 f = {
34921b43 2090 'format_id': join_nonempty(*format_id),
60755938 2091 'format_index': idx,
310c2ed2 2092 'url': manifest_url,
2093 'manifest_url': m3u8_url,
2094 'tbr': tbr,
2095 'ext': ext,
2096 'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
2097 'protocol': entry_protocol,
2098 'preference': preference,
2099 'quality': quality,
2100 }
2101 resolution = last_stream_inf.get('RESOLUTION')
2102 if resolution:
2103 mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
2104 if mobj:
2105 f['width'] = int(mobj.group('width'))
2106 f['height'] = int(mobj.group('height'))
2107 # Unified Streaming Platform
2108 mobj = re.search(
2109 r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
2110 if mobj:
2111 abr, vbr = mobj.groups()
2112 abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
2113 f.update({
2114 'vbr': vbr,
2115 'abr': abr,
2116 })
2117 codecs = parse_codecs(last_stream_inf.get('CODECS'))
2118 f.update(codecs)
2119 audio_group_id = last_stream_inf.get('AUDIO')
2120 # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
2121 # references a rendition group MUST have a CODECS attribute.
62b58c09 2122 # However, this is not always respected. E.g. [2]
310c2ed2 2123 # contains EXT-X-STREAM-INF tag which references AUDIO
2124 # rendition group but does not have CODECS and despite
2125 # referencing an audio group it represents a complete
2126 # (with audio and video) format. So, for such cases we will
2127 # ignore references to rendition groups and treat them
2128 # as complete formats.
2129 if audio_group_id and codecs and f.get('vcodec') != 'none':
2130 audio_group = groups.get(audio_group_id)
2131 if audio_group and audio_group[0].get('URI'):
2132 # TODO: update acodec for audio only formats with
2133 # the same GROUP-ID
2134 f['acodec'] = 'none'
fc21af50 2135 if not f.get('ext'):
2136 f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4'
310c2ed2 2137 formats.append(f)
2138
2139 # for DailyMotion
2140 progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
2141 if progressive_uri:
2142 http_f = f.copy()
2143 del http_f['manifest_url']
2144 http_f.update({
2145 'format_id': f['format_id'].replace('hls-', 'http-'),
2146 'protocol': 'http',
2147 'url': progressive_uri,
2148 })
2149 formats.append(http_f)
5ef62fc4 2150
cb252080 2151 last_stream_inf = {}
a0c3b2d5 2152 return formats, subtitles
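# Illustrative sketch of typical extractor-side usage (variable names assumed):
#
#     formats, subtitles = self._extract_m3u8_formats_and_subtitles(
#         m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
#         m3u8_id='hls', fatal=False)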
704df56d 2153
3cf4b91d
C
2154 def _extract_m3u8_vod_duration(
2155 self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
2156
2157 m3u8_vod = self._download_webpage(
2158 m3u8_vod_url, video_id,
2159 note='Downloading m3u8 VOD manifest' if note is None else note,
2160 errnote='Failed to download VOD manifest' if errnote is None else errnote,
2161 fatal=False, data=data, headers=headers, query=query)
2162
2163 return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
2164
2165 def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
2166 if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
2167 return None
2168
2169 return int(sum(
2170 float(line[len('#EXTINF:'):].split(',')[0])
2171 for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
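# A small worked example (hypothetical playlist text): for a VOD playlist
# containing
#     #EXT-X-PLAYLIST-TYPE:VOD
#     #EXTINF:10.000,
#     seg0.ts
#     #EXTINF:9.500,
#     seg1.ts
# the EXTINF durations sum to 19.5, so int(19.5) == 19 seconds is returned.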
2172
a107193e
S
2173 @staticmethod
2174 def _xpath_ns(path, namespace=None):
2175 if not namespace:
2176 return path
2177 out = []
2178 for c in path.split('/'):
2179 if not c or c == '.':
2180 out.append(c)
2181 else:
2182 out.append('{%s}%s' % (namespace, c))
2183 return '/'.join(out)
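# Illustrative example with a placeholder namespace:
#     _xpath_ns('./head/meta', 'urn:example')
# returns './{urn:example}head/{urn:example}meta', i.e. every path component
# except '.' is qualified with the namespace.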
2184
da1c94ee 2185 def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
a076c1f9
E
2186 res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
2187 if res is False:
995029a1 2188 assert not fatal
774a46c5 2189 return [], {}
e89a2aab 2190
a076c1f9
E
2191 smil, urlh = res
2192 smil_url = urlh.geturl()
2193
17712eeb 2194 namespace = self._parse_smil_namespace(smil)
a107193e 2195
da1c94ee 2196 fmts = self._parse_smil_formats(
a107193e 2197 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
da1c94ee
F
2198 subs = self._parse_smil_subtitles(
2199 smil, namespace=namespace)
2200
2201 return fmts, subs
2202
2203 def _extract_smil_formats(self, *args, **kwargs):
2204 fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
2205 if subs:
b5ae35ee 2206 self._report_ignoring_subs('SMIL')
da1c94ee 2207 return fmts
a107193e
S
2208
2209 def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
a076c1f9
E
2210 res = self._download_smil(smil_url, video_id, fatal=fatal)
2211 if res is False:
a107193e 2212 return {}
a076c1f9
E
2213
2214 smil, urlh = res
2215 smil_url = urlh.geturl()
2216
a107193e
S
2217 return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
2218
09f572fb 2219 def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
a076c1f9 2220 return self._download_xml_handle(
a107193e 2221 smil_url, video_id, 'Downloading SMIL file',
09f572fb 2222 'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
a107193e
S
2223
2224 def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
17712eeb 2225 namespace = self._parse_smil_namespace(smil)
a107193e
S
2226
2227 formats = self._parse_smil_formats(
2228 smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
2229 subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
2230
2231 video_id = os.path.splitext(url_basename(smil_url))[0]
2232 title = None
2233 description = None
647eab45 2234 upload_date = None
a107193e
S
2235 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
2236 name = meta.attrib.get('name')
2237 content = meta.attrib.get('content')
2238 if not name or not content:
2239 continue
2240 if not title and name == 'title':
2241 title = content
2242 elif not description and name in ('description', 'abstract'):
2243 description = content
647eab45
S
2244 elif not upload_date and name == 'date':
2245 upload_date = unified_strdate(content)
a107193e 2246
1e5bcdec
S
2247 thumbnails = [{
2248 'id': image.get('type'),
2249 'url': image.get('src'),
2250 'width': int_or_none(image.get('width')),
2251 'height': int_or_none(image.get('height')),
2252 } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
2253
a107193e
S
2254 return {
2255 'id': video_id,
2256 'title': title or video_id,
2257 'description': description,
647eab45 2258 'upload_date': upload_date,
1e5bcdec 2259 'thumbnails': thumbnails,
a107193e
S
2260 'formats': formats,
2261 'subtitles': subtitles,
2262 }
2263
17712eeb
S
2264 def _parse_smil_namespace(self, smil):
2265 return self._search_regex(
2266 r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
2267
f877c6ae 2268 def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
a107193e
S
2269 base = smil_url
2270 for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
2271 b = meta.get('base') or meta.get('httpBase')
2272 if b:
2273 base = b
2274 break
e89a2aab
S
2275
2276 formats = []
2277 rtmp_count = 0
a107193e 2278 http_count = 0
7f32e5dc 2279 m3u8_count = 0
9359f3d4 2280 imgs_count = 0
a107193e 2281
9359f3d4 2282 srcs = set()
ad96b4c8
YCH
2283 media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
2284 for medium in media:
2285 src = medium.get('src')
81e1c4e2 2286 if not src or src in srcs:
a107193e 2287 continue
9359f3d4 2288 srcs.add(src)
a107193e 2289
ad96b4c8
YCH
2290 bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
2291 filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
2292 width = int_or_none(medium.get('width'))
2293 height = int_or_none(medium.get('height'))
2294 proto = medium.get('proto')
2295 ext = medium.get('ext')
a107193e 2296 src_ext = determine_ext(src)
ad96b4c8 2297 streamer = medium.get('streamer') or base
a107193e
S
2298
2299 if proto == 'rtmp' or streamer.startswith('rtmp'):
2300 rtmp_count += 1
2301 formats.append({
2302 'url': streamer,
2303 'play_path': src,
2304 'ext': 'flv',
2305 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
2306 'tbr': bitrate,
2307 'filesize': filesize,
2308 'width': width,
2309 'height': height,
2310 })
f877c6ae
YCH
2311 if transform_rtmp_url:
2312 streamer, src = transform_rtmp_url(streamer, src)
2313 formats[-1].update({
2314 'url': streamer,
2315 'play_path': src,
2316 })
a107193e
S
2317 continue
2318
14f25df2 2319 src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
c349456e 2320 src_url = src_url.strip()
a107193e
S
2321
2322 if proto == 'm3u8' or src_ext == 'm3u8':
7f32e5dc 2323 m3u8_formats = self._extract_m3u8_formats(
2324 src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
2325 if len(m3u8_formats) == 1:
2326 m3u8_count += 1
2327 m3u8_formats[0].update({
2328 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
2329 'tbr': bitrate,
2330 'width': width,
2331 'height': height,
2332 })
2333 formats.extend(m3u8_formats)
bd21ead2 2334 elif src_ext == 'f4m':
a107193e
S
2335 f4m_url = src_url
2336 if not f4m_params:
2337 f4m_params = {
2338 'hdcore': '3.2.0',
2339 'plugin': 'flowplayer-3.2.0.1',
2340 }
2341 f4m_url += '&' if '?' in f4m_url else '?'
14f25df2 2342 f4m_url += urllib.parse.urlencode(f4m_params)
7e5edcfd 2343 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
bd21ead2
RA
2344 elif src_ext == 'mpd':
2345 formats.extend(self._extract_mpd_formats(
2346 src_url, video_id, mpd_id='dash', fatal=False))
2347 elif re.search(r'\.ism/[Mm]anifest', src_url):
2348 formats.extend(self._extract_ism_formats(
2349 src_url, video_id, ism_id='mss', fatal=False))
2350 elif src_url.startswith('http') and self._is_valid_url(src, video_id):
a107193e
S
2351 http_count += 1
2352 formats.append({
2353 'url': src_url,
2354 'ext': ext or src_ext or 'flv',
2355 'format_id': 'http-%d' % (bitrate or http_count),
2356 'tbr': bitrate,
2357 'filesize': filesize,
2358 'width': width,
2359 'height': height,
2360 })
63757032 2361
9359f3d4
F
2362 for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
2363 src = medium.get('src')
2364 if not src or src in srcs:
2365 continue
2366 srcs.add(src)
2367
2368 imgs_count += 1
2369 formats.append({
2370 'format_id': 'imagestream-%d' % (imgs_count),
2371 'url': src,
2372 'ext': mimetype2ext(medium.get('type')),
2373 'acodec': 'none',
2374 'vcodec': 'none',
2375 'width': int_or_none(medium.get('width')),
2376 'height': int_or_none(medium.get('height')),
2377 'format_note': 'SMIL storyboards',
2378 })
2379
e89a2aab
S
2380 return formats
2381
ce00af87 2382 def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
d413095f 2383 urls = []
a107193e
S
2384 subtitles = {}
2385 for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
2386 src = textstream.get('src')
d413095f 2387 if not src or src in urls:
a107193e 2388 continue
d413095f 2389 urls.append(src)
df634be2 2390 ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
03bc7237 2391 lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
a107193e
S
2392 subtitles.setdefault(lang, []).append({
2393 'url': src,
2394 'ext': ext,
2395 })
2396 return subtitles
63757032 2397
47a5cb77 2398 def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
a076c1f9 2399 res = self._download_xml_handle(
47a5cb77 2400 xspf_url, playlist_id, 'Downloading xpsf playlist',
942acef5 2401 'Unable to download xspf manifest', fatal=fatal)
a076c1f9 2402 if res is False:
942acef5 2403 return []
a076c1f9
E
2404
2405 xspf, urlh = res
2406 xspf_url = urlh.geturl()
2407
47a5cb77
S
2408 return self._parse_xspf(
2409 xspf, playlist_id, xspf_url=xspf_url,
2410 xspf_base_url=base_url(xspf_url))
8d6765cf 2411
47a5cb77 2412 def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
8d6765cf
S
2413 NS_MAP = {
2414 'xspf': 'http://xspf.org/ns/0/',
2415 's1': 'http://static.streamone.nl/player/ns/0',
2416 }
2417
2418 entries = []
47a5cb77 2419 for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
8d6765cf 2420 title = xpath_text(
98044462 2421 track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
8d6765cf
S
2422 description = xpath_text(
2423 track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
2424 thumbnail = xpath_text(
2425 track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
2426 duration = float_or_none(
2427 xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
2428
47a5cb77
S
2429 formats = []
2430 for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
2431 format_url = urljoin(xspf_base_url, location.text)
2432 if not format_url:
2433 continue
2434 formats.append({
2435 'url': format_url,
2436 'manifest_url': xspf_url,
2437 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
2438 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
2439 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
2440 })
8d6765cf
S
2441
2442 entries.append({
2443 'id': playlist_id,
2444 'title': title,
2445 'description': description,
2446 'thumbnail': thumbnail,
2447 'duration': duration,
2448 'formats': formats,
2449 })
2450 return entries
2451
171e59ed
F
2452 def _extract_mpd_formats(self, *args, **kwargs):
2453 fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
2454 if subs:
b5ae35ee 2455 self._report_ignoring_subs('DASH')
171e59ed
F
2456 return fmts
2457
2458 def _extract_mpd_formats_and_subtitles(
2459 self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
2460 fatal=True, data=None, headers={}, query={}):
47a5cb77 2461 res = self._download_xml_handle(
1bac3455 2462 mpd_url, video_id,
37a3bb66 2463 note='Downloading MPD manifest' if note is None else note,
2464 errnote='Failed to download MPD manifest' if errnote is None else errnote,
7360c06f 2465 fatal=fatal, data=data, headers=headers, query=query)
1bac3455 2466 if res is False:
171e59ed 2467 return [], {}
47a5cb77 2468 mpd_doc, urlh = res
c25720ef 2469 if mpd_doc is None:
171e59ed 2470 return [], {}
779da8e3
E
2471
2472 # We could have been redirected to a new url when we retrieved our mpd file.
2473 mpd_url = urlh.geturl()
2474 mpd_base_url = base_url(mpd_url)
1bac3455 2475
171e59ed 2476 return self._parse_mpd_formats_and_subtitles(
545cc85d 2477 mpd_doc, mpd_id, mpd_base_url, mpd_url)
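# Illustrative sketch of typical extractor-side usage:
#
#     formats, subtitles = self._extract_mpd_formats_and_subtitles(
#         mpd_url, video_id, mpd_id='dash', fatal=False)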
2d2fa82d 2478
171e59ed
F
2479 def _parse_mpd_formats(self, *args, **kwargs):
2480 fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
2481 if subs:
b5ae35ee 2482 self._report_ignoring_subs('DASH')
171e59ed
F
2483 return fmts
2484
2485 def _parse_mpd_formats_and_subtitles(
2486 self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
f0948348
S
2487 """
2488 Parse formats from MPD manifest.
2489 References:
2490 1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
2491 http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2492 2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
2493 """
a06916d9 2494 if not self.get_param('dynamic_mpd', True):
78895bd3 2495 if mpd_doc.get('type') == 'dynamic':
171e59ed 2496 return [], {}
2d2fa82d 2497
91cb6b50 2498 namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
f14be228 2499
2500 def _add_ns(path):
2501 return self._xpath_ns(path, namespace)
2502
675d0016 2503 def is_drm_protected(element):
2504 return element.find(_add_ns('ContentProtection')) is not None
2505
1bac3455 2506 def extract_multisegment_info(element, ms_parent_info):
2507 ms_info = ms_parent_info.copy()
b4c1d6e8
S
2508
2509 # As per [1, 5.3.9.2.2], SegmentList and SegmentTemplate share some
2510 # common attributes and elements. We will only extract what is
2511 # relevant for us.
2512 def extract_common(source):
2513 segment_timeline = source.find(_add_ns('SegmentTimeline'))
2514 if segment_timeline is not None:
2515 s_e = segment_timeline.findall(_add_ns('S'))
2516 if s_e:
2517 ms_info['total_number'] = 0
2518 ms_info['s'] = []
2519 for s in s_e:
2520 r = int(s.get('r', 0))
2521 ms_info['total_number'] += 1 + r
2522 ms_info['s'].append({
2523 't': int(s.get('t', 0)),
2524 # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
2525 'd': int(s.attrib['d']),
2526 'r': r,
2527 })
2528 start_number = source.get('startNumber')
2529 if start_number:
2530 ms_info['start_number'] = int(start_number)
2531 timescale = source.get('timescale')
2532 if timescale:
2533 ms_info['timescale'] = int(timescale)
2534 segment_duration = source.get('duration')
2535 if segment_duration:
48504785 2536 ms_info['segment_duration'] = float(segment_duration)
b4c1d6e8
S
2537
2538 def extract_Initialization(source):
2539 initialization = source.find(_add_ns('Initialization'))
2540 if initialization is not None:
2541 ms_info['initialization_url'] = initialization.attrib['sourceURL']
2542
f14be228 2543 segment_list = element.find(_add_ns('SegmentList'))
1bac3455 2544 if segment_list is not None:
b4c1d6e8
S
2545 extract_common(segment_list)
2546 extract_Initialization(segment_list)
f14be228 2547 segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
1bac3455 2548 if segment_urls_e:
2549 ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
1bac3455 2550 else:
f14be228 2551 segment_template = element.find(_add_ns('SegmentTemplate'))
1bac3455 2552 if segment_template is not None:
b4c1d6e8 2553 extract_common(segment_template)
e228616c
S
2554 media = segment_template.get('media')
2555 if media:
2556 ms_info['media'] = media
1bac3455 2557 initialization = segment_template.get('initialization')
2558 if initialization:
e228616c 2559 ms_info['initialization'] = initialization
1bac3455 2560 else:
b4c1d6e8 2561 extract_Initialization(segment_template)
1bac3455 2562 return ms_info
b323e170 2563
1bac3455 2564 mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
6251555f 2565 formats, subtitles = [], {}
234416e4 2566 stream_numbers = collections.defaultdict(int)
f14be228 2567 for period in mpd_doc.findall(_add_ns('Period')):
1bac3455 2568 period_duration = parse_duration(period.get('duration')) or mpd_duration
2569 period_ms_info = extract_multisegment_info(period, {
2570 'start_number': 1,
2571 'timescale': 1,
2572 })
f14be228 2573 for adaptation_set in period.findall(_add_ns('AdaptationSet')):
1bac3455 2574 adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
f14be228 2575 for representation in adaptation_set.findall(_add_ns('Representation')):
1bac3455 2576 representation_attrib = adaptation_set.attrib.copy()
2577 representation_attrib.update(representation.attrib)
f0948348 2578 # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
a6c8b759 2579 mime_type = representation_attrib['mimeType']
171e59ed
F
2580 content_type = representation_attrib.get('contentType', mime_type.split('/')[0])
2581
21633673 2582 codec_str = representation_attrib.get('codecs', '')
2583 # Some kind of binary subtitle found in some youtube livestreams
2584 if mime_type == 'application/x-rawcc':
2585 codecs = {'scodec': codec_str}
2586 else:
2587 codecs = parse_codecs(codec_str)
be2fc5b2 2588 if content_type not in ('video', 'audio', 'text'):
2589 if mime_type == 'image/jpeg':
a8731fcc 2590 content_type = mime_type
21633673 2591 elif codecs.get('vcodec', 'none') != 'none':
4afa3ec4 2592 content_type = 'video'
21633673 2593 elif codecs.get('acodec', 'none') != 'none':
4afa3ec4 2594 content_type = 'audio'
3fe75fdc 2595 elif codecs.get('scodec', 'none') != 'none':
be2fc5b2 2596 content_type = 'text'
6993f78d 2597 elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
2598 content_type = 'text'
cdb19aa4 2599 else:
be2fc5b2 2600 self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
2601 continue
2602
2603 base_url = ''
2604 for element in (representation, adaptation_set, period, mpd_doc):
2605 base_url_e = element.find(_add_ns('BaseURL'))
47046464 2606 if try_call(lambda: base_url_e.text) is not None:
be2fc5b2 2607 base_url = base_url_e.text + base_url
2608 if re.match(r'^https?://', base_url):
2609 break
f9cc0161 2610 if mpd_base_url and base_url.startswith('/'):
14f25df2 2611 base_url = urllib.parse.urljoin(mpd_base_url, base_url)
2612 elif mpd_base_url and not re.match(r'^https?://', base_url):
2613 if not mpd_base_url.endswith('/'):
be2fc5b2 2614 mpd_base_url += '/'
2615 base_url = mpd_base_url + base_url
2616 representation_id = representation_attrib.get('id')
2617 lang = representation_attrib.get('lang')
2618 url_el = representation.find(_add_ns('BaseURL'))
2619 filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
2620 bandwidth = int_or_none(representation_attrib.get('bandwidth'))
2621 if representation_id is not None:
2622 format_id = representation_id
2623 else:
2624 format_id = content_type
2625 if mpd_id:
2626 format_id = mpd_id + '-' + format_id
2627 if content_type in ('video', 'audio'):
2628 f = {
2629 'format_id': format_id,
2630 'manifest_url': mpd_url,
2631 'ext': mimetype2ext(mime_type),
2632 'width': int_or_none(representation_attrib.get('width')),
2633 'height': int_or_none(representation_attrib.get('height')),
2634 'tbr': float_or_none(bandwidth, 1000),
2635 'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
2636 'fps': int_or_none(representation_attrib.get('frameRate')),
2637 'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
2638 'format_note': 'DASH %s' % content_type,
2639 'filesize': filesize,
2640 'container': mimetype2ext(mime_type) + '_dash',
4afa3ec4 2641 **codecs
be2fc5b2 2642 }
be2fc5b2 2643 elif content_type == 'text':
2644 f = {
2645 'ext': mimetype2ext(mime_type),
2646 'manifest_url': mpd_url,
2647 'filesize': filesize,
2648 }
2649 elif content_type == 'image/jpeg':
2650 # See test case in VikiIE
2651 # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
2652 f = {
2653 'format_id': format_id,
2654 'ext': 'mhtml',
2655 'manifest_url': mpd_url,
2656 'format_note': 'DASH storyboards (jpeg)',
2657 'acodec': 'none',
2658 'vcodec': 'none',
2659 }
88acdbc2 2660 if is_drm_protected(adaptation_set) or is_drm_protected(representation):
2661 f['has_drm'] = True
be2fc5b2 2662 representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
2663
2664 def prepare_template(template_name, identifiers):
2665 tmpl = representation_ms_info[template_name]
0cb0fdbb 2666 if representation_id is not None:
2667 tmpl = tmpl.replace('$RepresentationID$', representation_id)
be2fc5b2 2668 # First off, % characters outside $...$ templates
2669 # must be escaped by doubling for proper processing
2670 # by % operator string formatting used further (see
2671 # https://github.com/ytdl-org/youtube-dl/issues/16867).
2672 t = ''
2673 in_template = False
2674 for c in tmpl:
2675 t += c
2676 if c == '$':
2677 in_template = not in_template
2678 elif c == '%' and not in_template:
eca1f0d1 2679 t += c
be2fc5b2 2680 # Next, $...$ templates are translated to their
2681 # %(...) counterparts to be used with % operator
be2fc5b2 2682 t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
2683 t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
2684 t = t.replace('$$', '$')  # unescape literal '$' ($$ -> $); the original call discarded its result
2685 return t
2686
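# Editor's note (illustrative, hypothetical values): prepare_template() maps
# DASH identifiers to %-style placeholders and escapes stray '%', e.g.
#     'seg-$RepresentationID$-$Number%05d$.m4s?p=100%'
# becomes
#     'seg-video_1-%(Number)05d.m4s?p=100%%'
# so that a later `media_template % {'Number': 7}` yields
#     'seg-video_1-00007.m4s?p=100%'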
2687 # @initialization is a regular template like @media one
2688 # so it should be handled just the same way (see
2689 # https://github.com/ytdl-org/youtube-dl/issues/11605)
2690 if 'initialization' in representation_ms_info:
2691 initialization_template = prepare_template(
2692 'initialization',
2693 # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
2694 # $Time$ shall not be included for @initialization thus
2695 # only $Bandwidth$ remains
2696 ('Bandwidth', ))
2697 representation_ms_info['initialization_url'] = initialization_template % {
2698 'Bandwidth': bandwidth,
2699 }
2700
2701 def location_key(location):
2702 return 'url' if re.match(r'^https?://', location) else 'path'
2703
2704 if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
2705
2706 media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
2707 media_location_key = location_key(media_template)
2708
2709 # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
2710 # can't be used at the same time
2711 if '%(Number' in media_template and 's' not in representation_ms_info:
2712 segment_duration = None
2713 if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
2714 segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
ffa89477 2715 representation_ms_info['total_number'] = int(math.ceil(
2716 float_or_none(period_duration, segment_duration, default=0)))
be2fc5b2 2717 representation_ms_info['fragments'] = [{
2718 media_location_key: media_template % {
2719 'Number': segment_number,
2720 'Bandwidth': bandwidth,
2721 },
2722 'duration': segment_duration,
2723 } for segment_number in range(
2724 representation_ms_info['start_number'],
2725 representation_ms_info['total_number'] + representation_ms_info['start_number'])]
2726 else:
2727 # $Number*$ or $Time$ in media template with S list available
2728 # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
2729 # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
2730 representation_ms_info['fragments'] = []
2731 segment_time = 0
2732 segment_d = None
2733 segment_number = representation_ms_info['start_number']
2734
2735 def add_segment_url():
2736 segment_url = media_template % {
2737 'Time': segment_time,
2738 'Bandwidth': bandwidth,
2739 'Number': segment_number,
2740 }
2741 representation_ms_info['fragments'].append({
2742 media_location_key: segment_url,
2743 'duration': float_or_none(segment_d, representation_ms_info['timescale']),
2744 })
2745
2746 for num, s in enumerate(representation_ms_info['s']):
2747 segment_time = s.get('t') or segment_time
2748 segment_d = s['d']
2749 add_segment_url()
2750 segment_number += 1
2751 for r in range(s.get('r', 0)):
2752 segment_time += segment_d
f0948348 2753 add_segment_url()
b4c1d6e8 2754 segment_number += 1
be2fc5b2 2755 segment_time += segment_d
2756 elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
2757 # No media template,
2758 # e.g. https://www.youtube.com/watch?v=iXZV5uAYMJI
be2fc5b2 2759 # or any YouTube dashsegments video
2760 fragments = []
2761 segment_index = 0
2762 timescale = representation_ms_info['timescale']
2763 for s in representation_ms_info['s']:
2764 duration = float_or_none(s['d'], timescale)
2765 for r in range(s.get('r', 0) + 1):
2766 segment_uri = representation_ms_info['segment_urls'][segment_index]
2767 fragments.append({
2768 location_key(segment_uri): segment_uri,
2769 'duration': duration,
2770 })
2771 segment_index += 1
2772 representation_ms_info['fragments'] = fragments
2773 elif 'segment_urls' in representation_ms_info:
2774 # Segment URLs with no SegmentTimeline
62b58c09 2775 # E.g. https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
be2fc5b2 2776 # https://github.com/ytdl-org/youtube-dl/pull/14844
2777 fragments = []
2778 segment_duration = float_or_none(
2779 representation_ms_info['segment_duration'],
2780 representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
2781 for segment_url in representation_ms_info['segment_urls']:
2782 fragment = {
2783 location_key(segment_url): segment_url,
2784 }
2785 if segment_duration:
2786 fragment['duration'] = segment_duration
2787 fragments.append(fragment)
2788 representation_ms_info['fragments'] = fragments
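# Editor's note (illustrative, hypothetical values): every branch above ends
# up populating representation_ms_info['fragments']. For instance, an S list
# [{'t': 0, 'd': 2000, 'r': 1}] with timescale 1000 and a $Time$-based media
# template expands to two fragments, with 'Time' substituted as 0 and 2000
# and a 'duration' of 2.0 seconds each.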
2789 # If there is a fragments key available then we correctly recognized fragmented media.
2790 # Otherwise we will assume unfragmented media with direct access. Technically, such
2791 # an assumption is not necessarily correct since we may simply have no support for
2792 # some forms of fragmented media renditions yet, but for now we'll use this fallback.
2793 if 'fragments' in representation_ms_info:
2794 f.update({
2795 # NB: mpd_url may be empty when MPD manifest is parsed from a string
2796 'url': mpd_url or base_url,
2797 'fragment_base_url': base_url,
2798 'fragments': [],
2799 'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
2800 })
2801 if 'initialization_url' in representation_ms_info:
2802 initialization_url = representation_ms_info['initialization_url']
2803 if not f.get('url'):
2804 f['url'] = initialization_url
2805 f['fragments'].append({location_key(initialization_url): initialization_url})
2806 f['fragments'].extend(representation_ms_info['fragments'])
ffa89477 2807 if not period_duration:
2808 period_duration = try_get(
2809 representation_ms_info,
2810 lambda r: sum(frag['duration'] for frag in r['fragments']), float)
17b598d3 2811 else:
be2fc5b2 2812 # Assuming direct URL to unfragmented media.
2813 f['url'] = base_url
234416e4 2814 if content_type in ('video', 'audio', 'image/jpeg'):
2815 f['manifest_stream_number'] = stream_numbers[f['url']]
2816 stream_numbers[f['url']] += 1
be2fc5b2 2817 formats.append(f)
2818 elif content_type == 'text':
2819 subtitles.setdefault(lang or 'und', []).append(f)
2820
171e59ed 2821 return formats, subtitles
17b598d3 2822
2823 def _extract_ism_formats(self, *args, **kwargs):
2824 fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
2825 if subs:
b5ae35ee 2826 self._report_ignoring_subs('ISM')
2827 return fmts
2828
2829 def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
47a5cb77 2830 res = self._download_xml_handle(
b2758123 2831 ism_url, video_id,
37a3bb66 2832 note='Downloading ISM manifest' if note is None else note,
2833 errnote='Failed to download ISM manifest' if errnote is None else errnote,
7360c06f 2834 fatal=fatal, data=data, headers=headers, query=query)
b2758123 2835 if res is False:
fd76a142 2836 return [], {}
47a5cb77 2837 ism_doc, urlh = res
13b08034 2838 if ism_doc is None:
fd76a142 2839 return [], {}
b2758123 2840
fd76a142 2841 return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
b2758123 2842
fd76a142 2843 def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
2844 """
2845 Parse formats from ISM manifest.
2846 References:
2847 1. [MS-SSTR]: Smooth Streaming Protocol,
2848 https://msdn.microsoft.com/en-us/library/ff469518.aspx
2849 """
06869367 2850 if ism_doc.get('IsLive') == 'TRUE':
fd76a142 2851 return [], {}
b2758123 2852
2853 duration = int(ism_doc.attrib['Duration'])
2854 timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
2855
2856 formats = []
fd76a142 2857 subtitles = {}
2858 for stream in ism_doc.findall('StreamIndex'):
2859 stream_type = stream.get('Type')
fd76a142 2860 if stream_type not in ('video', 'audio', 'text'):
2861 continue
2862 url_pattern = stream.attrib['Url']
2863 stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
2864 stream_name = stream.get('Name')
fd76a142 2865 stream_language = stream.get('Language', 'und')
b2758123 2866 for track in stream.findall('QualityLevel'):
81b6102d 2867 KNOWN_TAGS = {'255': 'AACL', '65534': 'EC-3'}
2868 fourcc = track.get('FourCC') or KNOWN_TAGS.get(track.get('AudioTag'))
b2758123 2869 # TODO: add support for WVC1 and WMAP
81b6102d 2870 if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML', 'EC-3'):
2871 self.report_warning('%s is not a supported codec' % fourcc)
2872 continue
2873 tbr = int(track.attrib['Bitrate']) // 1000
2874 # [1] does not mention Width and Height attributes. However,
2875 # they're often present while MaxWidth and MaxHeight are
2876 # missing, so should be used as fallbacks
2877 width = int_or_none(track.get('MaxWidth') or track.get('Width'))
2878 height = int_or_none(track.get('MaxHeight') or track.get('Height'))
2879 sampling_rate = int_or_none(track.get('SamplingRate'))
2880
2881 track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
14f25df2 2882 track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)
2883
2884 fragments = []
2885 fragment_ctx = {
2886 'time': 0,
2887 }
2888 stream_fragments = stream.findall('c')
2889 for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
2890 fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
2891 fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
2892 fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
2893 if not fragment_ctx['duration']:
2894 try:
2895 next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
2896 except IndexError:
2897 next_fragment_time = duration
1616f9b4 2898 fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
2899 for _ in range(fragment_repeat):
2900 fragments.append({
14f25df2 2901 'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
2902 'duration': fragment_ctx['duration'] / stream_timescale,
2903 })
2904 fragment_ctx['time'] += fragment_ctx['duration']
2905
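# Editor's note (illustrative, not from the original source): a timeline
# entry such as <c t="0" d="20000000" r="2"/> with the default TimeScale of
# 10000000 expands to two fragments of 2.0 s each, with '{start time}' (or
# '{start_time}') in the Url pattern replaced by the running start time,
# i.e. 0 and then 20000000.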
2906 if stream_type == 'text':
2907 subtitles.setdefault(stream_language, []).append({
2908 'ext': 'ismt',
2909 'protocol': 'ism',
2910 'url': ism_url,
2911 'manifest_url': ism_url,
2912 'fragments': fragments,
2913 '_download_params': {
2914 'stream_type': stream_type,
2915 'duration': duration,
2916 'timescale': stream_timescale,
2917 'fourcc': fourcc,
2918 'language': stream_language,
2919 'codec_private_data': track.get('CodecPrivateData'),
2920 }
2921 })
2922 elif stream_type in ('video', 'audio'):
2923 formats.append({
34921b43 2924 'format_id': join_nonempty(ism_id, stream_name, tbr),
2925 'url': ism_url,
2926 'manifest_url': ism_url,
2927 'ext': 'ismv' if stream_type == 'video' else 'isma',
2928 'width': width,
2929 'height': height,
2930 'tbr': tbr,
2931 'asr': sampling_rate,
2932 'vcodec': 'none' if stream_type == 'audio' else fourcc,
2933 'acodec': 'none' if stream_type == 'video' else fourcc,
2934 'protocol': 'ism',
2935 'fragments': fragments,
88acdbc2 2936 'has_drm': ism_doc.find('Protection') is not None,
2937 '_download_params': {
2938 'stream_type': stream_type,
2939 'duration': duration,
2940 'timescale': stream_timescale,
2941 'width': width or 0,
2942 'height': height or 0,
2943 'fourcc': fourcc,
2944 'language': stream_language,
2945 'codec_private_data': track.get('CodecPrivateData'),
2946 'sampling_rate': sampling_rate,
2947 'channels': int_or_none(track.get('Channels', 2)),
2948 'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
2949 'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
2950 },
2951 })
2952 return formats, subtitles
b2758123 2953
079a7cfc 2954 def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8_native', mpd_id=None, preference=None, quality=None):
2955 def absolute_url(item_url):
2956 return urljoin(base_url, item_url)
59bbe491 2957
2958 def parse_content_type(content_type):
2959 if not content_type:
2960 return {}
2961 ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
2962 if ctr:
2963 mimetype, codecs = ctr.groups()
2964 f = parse_codecs(codecs)
2965 f['ext'] = mimetype2ext(mimetype)
2966 return f
2967 return {}
2968
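# Editor's note (illustrative, hypothetical value): for an attribute like
#     type='video/mp4; codecs="avc1.64001f, mp4a.40.2"'
# parse_content_type() returns roughly
#     {'ext': 'mp4', 'vcodec': 'avc1.64001f', 'acodec': 'mp4a.40.2', ...}
# which the callers below pass to _media_formats() and fold into the
# resulting format dicts.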
2969 def _media_formats(src, cur_media_type, type_info=None):
2970 type_info = type_info or {}
520251c0 2971 full_url = absolute_url(src)
82889d4a 2972 ext = type_info.get('ext') or determine_ext(full_url)
87a449c1 2973 if ext == 'm3u8':
2974 is_plain_url = False
2975 formats = self._extract_m3u8_formats(
ad120ae1 2976 full_url, video_id, ext='mp4',
eeb0a956 2977 entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
f983b875 2978 preference=preference, quality=quality, fatal=False)
2979 elif ext == 'mpd':
2980 is_plain_url = False
2981 formats = self._extract_mpd_formats(
b359e977 2982 full_url, video_id, mpd_id=mpd_id, fatal=False)
2983 else:
2984 is_plain_url = True
2985 formats = [{
2986 'url': full_url,
2987 'vcodec': 'none' if cur_media_type == 'audio' else None,
222a2308 2988 'ext': ext,
2989 }]
2990 return is_plain_url, formats
2991
59bbe491 2992 entries = []
4328ddf8 2993 # amp-video and amp-audio are very similar to their HTML5 counterparts
962ffcf8 2994 # so we will include them right here (see
4328ddf8 2995 # https://www.ampproject.org/docs/reference/components/amp-video)
29f7c58a 2996 # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
2997 _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video|audio)'
2998 media_tags = [(media_tag, media_tag_name, media_type, '')
2999 for media_tag, media_tag_name, media_type
3000 in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
3001 media_tags.extend(re.findall(
3002 # We only allow video|audio followed by a whitespace or '>'.
3003 # Allowing more characters may end up in a significant slowdown (see
3004 # https://github.com/ytdl-org/youtube-dl/issues/11979,
3005 # e.g. http://www.porntrex.com/maps/videositemap.xml).
29f7c58a 3006 r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
3007 for media_tag, _, media_type, media_content in media_tags:
59bbe491 3008 media_info = {
3009 'formats': [],
3010 'subtitles': {},
3011 }
3012 media_attributes = extract_attributes(media_tag)
bfbecd11 3013 src = strip_or_none(dict_get(media_attributes, ('src', 'data-video-src', 'data-src', 'data-source')))
59bbe491 3014 if src:
3015 f = parse_content_type(media_attributes.get('type'))
3016 _, formats = _media_formats(src, media_type, f)
520251c0 3017 media_info['formats'].extend(formats)
6780154e 3018 media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
59bbe491 3019 if media_content:
3020 for source_tag in re.findall(r'<source[^>]+>', media_content):
3021 s_attr = extract_attributes(source_tag)
3022 # data-video-src and data-src are non-standard but seen
3023 # several times in the wild
bfbecd11 3024 src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src', 'data-source')))
59bbe491 3025 if not src:
3026 continue
d493f15c 3027 f = parse_content_type(s_attr.get('type'))
868f79db 3028 is_plain_url, formats = _media_formats(src, media_type, f)
520251c0 3029 if is_plain_url:
3030 # width, height, res, label and title attributes are
3031 # all non-standard but seen several times in the wild
3032 labels = [
3033 s_attr.get(lbl)
3034 for lbl in ('label', 'title')
3035 if str_or_none(s_attr.get(lbl))
3036 ]
3037 width = int_or_none(s_attr.get('width'))
3038 height = (int_or_none(s_attr.get('height'))
3039 or int_or_none(s_attr.get('res')))
3040 if not width or not height:
3041 for lbl in labels:
3042 resolution = parse_resolution(lbl)
3043 if not resolution:
3044 continue
3045 width = width or resolution.get('width')
3046 height = height or resolution.get('height')
3047 for lbl in labels:
3048 tbr = parse_bitrate(lbl)
3049 if tbr:
3050 break
3051 else:
3052 tbr = None
1ed45499 3053 f.update({
3054 'width': width,
3055 'height': height,
3056 'tbr': tbr,
3057 'format_id': s_attr.get('label') or s_attr.get('title'),
1ed45499 3058 })
3059 f.update(formats[0])
3060 media_info['formats'].append(f)
3061 else:
3062 media_info['formats'].extend(formats)
59bbe491 3063 for track_tag in re.findall(r'<track[^>]+>', media_content):
3064 track_attributes = extract_attributes(track_tag)
3065 kind = track_attributes.get('kind')
5968d7d2 3066 if not kind or kind in ('subtitles', 'captions'):
f856816b 3067 src = strip_or_none(track_attributes.get('src'))
59bbe491 3068 if not src:
3069 continue
3070 lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
3071 media_info['subtitles'].setdefault(lang, []).append({
3072 'url': absolute_url(src),
3073 })
3074 for f in media_info['formats']:
3075 f.setdefault('http_headers', {})['Referer'] = base_url
5968d7d2 3076 if media_info['formats'] or media_info['subtitles']:
59bbe491 3077 entries.append(media_info)
3078 return entries
3079
3080 def _extract_akamai_formats(self, *args, **kwargs):
3081 fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
3082 if subs:
b5ae35ee 3083 self._report_ignoring_subs('akamai')
3084 return fmts
3085
3086 def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
29f7c58a 3087 signed = 'hdnea=' in manifest_url
3088 if not signed:
3089 # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
3090 manifest_url = re.sub(
3091 r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
3092 '', manifest_url).strip('?')
3093
c7c43a93 3094 formats = []
f6a1d69a 3095 subtitles = {}
70c5802b 3096
e71a4509 3097 hdcore_sign = 'hdcore=3.7.0'
ff6f9a67 3098 f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
3099 hds_host = hosts.get('hds')
3100 if hds_host:
3101 f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
3102 if 'hdcore=' not in f4m_url:
3103 f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
3104 f4m_formats = self._extract_f4m_formats(
3105 f4m_url, video_id, f4m_id='hds', fatal=False)
3106 for entry in f4m_formats:
3107 entry.update({'extra_param_to_segment_url': hdcore_sign})
3108 formats.extend(f4m_formats)
70c5802b 3109
3110 m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
3111 hls_host = hosts.get('hls')
3112 if hls_host:
3113 m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
f6a1d69a 3114 m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
c7c43a93 3115 m3u8_url, video_id, 'mp4', 'm3u8_native',
29f7c58a 3116 m3u8_id='hls', fatal=False)
3117 formats.extend(m3u8_formats)
f6a1d69a 3118 subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)
70c5802b 3119
3120 http_host = hosts.get('http')
29f7c58a 3121 if http_host and m3u8_formats and not signed:
3122 REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
70c5802b 3123 qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
3124 qualities_length = len(qualities)
29f7c58a 3125 if len(m3u8_formats) in (qualities_length, qualities_length + 1):
70c5802b 3126 i = 0
29f7c58a 3127 for f in m3u8_formats:
3128 if f['vcodec'] != 'none':
70c5802b 3129 for protocol in ('http', 'https'):
3130 http_f = f.copy()
3131 del http_f['manifest_url']
3132 http_url = re.sub(
86e5f3ed 3133 REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
70c5802b 3134 http_f.update({
3135 'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
3136 'url': http_url,
3137 'protocol': protocol,
3138 })
29f7c58a 3139 formats.append(http_f)
70c5802b 3140 i += 1
70c5802b 3141
f6a1d69a 3142 return formats, subtitles
c7c43a93 3143
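# Editor's note (illustrative sketch with a hypothetical URL): for an HLS
# manifest such as
#     https://example-vh.akamaihd.net/i/videos/clip_,300,600,900,.mp4.csmil/master.m3u8
# REPL_REGEX above captures the quality list ('300', '600', '900'), and each
# HLS video format is mirrored as plain http(s) progressive URLs like
#     https://<http_host>/videos/clip_300.mp4
# but only when the HLS format count matches the quality count (or exceeds
# it by one, typically an audio-only rendition) and the manifest is not
# token-protected ('hdnea=').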
6ad02195 3144 def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
14f25df2 3145 query = urllib.parse.urlparse(url).query
6ad02195 3146 url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
3147 mobj = re.search(
3148 r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
3149 url_base = mobj.group('url')
3150 http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
6ad02195 3151 formats = []
3152
3153 def manifest_url(manifest):
86e5f3ed 3154 m_url = f'{http_base_url}/{manifest}'
3155 if query:
3156 m_url += '?%s' % query
3157 return m_url
3158
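# Editor's note (illustrative, hypothetical URL): for
#     url = 'https://wowza.example.com/vod/mp4:clip.mp4/playlist.m3u8?token=abc'
# the '/playlist.m3u8' suffix is stripped above, and manifest_url() then
# rebuilds per-protocol manifests such as
#     'https://wowza.example.com/vod/mp4:clip.mp4/manifest.mpd?token=abc'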
3159 if 'm3u8' not in skip_protocols:
3160 formats.extend(self._extract_m3u8_formats(
044eeb14 3161 manifest_url('playlist.m3u8'), video_id, 'mp4',
3162 m3u8_entry_protocol, m3u8_id='hls', fatal=False))
3163 if 'f4m' not in skip_protocols:
3164 formats.extend(self._extract_f4m_formats(
044eeb14 3165 manifest_url('manifest.f4m'),
6ad02195 3166 video_id, f4m_id='hds', fatal=False))
3167 if 'dash' not in skip_protocols:
3168 formats.extend(self._extract_mpd_formats(
044eeb14 3169 manifest_url('manifest.mpd'),
0384932e 3170 video_id, mpd_id='dash', fatal=False))
6ad02195 3171 if re.search(r'(?:/smil:|\.smil)', url_base):
3172 if 'smil' not in skip_protocols:
3173 rtmp_formats = self._extract_smil_formats(
044eeb14 3174 manifest_url('jwplayer.smil'),
3175 video_id, fatal=False)
3176 for rtmp_format in rtmp_formats:
3177 rtsp_format = rtmp_format.copy()
3178 rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
3179 del rtsp_format['play_path']
3180 del rtsp_format['ext']
3181 rtsp_format.update({
3182 'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
3183 'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
3184 'protocol': 'rtsp',
3185 })
3186 formats.extend([rtmp_format, rtsp_format])
3187 else:
3188 for protocol in ('rtmp', 'rtsp'):
3189 if protocol not in skip_protocols:
3190 formats.append({
86e5f3ed 3191 'url': f'{protocol}:{url_base}',
3192 'format_id': protocol,
3193 'protocol': protocol,
3194 })
3195 return formats
3196
c73e330e 3197 def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
a4a554a7 3198 mobj = re.search(
ac9c69ac 3199 r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
3200 webpage)
3201 if mobj:
3202 try:
3203 jwplayer_data = self._parse_json(mobj.group('options'),
3204 video_id=video_id,
3205 transform_source=transform_source)
3206 except ExtractorError:
3207 pass
3208 else:
3209 if isinstance(jwplayer_data, dict):
3210 return jwplayer_data
3211
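# Editor's note (illustrative, hypothetical markup): given page HTML like
#     <script>jwplayer("vplayer").setup({"file": "https://cdn.example.com/clip.m3u8"});</script>
# _find_jwplayer_data() captures the setup(...) argument (which must not
# itself contain a ')') and returns it parsed via js_to_json/_parse_json as
#     {'file': 'https://cdn.example.com/clip.m3u8'}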
3212 def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
3213 jwplayer_data = self._find_jwplayer_data(
3214 webpage, video_id, transform_source=js_to_json)
3215 return self._parse_jwplayer_data(
3216 jwplayer_data, video_id, *args, **kwargs)
3217
3218 def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
3219 m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
3220 # JWPlayer backward compatibility: flattened playlists
3221 # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
3222 if 'playlist' not in jwplayer_data:
3223 jwplayer_data = {'playlist': [jwplayer_data]}
3224
3225 entries = []
3226
3227 # JWPlayer backward compatibility: single playlist item
3228 # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
3229 if not isinstance(jwplayer_data['playlist'], list):
3230 jwplayer_data['playlist'] = [jwplayer_data['playlist']]
3231
3232 for video_data in jwplayer_data['playlist']:
3233 # JWPlayer backward compatibility: flattened sources
3234 # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
3235 if 'sources' not in video_data:
3236 video_data['sources'] = [video_data]
3237
3238 this_video_id = video_id or video_data['mediaid']
3239
3240 formats = self._parse_jwplayer_formats(
3241 video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
3242 mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
3243
3244 subtitles = {}
3245 tracks = video_data.get('tracks')
3246 if tracks and isinstance(tracks, list):
3247 for track in tracks:
3248 if not isinstance(track, dict):
3249 continue
f4b74272 3250 track_kind = track.get('kind')
14f25df2 3251 if not track_kind or not isinstance(track_kind, str):
3252 continue
3253 if track_kind.lower() not in ('captions', 'subtitles'):
3254 continue
3255 track_url = urljoin(base_url, track.get('file'))
3256 if not track_url:
3257 continue
3258 subtitles.setdefault(track.get('label') or 'en', []).append({
3259 'url': self._proto_relative_url(track_url)
3260 })
3261
50d808f5 3262 entry = {
a4a554a7 3263 'id': this_video_id,
50d808f5 3264 'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
f81dd65b 3265 'description': clean_html(video_data.get('description')),
6945b9e7 3266 'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
3267 'timestamp': int_or_none(video_data.get('pubdate')),
3268 'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
3269 'subtitles': subtitles,
3270 }
3271 # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
3272 if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
3273 entry.update({
3274 '_type': 'url_transparent',
3275 'url': formats[0]['url'],
3276 })
3277 else:
3278 entry['formats'] = formats
3279 entries.append(entry)
3280 if len(entries) == 1:
3281 return entries[0]
3282 else:
3283 return self.playlist_result(entries)
3284
3285 def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
3286 m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
bf1b87cd 3287 urls = []
ed0cf9b3 3288 formats = []
1a2192cb 3289 for source in jwplayer_sources_data:
3290 if not isinstance(source, dict):
3291 continue
3292 source_url = urljoin(
3293 base_url, self._proto_relative_url(source.get('file')))
3294 if not source_url or source_url in urls:
3295 continue
3296 urls.append(source_url)
3297 source_type = source.get('type') or ''
3298 ext = mimetype2ext(source_type) or determine_ext(source_url)
3299 if source_type == 'hls' or ext == 'm3u8':
3300 formats.extend(self._extract_m3u8_formats(
3301 source_url, video_id, 'mp4', entry_protocol='m3u8_native',
3302 m3u8_id=m3u8_id, fatal=False))
0d9c48de 3303 elif source_type == 'dash' or ext == 'mpd':
3304 formats.extend(self._extract_mpd_formats(
3305 source_url, video_id, mpd_id=mpd_id, fatal=False))
3306 elif ext == 'smil':
3307 formats.extend(self._extract_smil_formats(
3308 source_url, video_id, fatal=False))
ed0cf9b3 3309 # https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
3310 elif source_type.startswith('audio') or ext in (
3311 'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
3312 formats.append({
3313 'url': source_url,
3314 'vcodec': 'none',
3315 'ext': ext,
3316 })
3317 else:
3318 height = int_or_none(source.get('height'))
3319 if height is None:
3320 # Often no height is provided but there is a label in
0236cd0d 3321 # format like "1080p", "720p SD", or 1080.
ed0cf9b3 3322 height = int_or_none(self._search_regex(
14f25df2 3323 r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
3324 'height', default=None))
3325 a_format = {
3326 'url': source_url,
3327 'width': int_or_none(source.get('width')),
3328 'height': height,
d3a3d7f0 3329 'tbr': int_or_none(source.get('bitrate'), scale=1000),
3330 'filesize': int_or_none(source.get('filesize')),
3331 'ext': ext,
3332 }
3333 if source_url.startswith('rtmp'):
3334 a_format['ext'] = 'flv'
3335 # See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
3336 # of jwplayer.flash.swf
3337 rtmp_url_parts = re.split(
3338 r'((?:mp4|mp3|flv):)', source_url, 1)
3339 if len(rtmp_url_parts) == 3:
3340 rtmp_url, prefix, play_path = rtmp_url_parts
3341 a_format.update({
3342 'url': rtmp_url,
3343 'play_path': prefix + play_path,
3344 })
3345 if rtmp_params:
3346 a_format.update(rtmp_params)
3347 formats.append(a_format)
3348 return formats
3349
f4b1c7ad 3350 def _live_title(self, name):
39ca3b5c 3351 self._downloader.deprecation_warning('yt_dlp.InfoExtractor._live_title is deprecated and does not work as expected')
3352 return name
f4b1c7ad 3353
3354 def _int(self, v, name, fatal=False, **kwargs):
3355 res = int_or_none(v, **kwargs)
b14f3a4c 3356 if res is None:
86e5f3ed 3357 msg = f'Failed to extract {name}: Could not parse value {v!r}'
3358 if fatal:
3359 raise ExtractorError(msg)
3360 else:
6a39ee13 3361 self.report_warning(msg)
3362 return res
3363
3364 def _float(self, v, name, fatal=False, **kwargs):
3365 res = float_or_none(v, **kwargs)
3366 if res is None:
86e5f3ed 3367 msg = f'Failed to extract {name}: Could not parse value {v!r}'
3368 if fatal:
3369 raise ExtractorError(msg)
3370 else:
6a39ee13 3371 self.report_warning(msg)
3372 return res
3373
3374 def _set_cookie(self, domain, name, value, expire_time=None, port=None,
3375 path='/', secure=False, discard=False, rest={}, **kwargs):
ac668111 3376 cookie = http.cookiejar.Cookie(
4ed2d7b7 3377 0, name, value, port, port is not None, domain, True,
3378 domain.startswith('.'), path, True, secure, expire_time,
3379 discard, None, None, rest)
9809740b 3380 self.cookiejar.set_cookie(cookie)
42939b61 3381
799207e8 3382 def _get_cookies(self, url):
ac668111 3383 """ Return a http.cookies.SimpleCookie with the cookies for the url """
8817a80d 3384 return LenientSimpleCookie(self._downloader._calc_cookies(url))
799207e8 3385
e3c1266f 3386 def _apply_first_set_cookie_header(self, url_handle, cookie):
3387 """
3388 Apply first Set-Cookie header instead of the last. Experimental.
3389
3390 Some sites (e.g. [1-3]) may serve two cookies under the same name
3391 in the Set-Cookie header and expect the first (old) one to be set rather
3392 than the second (new) one. However, as per RFC 6265 the newer cookie
3393 should be set into the cookie store, which is what actually happens.
3394 We work around this issue by resetting the cookie to
3395 the first one manually.
3396 1. https://new.vk.com/
3397 2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
3398 3. https://learning.oreilly.com/
3399 """
3400 for header, cookies in url_handle.headers.items():
3401 if header.lower() != 'set-cookie':
3402 continue
cfb0511d 3403 cookies = cookies.encode('iso-8859-1').decode('utf-8')
3404 cookie_value = re.search(
3405 r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
3406 if cookie_value:
3407 value, domain = cookie_value.groups()
3408 self._set_cookie(domain, cookie, value)
3409 break
3410
82d02080 3411 @classmethod
3412 def get_testcases(cls, include_onlymatching=False):
6368e2e6 3413 # Do not look in super classes
3414 t = vars(cls).get('_TEST')
05900629 3415 if t:
82d02080 3416 assert not hasattr(cls, '_TESTS'), f'{cls.ie_key()}IE has _TEST and _TESTS'
3417 tests = [t]
3418 else:
6368e2e6 3419 tests = vars(cls).get('_TESTS', [])
3420 for t in tests:
3421 if not include_onlymatching and t.get('only_matching', False):
3422 continue
82d02080 3423 t['name'] = cls.ie_key()
3424 yield t
3425
f2e8dbcc 3426 @classmethod
3427 def get_webpage_testcases(cls):
6368e2e6 3428 tests = vars(cls).get('_WEBPAGE_TESTS', [])
f2e8dbcc 3429 for t in tests:
3430 t['name'] = cls.ie_key()
3431 return tests
3432
6368e2e6 3433 @classproperty(cache=True)
24146491 3434 def age_limit(cls):
3435 """Get age limit from the testcases"""
3436 return max(traverse_obj(
f2e8dbcc 3437 (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
24146491 3438 (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])
3439
171a31db 3440 @classproperty(cache=True)
3441 def _RETURN_TYPE(cls):
3442 """What the extractor returns: "video", "playlist", "any", or None (Unknown)"""
3443 tests = tuple(cls.get_testcases(include_onlymatching=False))
3444 if not tests:
3445 return None
3446 elif not any(k.startswith('playlist') for test in tests for k in test):
3447 return 'video'
3448 elif all(any(k.startswith('playlist') for k in test) for test in tests):
3449 return 'playlist'
3450 return 'any'
3451
3452 @classmethod
3453 def is_single_video(cls, url):
3454 """Returns whether the URL is of a single video, None if unknown"""
3455 assert cls.suitable(url), 'The URL must be suitable for the extractor'
3456 return {'video': True, 'playlist': False}.get(cls._RETURN_TYPE)
3457
82d02080 3458 @classmethod
3459 def is_suitable(cls, age_limit):
24146491 3460 """Test whether the extractor is generally suitable for the given age limit"""
3461 return not age_restricted(cls.age_limit, age_limit)
05900629 3462
82d02080 3463 @classmethod
3464 def description(cls, *, markdown=True, search_examples=None):
8dcce6a8 3465 """Description of the extractor"""
3466 desc = ''
82d02080 3467 if cls._NETRC_MACHINE:
8dcce6a8 3468 if markdown:
82d02080 3469 desc += f' [<abbr title="netrc machine"><em>{cls._NETRC_MACHINE}</em></abbr>]'
8dcce6a8 3470 else:
82d02080 3471 desc += f' [{cls._NETRC_MACHINE}]'
3472 if cls.IE_DESC is False:
8dcce6a8 3473 desc += ' [HIDDEN]'
82d02080 3474 elif cls.IE_DESC:
3475 desc += f' {cls.IE_DESC}'
3476 if cls.SEARCH_KEY:
3477 desc += f'; "{cls.SEARCH_KEY}:" prefix'
8dcce6a8 3478 if search_examples:
3479 _COUNTS = ('', '5', '10', 'all')
62b58c09 3480 desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
82d02080 3481 if not cls.working():
8dcce6a8 3482 desc += ' (**Currently broken**)' if markdown else ' (Currently broken)'
3483
46d09f87 3484 # Escape emojis. Ref: https://github.com/github/markup/issues/1153
3485 name = (' - **%s**' % re.sub(r':(\w+:)', ':\u200B\\g<1>', cls.IE_NAME)) if markdown else cls.IE_NAME
8dcce6a8 3486 return f'{name}:{desc}' if desc else name
3487
a504ced0 3488 def extract_subtitles(self, *args, **kwargs):
a06916d9 3489 if (self.get_param('writesubtitles', False)
3490 or self.get_param('listsubtitles')):
3491 return self._get_subtitles(*args, **kwargs)
3492 return {}
3493
3494 def _get_subtitles(self, *args, **kwargs):
611c1dd9 3495 raise NotImplementedError('This method must be implemented by subclasses')
a504ced0 3496
3497 class CommentsDisabled(Exception):
3498 """Raise in _get_comments if comments are disabled for the video"""
3499
a2160aa4 3500 def extract_comments(self, *args, **kwargs):
3501 if not self.get_param('getcomments'):
3502 return None
3503 generator = self._get_comments(*args, **kwargs)
3504
3505 def extractor():
3506 comments = []
d2b2fca5 3507 interrupted = True
a2160aa4 3508 try:
3509 while True:
3510 comments.append(next(generator))
a2160aa4 3511 except StopIteration:
3512 interrupted = False
d2b2fca5 3513 except KeyboardInterrupt:
3514 self.to_screen('Interrupted by user')
3515 except self.CommentsDisabled:
3516 return {'comments': None, 'comment_count': None}
d2b2fca5 3517 except Exception as e:
3518 if self.get_param('ignoreerrors') is not True:
3519 raise
3520 self._downloader.report_error(e)
a2160aa4 3521 comment_count = len(comments)
3522 self.to_screen(f'Extracted {comment_count} comments')
3523 return {
3524 'comments': comments,
3525 'comment_count': None if interrupted else comment_count
3526 }
3527 return extractor
3528
3529 def _get_comments(self, *args, **kwargs):
3530 raise NotImplementedError('This method must be implemented by subclasses')
3531
3532 @staticmethod
3533 def _merge_subtitle_items(subtitle_list1, subtitle_list2):
a825ffbf 3534 """ Merge subtitle items for one language. Items with duplicated URLs/data
912e0b7e 3535 will be dropped. """
86e5f3ed 3536 list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
912e0b7e 3537 ret = list(subtitle_list1)
a44ca5a4 3538 ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
3539 return ret
3540
3541 @classmethod
46890374 3542 def _merge_subtitles(cls, *dicts, target=None):
19bb3920 3543 """ Merge subtitle dictionaries, language by language. """
3544 if target is None:
3545 target = {}
3546 for d in dicts:
3547 for lang, subs in d.items():
3548 target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
3549 return target
912e0b7e 3550
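# Editor's note (illustrative): merging is per language and deduplicates on
# (url, data), e.g.
#     >>> InfoExtractor._merge_subtitles(
#     ...     {'en': [{'url': 'a.vtt'}]},
#     ...     {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]})
#     {'en': [{'url': 'a.vtt'}, {'url': 'b.vtt'}], 'de': [{'url': 'c.vtt'}]}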
360e1ca5 3551 def extract_automatic_captions(self, *args, **kwargs):
a06916d9 3552 if (self.get_param('writeautomaticsub', False)
3553 or self.get_param('listsubtitles')):
3554 return self._get_automatic_captions(*args, **kwargs)
3555 return {}
3556
3557 def _get_automatic_captions(self, *args, **kwargs):
611c1dd9 3558 raise NotImplementedError('This method must be implemented by subclasses')
360e1ca5 3559
2762dbb1 3560 @functools.cached_property
24146491 3561 def _cookies_passed(self):
3562 """Whether cookies have been passed to YoutubeDL"""
3563 return self.get_param('cookiefile') is not None or self.get_param('cookiesfrombrowser') is not None
3564
d77ab8e2 3565 def mark_watched(self, *args, **kwargs):
1813a6cc 3566 if not self.get_param('mark_watched', False):
3567 return
24146491 3568 if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
3569 self._mark_watched(*args, **kwargs)
3570
3571 def _mark_watched(self, *args, **kwargs):
3572 raise NotImplementedError('This method must be implemented by subclasses')
3573
3574 def geo_verification_headers(self):
3575 headers = {}
a06916d9 3576 geo_verification_proxy = self.get_param('geo_verification_proxy')
3577 if geo_verification_proxy:
3578 headers['Ytdl-request-proxy'] = geo_verification_proxy
3579 return headers
3580
8f97a15d 3581 @staticmethod
3582 def _generic_id(url):
14f25df2 3583 return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
98763ee3 3584
62b8dac4 3585 def _generic_title(self, url='', webpage='', *, default=None):
3586 return (self._og_search_title(webpage, default=None)
3587 or self._html_extract_title(webpage, default=None)
3588 or urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
3589 or default)
98763ee3 3590
c224251a 3591 @staticmethod
b0089e89 3592 def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
3593 all_known = all(map(
3594 lambda x: x is not None,
3595 (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
3596 return (
3597 'private' if is_private
3598 else 'premium_only' if needs_premium
3599 else 'subscriber_only' if needs_subscription
3600 else 'needs_auth' if needs_auth
3601 else 'unlisted' if is_unlisted
3602 else 'public' if all_known
3603 else None)
3604
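# Editor's note (illustrative): the flags are checked from most to least
# restrictive, e.g.
#     _availability(is_private=False, needs_premium=False, needs_subscription=False,
#                   needs_auth=False, is_unlisted=True)   -> 'unlisted'
# while _availability() with every flag left at None returns None, since
# 'public' is only reported when all flags are known to be falsy.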
d43de682 3605 def _configuration_arg(self, key, default=NO_DEFAULT, *, ie_key=None, casesense=False):
4bb6b02f 3606 '''
3607 @returns A list of values for the extractor argument given by "key"
3608 or "default" if no such key is present
3609 @param default The default value to return when the key is not present (default: [])
3610 @param casesense When false, the values are converted to lower case
3611 '''
5225df50 3612 ie_key = ie_key if isinstance(ie_key, str) else (ie_key or self).ie_key()
3613 val = traverse_obj(self._downloader.params, ('extractor_args', ie_key.lower(), key))
4bb6b02f 3614 if val is None:
3615 return [] if default is NO_DEFAULT else default
3616 return list(val) if casesense else [x.lower() for x in val]
5d3a0e79 3617
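# Editor's note (illustrative, assuming the documented CLI syntax): with
#     --extractor-args "youtube:player_client=android,web"
# a YouTube extractor calling self._configuration_arg('player_client')
# would receive ['android', 'web'], while an unset key yields [] (or the
# supplied `default`).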
f40ee5e9 3618 def _yes_playlist(self, playlist_id, video_id, smuggled_data=None, *, playlist_label='playlist', video_label='video'):
3619 if not playlist_id or not video_id:
3620 return not video_id
3621
3622 no_playlist = (smuggled_data or {}).get('force_noplaylist')
3623 if no_playlist is not None:
3624 return not no_playlist
3625
3626 video_id = '' if video_id is True else f' {video_id}'
3627 playlist_id = '' if playlist_id is True else f' {playlist_id}'
3628 if self.get_param('noplaylist'):
3629 self.to_screen(f'Downloading just the {video_label}{video_id} because of --no-playlist')
3630 return False
3631 self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
3632 return True
3633
be5c1ae8 3634 def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
8ca48a1a 3635 RetryManager.report_retry(
3636 err, _count or int(fatal), _retries,
3637 info=self.to_screen, warn=self.report_warning, error=None if fatal else self.report_warning,
3638 sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))
be5c1ae8 3639
3640 def RetryManager(self, **kwargs):
3641 return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)
3642
ade1fa70 3643 def _extract_generic_embeds(self, url, *args, info_dict={}, note='Extracting generic embeds', **kwargs):
3644 display_id = traverse_obj(info_dict, 'display_id', 'id')
3645 self.to_screen(f'{format_field(display_id, None, "%s: ")}{note}')
3646 return self._downloader.get_info_extractor('Generic')._extract_embeds(
3647 smuggle_url(url, {'block_ies': [self.ie_key()]}), *args, **kwargs)
3648
8f97a15d 3649 @classmethod
3650 def extract_from_webpage(cls, ydl, url, webpage):
3651 ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
3652 else ydl.get_info_extractor(cls.ie_key()))
f2e8dbcc 3653 for info in ie._extract_from_webpage(url, webpage) or []:
3654 # url = None since we do not want to set (webpage/original)_url
3655 ydl.add_default_extra_info(info, ie, None)
3656 yield info
8f97a15d 3657
3658 @classmethod
3659 def _extract_from_webpage(cls, url, webpage):
3660 for embed_url in orderedSet(
3661 cls._extract_embed_urls(url, webpage) or [], lazy=True):
d2c8aadf 3662 yield cls.url_result(embed_url, None if cls._VALID_URL is False else cls)
8f97a15d 3663
3664 @classmethod
3665 def _extract_embed_urls(cls, url, webpage):
3666 """@returns all the embed urls on the webpage"""
3667 if '_EMBED_URL_RE' not in cls.__dict__:
3668 assert isinstance(cls._EMBED_REGEX, (list, tuple))
3669 for idx, regex in enumerate(cls._EMBED_REGEX):
3670 assert regex.count('(?P<url>') == 1, \
3671 f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
3672 cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))
3673
3674 for regex in cls._EMBED_URL_RE:
3675 for mobj in regex.finditer(webpage):
3676 embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
3677 if cls._VALID_URL is False or cls.suitable(embed_url):
3678 yield embed_url
3679
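# Editor's note (illustrative, hypothetical extractor): a subclass declaring
#     _EMBED_REGEX = [r'<iframe[^>]+src="(?P<url>https?://player\.example\.com/embed/[^"]+)"']
# gets each matching URL unescaped, resolved against the page URL and
# yielded here, filtered through cls.suitable() unless _VALID_URL is False.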
3680 class StopExtraction(Exception):
3681 pass
3682
bfd973ec 3683 @classmethod
3684 def _extract_url(cls, webpage): # TODO: Remove
3685 """Only for compatibility with some older extractors"""
3686 return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
3687
2314b4d8 3688 @classmethod
3689 def __init_subclass__(cls, *, plugin_name=None, **kwargs):
3690 if plugin_name:
3691 mro = inspect.getmro(cls)
3692 super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
3693 cls.IE_NAME, cls.ie_key = f'{super_class.IE_NAME}+{plugin_name}', super_class.ie_key
3694 while getattr(super_class, '__wrapped__', None):
3695 super_class = super_class.__wrapped__
3696 setattr(sys.modules[super_class.__module__], super_class.__name__, cls)
3697
3698 return super().__init_subclass__(**kwargs)
3699
8dbe9899 3700
3701class SearchInfoExtractor(InfoExtractor):
3702 """
3703 Base class for paged search queries extractors.
10952eb2 3704 They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
96565c7e 3705 Instances should define _SEARCH_KEY and optionally _MAX_RESULTS
3706 """
3707
96565c7e 3708 _MAX_RESULTS = float('inf')
171a31db 3709 _RETURN_TYPE = 'playlist'
96565c7e 3710
8f97a15d 3711 @classproperty
3712 def _VALID_URL(cls):
3713 return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
3714
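# Editor's note (illustrative): with _SEARCH_KEY = 'ytsearch' the generated
# pattern accepts, for example,
#     'ytsearch:cats'     -> prefix '',    1 result
#     'ytsearch5:cats'    -> prefix '5',   5 results
#     'ytsearchall:cats'  -> prefix 'all', up to _MAX_RESULTS results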
d6983cb4 3715 def _real_extract(self, query):
2c4aaadd 3716 prefix, query = self._match_valid_url(query).group('prefix', 'query')
3717 if prefix == '':
3718 return self._get_n_results(query, 1)
3719 elif prefix == 'all':
3720 return self._get_n_results(query, self._MAX_RESULTS)
3721 else:
3722 n = int(prefix)
3723 if n <= 0:
86e5f3ed 3724 raise ExtractorError(f'invalid download number {n} for query "{query}"')
d6983cb4 3725 elif n > self._MAX_RESULTS:
6a39ee13 3726 self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
3727 n = self._MAX_RESULTS
3728 return self._get_n_results(query, n)
3729
3730 def _get_n_results(self, query, n):
cc16383f 3731 """Get a specified number of results for a query.
3732 Either this function or _search_results must be overridden by subclasses """
3733 return self.playlist_result(
3734 itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
3735 query, query)
3736
3737 def _search_results(self, query):
3738 """Returns an iterator of search results"""
611c1dd9 3739 raise NotImplementedError('This method must be implemented by subclasses')
0f818663 3740
82d02080 3741 @classproperty
3742 def SEARCH_KEY(cls):
3743 return cls._SEARCH_KEY
fe7866d0 3744
3745
3746class UnsupportedURLIE(InfoExtractor):
3747 _VALID_URL = '.*'
3748 _ENABLED = False
3749 IE_DESC = False
3750
3751 def _real_extract(self, url):
3752 raise UnsupportedError(url)